diff --git a/.gitattributes b/.gitattributes index b223c8ac5fb84..f0b1c67bd0fdd 100644 --- a/.gitattributes +++ b/.gitattributes @@ -9,3 +9,7 @@ src/etc/installer/gfx/* binary *.woff binary src/vendor/** -text Cargo.lock -merge linguist-generated=false + +# Older git versions try to fix line endings on images, this prevents it. +*.png binary +*.ico binary diff --git a/.gitignore b/.gitignore index e18acfd98e241..67e0dd8e795bb 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ __pycache__/ .settings/ .valgrindrc .vscode/ +.favorites.json /*-*-*-*/ /*-*-*/ /Makefile diff --git a/.gitmodules b/.gitmodules index 70164d48a307b..a32b82666a75f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,7 +1,3 @@ -[submodule "src/llvm"] - path = src/llvm - url = https://github.com/rust-lang/llvm.git - branch = master [submodule "src/rust-installer"] path = src/tools/rust-installer url = https://github.com/rust-lang/rust-installer.git @@ -28,7 +24,7 @@ url = https://github.com/rust-lang-nursery/rustfmt.git [submodule "src/tools/miri"] path = src/tools/miri - url = https://github.com/solson/miri.git + url = https://github.com/rust-lang/miri.git [submodule "src/doc/rust-by-example"] path = src/doc/rust-by-example url = https://github.com/rust-lang/rust-by-example.git @@ -38,23 +34,16 @@ [submodule "src/stdsimd"] path = src/stdsimd url = https://github.com/rust-lang-nursery/stdsimd.git -[submodule "src/tools/lld"] - path = src/tools/lld - url = https://github.com/rust-lang/lld.git -[submodule "src/libbacktrace"] - path = src/libbacktrace - url = https://github.com/rust-lang-nursery/libbacktrace.git -[submodule "src/tools/lldb"] - path = src/tools/lldb - url = https://github.com/rust-lang-nursery/lldb.git - branch = rust-release-80-v2 -[submodule "src/tools/clang"] - path = src/tools/clang - url = https://github.com/rust-lang-nursery/clang.git - branch = rust-release-80-v2 [submodule "src/doc/rustc-guide"] path = src/doc/rustc-guide url = https://github.com/rust-lang/rustc-guide.git [submodule "src/doc/edition-guide"] path = src/doc/edition-guide - url = https://github.com/rust-lang-nursery/edition-guide + url = https://github.com/rust-lang-nursery/edition-guide.git +[submodule "src/llvm-project"] + path = src/llvm-project + url = https://github.com/avr-rust/llvm-project.git + branch = avr-rustc/8.0-2019-03-18 +[submodule "src/doc/embedded-book"] + path = src/doc/embedded-book + url = https://github.com/rust-embedded/book.git diff --git a/.mailmap b/.mailmap index a928606b693e5..120d1f1457e65 100644 --- a/.mailmap +++ b/.mailmap @@ -29,6 +29,7 @@ Ariel Ben-Yehuda Ariel Ben-Yehuda Ariel Ben-Yehuda arielb1 Austin Seipp Aydin Kim aydin.kim +Bastian Kauschke Barosl Lee Barosl LEE Ben Alpert Ben Sago Ben S @@ -155,11 +156,13 @@ Matt Brubeck Matthew Auld Matthew McPherrin Matthijs Hofstra +Melody Horn Michael Williams Michael Woerister Mickaël Raybaud-Roig m-r-r Ms2ger Mukilan Thiagarajan +Nathan West Nathan Wilson Nathaniel Herman Nathaniel Herman Neil Pankey diff --git a/.travis.yml b/.travis.yml index 9e46e6b8ef6b1..7249af7ac0328 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,328 +1,32 @@ -language: shell +language: generic sudo: required -dist: trusty services: - docker -addons: - apt: - packages: - - gdb +# LLVM takes awhile to check out and otherwise we'll manage the submodules in +# our configure script, so disable auto submodule management. git: - depth: 2 submodules: false + depth: 1 -matrix: - fast_finish: true - include: - # Images used in testing PR and try-build should be run first. 
- - env: IMAGE=x86_64-gnu-llvm-6.0 RUST_BACKTRACE=1 - if: type = pull_request OR branch = auto - - - env: IMAGE=dist-x86_64-linux DEPLOY=1 - if: branch = try OR branch = auto - - # "alternate" deployments, these are "nightlies" but have LLVM assertions - # turned on, they're deployed to a different location primarily for - # additional testing. - - env: IMAGE=dist-x86_64-linux DEPLOY_ALT=1 CI_JOB_NAME=dist-x86_64-linux-alt - if: branch = try OR branch = auto - - - env: > - RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler --enable-lldb --set rust.jemalloc" - SRC=. - DEPLOY_ALT=1 - RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - MACOSX_DEPLOYMENT_TARGET=10.7 - NO_LLVM_ASSERTIONS=1 - NO_DEBUG_ASSERTIONS=1 - CI_JOB_NAME=dist-x86_64-apple-alt - os: osx - osx_image: xcode9.3-moar - if: branch = auto - - # macOS builders. These are placed near the beginning because they are very - # slow to run. - - # OSX builders running tests, these run the full test suite. - # NO_DEBUG_ASSERTIONS=1 to make them go faster, but also do have some - # runners that run `//ignore-debug` tests. - # - # Note that the compiler is compiled to target 10.8 here because the Xcode - # version that we're using, 8.2, cannot compile LLVM for OSX 10.7. - - env: > - RUST_CHECK_TARGET=check - RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers --enable-profiler --set rust.jemalloc" - SRC=. - RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - MACOSX_DEPLOYMENT_TARGET=10.8 - MACOSX_STD_DEPLOYMENT_TARGET=10.7 - NO_LLVM_ASSERTIONS=1 - NO_DEBUG_ASSERTIONS=1 - CI_JOB_NAME=x86_64-apple - os: osx - osx_image: xcode9.3-moar - if: branch = auto - - - env: > - RUST_CHECK_TARGET=check - RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --set rust.jemalloc" - SRC=. - RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - MACOSX_DEPLOYMENT_TARGET=10.8 - MACOSX_STD_DEPLOYMENT_TARGET=10.7 - NO_LLVM_ASSERTIONS=1 - NO_DEBUG_ASSERTIONS=1 - CI_JOB_NAME=i686-apple - os: osx - osx_image: xcode9.3-moar - if: branch = auto - - # OSX builders producing releases. These do not run the full test suite and - # just produce a bunch of artifacts. - # - # Note that these are running in the `xcode7` image instead of the - # `xcode8.2` image as above. That's because we want to build releases for - # OSX 10.7 and `xcode7` is the latest Xcode able to compile LLVM for 10.7. - - env: > - RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-full-tools --enable-profiler --enable-lldb --set rust.jemalloc" - SRC=. - DEPLOY=1 - RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - MACOSX_DEPLOYMENT_TARGET=10.7 - NO_LLVM_ASSERTIONS=1 - NO_DEBUG_ASSERTIONS=1 - DIST_REQUIRE_ALL_TOOLS=1 - CI_JOB_NAME=dist-i686-apple - os: osx - osx_image: xcode9.3-moar - if: branch = auto - - - env: > - RUST_CHECK_TARGET=dist - RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-full-tools --enable-sanitizers --enable-profiler --enable-lldb --set rust.jemalloc" - SRC=. 
- DEPLOY=1 - RUSTC_RETRY_LINKER_ON_SEGFAULT=1 - MACOSX_DEPLOYMENT_TARGET=10.7 - NO_LLVM_ASSERTIONS=1 - NO_DEBUG_ASSERTIONS=1 - DIST_REQUIRE_ALL_TOOLS=1 - CI_JOB_NAME=dist-x86_64-apple - os: osx - osx_image: xcode9.3-moar - if: branch = auto - - # Linux builders, remaining docker images - - env: IMAGE=arm-android - if: branch = auto - - env: IMAGE=armhf-gnu - if: branch = auto - - env: IMAGE=dist-various-1 DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-various-2 DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-aarch64-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-android DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-arm-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-armhf-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-armv7-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-i586-gnu-i586-i686-musl DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-i686-freebsd DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-i686-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-mips-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-mips64-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-mips64el-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-mipsel-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-powerpc-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-powerpc64-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-powerpc64le-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-s390x-linux DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-x86_64-freebsd DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-x86_64-musl DEPLOY=1 - if: branch = auto - - env: IMAGE=dist-x86_64-netbsd DEPLOY=1 - if: branch = auto - - env: IMAGE=asmjs - if: branch = auto - - env: IMAGE=i686-gnu - if: branch = auto - - env: IMAGE=i686-gnu-nopt - if: branch = auto - - env: IMAGE=wasm32-unknown - if: branch = auto - - env: IMAGE=x86_64-gnu - if: branch = auto - - env: IMAGE=x86_64-gnu-full-bootstrap - if: branch = auto - - env: IMAGE=x86_64-gnu-aux - if: branch = auto - - env: IMAGE=x86_64-gnu-tools - if: branch = auto OR (type = pull_request AND commit_message =~ /(?i:^update.*\b(rls|rustfmt|clippy|miri|cargo)\b)/) - - env: IMAGE=x86_64-gnu-debug - if: branch = auto - - env: IMAGE=x86_64-gnu-nopt - if: branch = auto - - env: IMAGE=x86_64-gnu-distcheck - if: branch = auto - - env: IMAGE=mingw-check - if: type = pull_request OR branch = auto - - - stage: publish toolstate - if: branch = master AND type = push - before_install: [] - install: [] - sudo: false - script: - MESSAGE_FILE=$(mktemp -t msg.XXXXXX); - . src/ci/docker/x86_64-gnu-tools/repo.sh; - commit_toolstate_change "$MESSAGE_FILE" "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "$MESSAGE_FILE" "$TOOLSTATE_REPO_ACCESS_TOKEN"; +env: + - CXX=/usr/bin/g++-4.7 RUST_BACKTRACE=1 before_install: - # We'll use the AWS cli to download/upload cached docker layers as well as - # push our deployments, so download that here. - - pip install --user awscli; export PATH=$PATH:$HOME/.local/bin:$HOME/Library/Python/2.7/bin/ - - mkdir -p $HOME/rustsrc - # FIXME(#46924): these two commands are required to enable IPv6, - # they shouldn't exist, please revert once more official solutions appeared. 
- # see https://github.com/travis-ci/travis-ci/issues/8891#issuecomment-353403729 - - if [ "$TRAVIS_OS_NAME" = linux ]; then - echo '{"ipv6":true,"fixed-cidr-v6":"fd9a:8454:6789:13f7::/64"}' | sudo tee /etc/docker/daemon.json; - sudo service docker restart; - fi + - docker build -t rust -f src/etc/Dockerfile src/etc -install: - - case "$TRAVIS_OS_NAME" in - linux) - travis_retry curl -fo $HOME/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-unknown-linux-musl && - chmod +x $HOME/stamp && - export PATH=$PATH:$HOME - ;; - osx) - if [[ "$RUST_CHECK_TARGET" == dist ]]; then - travis_retry brew update && - travis_retry brew install xz && - travis_retry brew install swig; - fi && - travis_retry curl -fo /usr/local/bin/sccache https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2018-04-02-sccache-x86_64-apple-darwin && - chmod +x /usr/local/bin/sccache && - travis_retry curl -fo /usr/local/bin/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-apple-darwin && - chmod +x /usr/local/bin/stamp && - travis_retry curl -f http://releases.llvm.org/7.0.0/clang+llvm-7.0.0-x86_64-apple-darwin.tar.xz | tar xJf - && - export CC=`pwd`/clang+llvm-7.0.0-x86_64-apple-darwin/bin/clang && - export CXX=`pwd`/clang+llvm-7.0.0-x86_64-apple-darwin/bin/clang++ && - export AR=ar - ;; - esac - -before_script: - - > - echo "#### Disk usage before running script:"; - df -h; - du . | sort -nr | head -n100 - - > - RUN_SCRIPT="src/ci/init_repo.sh . $HOME/rustsrc"; - if [ "$TRAVIS_OS_NAME" = "osx" ]; then - export RUN_SCRIPT="$RUN_SCRIPT && src/ci/run.sh"; - else - export RUN_SCRIPT="$RUN_SCRIPT && src/ci/docker/run.sh $IMAGE"; - # Enable core dump on Linux. - sudo sh -c 'echo "/checkout/obj/cores/core.%p.%E" > /proc/sys/kernel/core_pattern'; - fi - -# Log time information from this machine and an external machine for insight into possible -# clock drift. Timezones don't matter since relative deltas give all the necessary info. script: - - > - date && (curl -fs --head https://google.com | grep ^Date: | sed 's/Date: //g' || true) - - stamp sh -x -c "$RUN_SCRIPT" - - > - date && (curl -fs --head https://google.com | grep ^Date: | sed 's/Date: //g' || true) - -after_success: - - > - echo "#### Build successful; Disk usage after running script:"; - df -h; - du . | sort -nr | head -n100 - - > - if [ "$DEPLOY$DEPLOY_ALT" == "1" ]; then - mkdir -p deploy/$TRAVIS_COMMIT; - if [ "$TRAVIS_OS_NAME" == "osx" ]; then - rm -rf build/dist/doc && - cp -r build/dist/* deploy/$TRAVIS_COMMIT; - else - rm -rf obj/build/dist/doc && - cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT; - fi; - ls -la deploy/$TRAVIS_COMMIT; - deploy_dir=rustc-builds; - if [ "$DEPLOY_ALT" == "1" ]; then - deploy_dir=rustc-builds-alt; - fi; - travis_retry aws s3 cp --no-progress --recursive --acl public-read ./deploy s3://rust-lang-ci2/$deploy_dir - fi - -after_failure: - - > - echo "#### Build failed; Disk usage after running script:"; - df -h; - du . | sort -nr | head -n100 - - # Random attempt at debugging currently. Just poking around in here to see if - # anything shows up. 
- - # Dump backtrace for macOS - - ls -lat $HOME/Library/Logs/DiagnosticReports/ - - find $HOME/Library/Logs/DiagnosticReports - -type f - -name '*.crash' - -not -name '*.stage2-*.crash' - -not -name 'com.apple.CoreSimulator.CoreSimulatorService-*.crash' - -exec printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" {} \; - -exec head -750 {} \; - -exec echo travis_fold":"end:crashlog \; || true - - # Dump backtrace for Linux - - ln -s . checkout && - for CORE in obj/cores/core.*; do - EXE=$(echo $CORE | sed 's|obj/cores/core\.[0-9]*\.!checkout!\(.*\)|\1|;y|!|/|'); - if [ -f "$EXE" ]; then - printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" "$CORE"; - gdb --batch -q -c "$CORE" "$EXE" - -iex 'set auto-load off' - -iex 'dir src/' - -iex 'set sysroot .' - -ex bt - -ex q; - echo travis_fold":"end:crashlog; - fi; - done || true - - # see #50887 - - cat ./obj/build/x86_64-unknown-linux-gnu/native/asan/build/lib/asan/clang_rt.asan-dynamic-i386.vers || true - - # attempt to debug anything killed by the oom killer on linux, just to see if - # it happened - - dmesg | grep -i kill + - docker run -v `pwd`:/build rust + sh -c " + ./configure --enable-rustbuild --llvm-root=/usr/lib/llvm-3.7 --enable-quiet-tests && + make tidy && + make check -j4 + " + +# Real testing happens on http://buildbot.rust-lang.org/ +# +# See https://github.com/rust-lang/rust-buildbot +# CONTRIBUTING.md#pull-requests notifications: email: false diff --git a/.travis_rust.yml b/.travis_rust.yml new file mode 100644 index 0000000000000..7a8772d7abd63 --- /dev/null +++ b/.travis_rust.yml @@ -0,0 +1,379 @@ +language: shell +sudo: required +dist: xenial +services: + - docker +addons: + apt: + packages: + - gdb + +git: + depth: 2 + submodules: false + +env: + global: + - CI_JOB_NAME=$TRAVIS_JOB_NAME + +matrix: + fast_finish: true + include: + # Images used in testing PR and try-build should be run first. + - env: IMAGE=x86_64-gnu-llvm-6.0 RUST_BACKTRACE=1 + name: x86_64-gnu-llvm-6.0 + if: type = pull_request OR branch = auto + + - env: IMAGE=dist-x86_64-linux DEPLOY=1 + name: dist-x86_64-linux + if: branch = try OR branch = auto + + # "alternate" deployments, these are "nightlies" but have LLVM assertions + # turned on, they're deployed to a different location primarily for + # additional testing. + - env: IMAGE=dist-x86_64-linux DEPLOY_ALT=1 + name: dist-x86_64-linux-alt + if: branch = try OR branch = auto + + - env: > + RUST_CHECK_TARGET=dist + RUST_CONFIGURE_ARGS="--enable-extended --enable-profiler --enable-lldb --set rust.jemalloc" + SRC=. + DEPLOY_ALT=1 + RUSTC_RETRY_LINKER_ON_SEGFAULT=1 + MACOSX_DEPLOYMENT_TARGET=10.7 + NO_LLVM_ASSERTIONS=1 + NO_DEBUG_ASSERTIONS=1 + os: osx + osx_image: xcode9.3-moar + name: dist-x86_64-apple-alt + if: branch = auto + + # macOS builders. These are placed near the beginning because they are very + # slow to run. + + # OSX builders running tests, these run the full test suite. + # NO_DEBUG_ASSERTIONS=1 to make them go faster, but also do have some + # runners that run `//ignore-debug` tests. + # + # Note that the compiler is compiled to target 10.8 here because the Xcode + # version that we're using, 8.2, cannot compile LLVM for OSX 10.7. + - env: > + RUST_CHECK_TARGET=check + RUST_CONFIGURE_ARGS="--build=x86_64-apple-darwin --enable-sanitizers --enable-profiler --set rust.jemalloc" + SRC=. 
+ RUSTC_RETRY_LINKER_ON_SEGFAULT=1 + MACOSX_DEPLOYMENT_TARGET=10.8 + MACOSX_STD_DEPLOYMENT_TARGET=10.7 + NO_LLVM_ASSERTIONS=1 + NO_DEBUG_ASSERTIONS=1 + os: osx + osx_image: xcode9.3-moar + name: x86_64-apple + if: branch = auto + + - env: > + RUST_CHECK_TARGET=check + RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --set rust.jemalloc" + SRC=. + RUSTC_RETRY_LINKER_ON_SEGFAULT=1 + MACOSX_DEPLOYMENT_TARGET=10.8 + MACOSX_STD_DEPLOYMENT_TARGET=10.7 + NO_LLVM_ASSERTIONS=1 + NO_DEBUG_ASSERTIONS=1 + os: osx + osx_image: xcode9.3-moar + name: i686-apple + if: branch = auto + + # OSX builders producing releases. These do not run the full test suite and + # just produce a bunch of artifacts. + # + # Note that these are running in the `xcode7` image instead of the + # `xcode8.2` image as above. That's because we want to build releases for + # OSX 10.7 and `xcode7` is the latest Xcode able to compile LLVM for 10.7. + - env: > + RUST_CHECK_TARGET=dist + RUST_CONFIGURE_ARGS="--build=i686-apple-darwin --enable-full-tools --enable-profiler --enable-lldb --set rust.jemalloc" + SRC=. + DEPLOY=1 + RUSTC_RETRY_LINKER_ON_SEGFAULT=1 + MACOSX_DEPLOYMENT_TARGET=10.7 + NO_LLVM_ASSERTIONS=1 + NO_DEBUG_ASSERTIONS=1 + DIST_REQUIRE_ALL_TOOLS=1 + os: osx + osx_image: xcode9.3-moar + name: dist-i686-apple + if: branch = auto + + - env: > + RUST_CHECK_TARGET=dist + RUST_CONFIGURE_ARGS="--target=aarch64-apple-ios,armv7-apple-ios,armv7s-apple-ios,i386-apple-ios,x86_64-apple-ios --enable-full-tools --enable-sanitizers --enable-profiler --enable-lldb --set rust.jemalloc" + SRC=. + DEPLOY=1 + RUSTC_RETRY_LINKER_ON_SEGFAULT=1 + MACOSX_DEPLOYMENT_TARGET=10.7 + NO_LLVM_ASSERTIONS=1 + NO_DEBUG_ASSERTIONS=1 + DIST_REQUIRE_ALL_TOOLS=1 + os: osx + osx_image: xcode9.3-moar + name: dist-x86_64-apple + if: branch = auto + + # Linux builders, remaining docker images + - env: IMAGE=arm-android + name: arm-android + if: branch = auto + - env: IMAGE=armhf-gnu + name: armhf-gnu + if: branch = auto + - env: IMAGE=dist-various-1 DEPLOY=1 + name: dist-various-1 + if: branch = auto + - env: IMAGE=dist-various-2 DEPLOY=1 + name: dist-various-2 + if: branch = auto + - env: IMAGE=dist-aarch64-linux DEPLOY=1 + name: dist-aarch64-linux + if: branch = auto + - env: IMAGE=dist-android DEPLOY=1 + name: dist-android + if: branch = auto + - env: IMAGE=dist-arm-linux DEPLOY=1 + name: dist-arm-linux + if: branch = auto + - env: IMAGE=dist-armhf-linux DEPLOY=1 + name: dist-armhf-linux + if: branch = auto + - env: IMAGE=dist-armv7-linux DEPLOY=1 + name: dist-armv7-linux + if: branch = auto + - env: IMAGE=dist-i586-gnu-i586-i686-musl DEPLOY=1 + name: dist-i586-gnu-i586-i686-musl + if: branch = auto + - env: IMAGE=dist-i686-freebsd DEPLOY=1 + name: dist-i686-freebsd + if: branch = auto + - env: IMAGE=dist-i686-linux DEPLOY=1 + name: dist-i686-linux + if: branch = auto + - env: IMAGE=dist-mips-linux DEPLOY=1 + name: dist-mips-linux + if: branch = auto + - env: IMAGE=dist-mips64-linux DEPLOY=1 + name: dist-mips64-linux + if: branch = auto + - env: IMAGE=dist-mips64el-linux DEPLOY=1 + name: dist-mips64el-linux + if: branch = auto + - env: IMAGE=dist-mipsel-linux DEPLOY=1 + name: dist-mipsel-linux + if: branch = auto + - env: IMAGE=dist-powerpc-linux DEPLOY=1 + name: dist-powerpc-linux + if: branch = auto + - env: IMAGE=dist-powerpc64-linux DEPLOY=1 + name: dist-powerpc64-linux + if: branch = auto + - env: IMAGE=dist-powerpc64le-linux DEPLOY=1 + name: dist-powerpc64le-linux + if: branch = auto + - env: IMAGE=dist-s390x-linux DEPLOY=1 + name: dist-s390x-linux + if: 
branch = auto + - env: IMAGE=dist-x86_64-freebsd DEPLOY=1 + name: dist-x86_64-freebsd + if: branch = auto + - env: IMAGE=dist-x86_64-musl DEPLOY=1 + name: dist-x86_64-musl + if: branch = auto + - env: IMAGE=dist-x86_64-netbsd DEPLOY=1 + name: dist-x86_64-netbsd + if: branch = auto + - env: IMAGE=asmjs + name: asmjs + if: branch = auto + - env: IMAGE=i686-gnu + name: i686-gnu + if: branch = auto + - env: IMAGE=i686-gnu-nopt + name: i686-gnu-nopt + if: branch = auto + - env: IMAGE=test-various + name: test-various + if: branch = auto + - env: IMAGE=x86_64-gnu + name: x86_64-gnu + if: branch = auto + - env: IMAGE=x86_64-gnu-full-bootstrap + name: x86_64-gnu-full-bootstrap + if: branch = auto + - env: IMAGE=x86_64-gnu-aux + name: x86_64-gnu-aux + if: branch = auto + - env: IMAGE=x86_64-gnu-tools + name: x86_64-gnu-tools + if: branch = auto OR (type = pull_request AND commit_message =~ /(?i:^update.*\b(rls|rustfmt|clippy|miri|cargo)\b)/) + - env: IMAGE=x86_64-gnu-debug + name: x86_64-gnu-debug + if: branch = auto + - env: IMAGE=x86_64-gnu-nopt + name: x86_64-gnu-nopt + if: branch = auto + - env: IMAGE=x86_64-gnu-distcheck + name: x86_64-gnu-distcheck + if: branch = auto + - env: IMAGE=mingw-check + name: mingw-check + if: type = pull_request OR branch = auto + + - stage: publish toolstate + if: branch = master AND type = push + before_install: [] + install: [] + sudo: false + script: + MESSAGE_FILE=$(mktemp -t msg.XXXXXX); + . src/ci/docker/x86_64-gnu-tools/repo.sh; + commit_toolstate_change "$MESSAGE_FILE" "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "$MESSAGE_FILE" "$TOOLSTATE_REPO_ACCESS_TOKEN"; + +before_install: + # We'll use the AWS cli to download/upload cached docker layers as well as + # push our deployments, so download that here. + - pip install --user awscli; export PATH=$PATH:$HOME/.local/bin:$HOME/Library/Python/2.7/bin/ + - mkdir -p $HOME/rustsrc + # FIXME(#46924): these two commands are required to enable IPv6, + # they shouldn't exist, please revert once more official solutions appeared. + # see https://github.com/travis-ci/travis-ci/issues/8891#issuecomment-353403729 + - if [ "$TRAVIS_OS_NAME" = linux ]; then + echo '{"ipv6":true,"fixed-cidr-v6":"fd9a:8454:6789:13f7::/64"}' | sudo tee /etc/docker/daemon.json; + sudo service docker restart; + fi + +install: + - case "$TRAVIS_OS_NAME" in + linux) + travis_retry curl -fo $HOME/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-unknown-linux-musl && + chmod +x $HOME/stamp && + export PATH=$PATH:$HOME + ;; + osx) + if [[ "$RUST_CHECK_TARGET" == dist ]]; then + travis_retry brew update && + travis_retry brew install xz && + travis_retry brew install swig; + fi && + travis_retry curl -fo /usr/local/bin/sccache https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2018-04-02-sccache-x86_64-apple-darwin && + chmod +x /usr/local/bin/sccache && + travis_retry curl -fo /usr/local/bin/stamp https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2017-03-17-stamp-x86_64-apple-darwin && + chmod +x /usr/local/bin/stamp && + travis_retry curl -f http://releases.llvm.org/7.0.0/clang+llvm-7.0.0-x86_64-apple-darwin.tar.xz | tar xJf - && + export CC=`pwd`/clang+llvm-7.0.0-x86_64-apple-darwin/bin/clang && + export CXX=`pwd`/clang+llvm-7.0.0-x86_64-apple-darwin/bin/clang++ && + export AR=ar + ;; + esac + +before_script: + - > + echo "#### Disk usage before running script:"; + df -h; + du . 
| sort -nr | head -n100 + - > + RUN_SCRIPT="src/ci/init_repo.sh . $HOME/rustsrc"; + if [ "$TRAVIS_OS_NAME" = "osx" ]; then + export RUN_SCRIPT="$RUN_SCRIPT && src/ci/run.sh"; + else + export RUN_SCRIPT="$RUN_SCRIPT && src/ci/docker/run.sh $IMAGE"; + # Enable core dump on Linux. + sudo sh -c 'echo "/checkout/obj/cores/core.%p.%E" > /proc/sys/kernel/core_pattern'; + fi + - > + if [ "$IMAGE" = mingw-check ]; then + # verify the publish_toolstate script works. + git clone --depth=1 https://github.com/rust-lang-nursery/rust-toolstate.git; + cd rust-toolstate; + python2.7 "$TRAVIS_BUILD_DIR/src/tools/publish_toolstate.py" "$(git rev-parse HEAD)" "$(git log --format=%s -n1 HEAD)" "" ""; + cd ..; + rm -rf rust-toolstate; + fi + +# Log time information from this machine and an external machine for insight into possible +# clock drift. Timezones don't matter since relative deltas give all the necessary info. +script: + - > + date && (curl -fs --head https://google.com | grep ^Date: | sed 's/Date: //g' || true) + - stamp sh -x -c "$RUN_SCRIPT" + - > + date && (curl -fs --head https://google.com | grep ^Date: | sed 's/Date: //g' || true) + +after_success: + - > + echo "#### Build successful; Disk usage after running script:"; + df -h; + du . | sort -nr | head -n100 + - > + if [ "$DEPLOY$DEPLOY_ALT" == "1" ]; then + mkdir -p deploy/$TRAVIS_COMMIT; + if [ "$TRAVIS_OS_NAME" == "osx" ]; then + rm -rf build/dist/doc && + cp -r build/dist/* deploy/$TRAVIS_COMMIT; + else + rm -rf obj/build/dist/doc && + cp -r obj/build/dist/* deploy/$TRAVIS_COMMIT; + fi; + ls -la deploy/$TRAVIS_COMMIT; + deploy_dir=rustc-builds; + if [ "$DEPLOY_ALT" == "1" ]; then + deploy_dir=rustc-builds-alt; + fi; + travis_retry aws s3 cp --no-progress --recursive --acl public-read ./deploy s3://rust-lang-ci2/$deploy_dir + fi + +after_failure: + - > + echo "#### Build failed; Disk usage after running script:"; + df -h; + du . | sort -nr | head -n100 + + # Random attempt at debugging currently. Just poking around in here to see if + # anything shows up. + + # Dump backtrace for macOS + - ls -lat $HOME/Library/Logs/DiagnosticReports/ + - find $HOME/Library/Logs/DiagnosticReports + -type f + -name '*.crash' + -not -name '*.stage2-*.crash' + -not -name 'com.apple.CoreSimulator.CoreSimulatorService-*.crash' + -exec printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" {} \; + -exec head -750 {} \; + -exec echo travis_fold":"end:crashlog \; || true + + # Dump backtrace for Linux + - ln -s . checkout && + for CORE in obj/cores/core.*; do + EXE=$(echo $CORE | sed 's|obj/cores/core\.[0-9]*\.!checkout!\(.*\)|\1|;y|!|/|'); + if [ -f "$EXE" ]; then + printf travis_fold":start:crashlog\n\033[31;1m%s\033[0m\n" "$CORE"; + gdb --batch -q -c "$CORE" "$EXE" + -iex 'set auto-load off' + -iex 'dir src/' + -iex 'set sysroot .' 
+ -ex bt + -ex q; + echo travis_fold":"end:crashlog; + fi; + done || true + + # see #50887 + - cat ./obj/build/x86_64-unknown-linux-gnu/native/asan/build/lib/asan/clang_rt.asan-dynamic-i386.vers || true + + # attempt to debug anything killed by the oom killer on linux, just to see if + # it happened + - dmesg | grep -i kill + +notifications: + email: false diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index d70b2b52aca1b..ece8dedb0aed7 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -35,6 +35,6 @@ And if someone takes issue with something you said or did, resist the urge to be The enforcement policies listed above apply to all official Rust venues; including official IRC channels (#rust, #rust-internals, #rust-tools, #rust-libs, #rustc, #rust-beginners, #rust-docs, #rust-community, #rust-lang, and #cargo); GitHub repositories under rust-lang, rust-lang-nursery, and rust-lang-deprecated; and all forums under rust-lang.org (users.rust-lang.org, internals.rust-lang.org). For other projects adopting the Rust Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion. -*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* +*Adapted from the [Node.js Policy on Trolling](https://blog.izs.me/2012/08/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).* [mod_team]: https://www.rust-lang.org/team.html#Moderation-team diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 65cdfe67b5b08..db37fa0caf6c7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,19 +15,29 @@ links to the major sections: * [Helpful Links and Information](#helpful-links-and-information) If you have questions, please make a post on [internals.rust-lang.org][internals] or -hop on [#rust-internals][pound-rust-internals]. +hop on the [Rust Discord server][rust-discord], [Rust Zulip server][rust-zulip] or [#rust-internals][pound-rust-internals]. As a reminder, all contributors are expected to follow our [Code of Conduct][coc]. +The [rustc-guide] is your friend! It describes how the compiler works and how +to contribute to it in more detail than this document. + +If this is your first time contributing, the [walkthrough] chapter of the guide +can give you a good example of how a typical contribution would go. + [pound-rust-internals]: https://chat.mibbit.com/?server=irc.mozilla.org&channel=%23rust-internals [internals]: https://internals.rust-lang.org +[rust-discord]: http://discord.gg/rust-lang +[rust-zulip]: https://rust-lang.zulipchat.com [coc]: https://www.rust-lang.org/conduct.html +[rustc-guide]: https://rust-lang.github.io/rustc-guide/ +[walkthrough]: https://rust-lang.github.io/rustc-guide/walkthrough.html ## Feature Requests [feature-requests]: #feature-requests To request a change to the way the Rust language works, please head over -to the [RFCs repository](https://github.com/rust-lang/rfcs) and view the +to the [RFCs repository](https://github.com/rust-lang/rfcs) and view the [README](https://github.com/rust-lang/rfcs/blob/master/README.md) for instructions. @@ -89,222 +99,14 @@ $ RUST_BACKTRACE=1 rustc ... 
``` ## The Build System -[the-build-system]: #the-build-system - -Rust's build system allows you to bootstrap the compiler, run tests & -benchmarks, generate documentation, install a fresh build of Rust, and more. -It's your best friend when working on Rust, allowing you to compile & test -your contributions before submission. - -The build system lives in [the `src/bootstrap` directory][bootstrap] in the -project root. Our build system is itself written in Rust and is based on Cargo -to actually build all the compiler's crates. If you have questions on the build -system internals, try asking in [`#rust-internals`][pound-rust-internals]. - -[bootstrap]: https://github.com/rust-lang/rust/tree/master/src/bootstrap/ - -### Configuration -[configuration]: #configuration - -Before you can start building the compiler you need to configure the build for -your system. In most cases, that will just mean using the defaults provided -for Rust. - -To change configuration, you must copy the file `config.toml.example` -to `config.toml` in the directory from which you will be running the build, and -change the settings provided. - -There are large number of options provided in this config file that will alter the -configuration used in the build process. Some options to note: - -#### `[llvm]`: -- `assertions = true` = This enables LLVM assertions, which makes LLVM misuse cause an assertion failure instead of weird misbehavior. This also slows down the compiler's runtime by ~20%. -- `ccache = true` - Use ccache when building llvm - -#### `[build]`: -- `compiler-docs = true` - Build compiler documentation - -#### `[rust]`: -- `debuginfo = true` - Build a compiler with debuginfo. Makes building rustc slower, but then you can use a debugger to debug `rustc`. -- `debuginfo-lines = true` - An alternative to `debuginfo = true` that doesn't let you use a debugger, but doesn't make building rustc slower and still gives you line numbers in backtraces. -- `debuginfo-tools = true` - Build the extended tools with debuginfo. -- `debug-assertions = true` - Makes the log output of `debug!` work. -- `optimize = false` - Disable optimizations to speed up compilation of stage1 rust, but makes the stage1 compiler x100 slower. - -For more options, the `config.toml` file contains commented out defaults, with -descriptions of what each option will do. - -Note: Previously the `./configure` script was used to configure this -project. It can still be used, but it's recommended to use a `config.toml` -file. If you still have a `config.mk` file in your directory - from -`./configure` - you may need to delete it for `config.toml` to work. - -### Building -[building]: #building - -A default configuration requires around 3.5 GB of disk space, whereas building a debug configuration may require more than 30 GB. - -Dependencies -- [build dependencies](README.md#building-from-source) -- `gdb` 6.2.0 minimum, 7.1 or later recommended for test builds - -The build system uses the `x.py` script to control the build process. This script -is used to build, test, and document various parts of the compiler. You can -execute it as: - -```sh -python x.py build -``` - -On some systems you can also use the shorter version: - -```sh -./x.py build -``` - -To learn more about the driver and top-level targets, you can execute: - -```sh -python x.py --help -``` - -The general format for the driver script is: - -```sh -python x.py [] -``` -Some example commands are `build`, `test`, and `doc`. These will build, test, -and document the specified directory. 
The second argument, ``, is -optional and defaults to working over the entire compiler. If specified, -however, only that specific directory will be built. For example: +For info on how to configure and build the compiler, please see [this +chapter][rustcguidebuild] of the rustc-guide. This chapter contains info for +contributions to the compiler and the standard library. It also lists some +really useful commands to the build system (`./x.py`), which could save you a +lot of time. -```sh -# build the entire compiler -python x.py build - -# build all documentation -python x.py doc - -# run all test suites -python x.py test - -# build only the standard library -python x.py build src/libstd - -# test only one particular test suite -python x.py test src/test/rustdoc - -# build only the stage0 libcore library -python x.py build src/libcore --stage 0 -``` - -You can explore the build system through the various `--help` pages for each -subcommand. For example to learn more about a command you can run: - -``` -python x.py build --help -``` - -To learn about all possible rules you can execute, run: - -``` -python x.py build --help --verbose -``` - -Note: Previously `./configure` and `make` were used to build this project. -They are still available, but `x.py` is the recommended build system. - -### Useful commands -[useful-commands]: #useful-commands - -Some common invocations of `x.py` are: - -- `x.py build --help` - show the help message and explain the subcommand -- `x.py build src/libtest --stage 1` - build up to (and including) the first - stage. For most cases we don't need to build the stage2 compiler, so we can - save time by not building it. The stage1 compiler is a fully functioning - compiler and (probably) will be enough to determine if your change works as - expected. -- `x.py build src/rustc --stage 1` - This will build just rustc, without libstd. - This is the fastest way to recompile after you changed only rustc source code. - Note however that the resulting rustc binary won't have a stdlib to link - against by default. You can build libstd once with `x.py build src/libstd`, - but it is only guaranteed to work if recompiled, so if there are any issues - recompile it. -- `x.py test` - build the full compiler & run all tests (takes a while). This - is what gets run by the continuous integration system against your pull - request. You should run this before submitting to make sure your tests pass - & everything builds in the correct manner. -- `x.py test src/libstd --stage 1` - test the standard library without - recompiling stage 2. -- `x.py test src/test/run-pass --test-args TESTNAME` - Run a matching set of - tests. - - `TESTNAME` should be a substring of the tests to match against e.g. it could - be the fully qualified test name, or just a part of it. - `TESTNAME=collections::hash::map::test_map::test_capacity_not_less_than_len` - or `TESTNAME=test_capacity_not_less_than_len`. -- `x.py test src/test/run-pass --stage 1 --test-args ` - - Run a single rpass test with the stage1 compiler (this will be quicker than - running the command above as we only build the stage1 compiler, not the entire - thing). You can also leave off the directory argument to run all stage1 test - types. -- `x.py test src/libcore --stage 1` - Run stage1 tests in `libcore`. -- `x.py test src/tools/tidy` - Check that the source code is in compliance with - Rust's style guidelines. 
There is no official document describing Rust's full - guidelines as of yet, but basic rules like 4 spaces for indentation and no - more than 99 characters in a single line should be kept in mind when writing - code. - -### Using your local build -[using-local-build]: #using-local-build - -If you use Rustup to manage your rust install, it has a feature called ["custom -toolchains"][toolchain-link] that you can use to access your newly-built compiler -without having to install it to your system or user PATH. If you've run `python -x.py build`, then you can add your custom rustc to a new toolchain like this: - -[toolchain-link]: https://github.com/rust-lang-nursery/rustup.rs#working-with-custom-toolchains-and-local-builds - -``` -rustup toolchain link build//stage2 -``` - -Where `` is the build triple for the host (the triple of your -computer, by default), and `` is the name for your custom toolchain. (If you -added `--stage 1` to your build command, the compiler will be in the `stage1` -folder instead.) You'll only need to do this once - it will automatically point -to the latest build you've done. - -Once this is set up, you can use your custom toolchain just like any other. For -example, if you've named your toolchain `local`, running `cargo +local build` will -compile a project with your custom rustc, setting `rustup override set local` will -override the toolchain for your current directory, and `cargo +local doc` will use -your custom rustc and rustdoc to generate docs. (If you do this with a `--stage 1` -build, you'll need to build rustdoc specially, since it's not normally built in -stage 1. `python x.py build --stage 1 src/libstd src/tools/rustdoc` will build -rustdoc and libstd, which will allow rustdoc to be run with that toolchain.) - -### Out-of-tree builds -[out-of-tree-builds]: #out-of-tree-builds - -Rust's `x.py` script fully supports out-of-tree builds - it looks for -the Rust source code from the directory `x.py` was found in, but it -reads the `config.toml` configuration file from the directory it's -run in, and places all build artifacts within a subdirectory named `build`. - -This means that if you want to do an out-of-tree build, you can just do it: -``` -$ cd my/build/dir -$ cp ~/my-config.toml config.toml # Or fill in config.toml otherwise -$ path/to/rust/x.py build -... -$ # This will use the Rust source code in `path/to/rust`, but build -$ # artifacts will now be in ./build -``` - -It's absolutely fine to have multiple build directories with different -`config.toml` configurations using the same code. +[rustcguidebuild]: https://rust-lang.github.io/rustc-guide/how-to-build-and-run.html ## Pull Requests [pull-requests]: #pull-requests @@ -320,18 +122,12 @@ bring those changes into the source repository. Please make pull requests against the `master` branch. -Compiling all of `./x.py test` can take a while. When testing your pull request, -consider using one of the more specialized `./x.py` targets to cut down on the -amount of time you have to wait. You need to have built the compiler at least -once before running these will work, but that’s only one full build rather than -one each time. - - $ python x.py test --stage 1 - -is one such example, which builds just `rustc`, and then runs the tests. If -you’re adding something to the standard library, try - - $ python x.py test src/libstd --stage 1 +Rust follows a no merge policy, meaning, when you encounter merge +conflicts you are expected to always rebase instead of merge. +E.g. 
always use rebase when bringing the latest changes from +the master branch to your feature branch. +Also, please make sure that fixup commits are squashed into other related +commits with meaningful commit messages. Please make sure your pull request is in compliance with Rust's style guidelines by running @@ -339,32 +135,44 @@ guidelines by running $ python x.py test src/tools/tidy Make this check before every pull request (and every new commit in a pull -request) ; you can add [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) +request); you can add [git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) before every push to make sure you never forget to make this check. All pull requests are reviewed by another person. We have a bot, -@rust-highfive, that will automatically assign a random person to review your +[@rust-highfive][rust-highfive], that will automatically assign a random person to review your request. If you want to request that a specific person reviews your pull request, -you can add an `r?` to the message. For example, Steve usually reviews +you can add an `r?` to the message. For example, [Steve][steveklabnik] usually reviews documentation changes. So if you were to make a documentation change, add r? @steveklabnik -to the end of the message, and @rust-highfive will assign @steveklabnik instead +to the end of the message, and @rust-highfive will assign [@steveklabnik][steveklabnik] instead of a random person. This is entirely optional. After someone has reviewed your pull request, they will leave an annotation on the pull request with an `r+`. It will look something like this: - @bors: r+ 38fe8d2 + @bors r+ -This tells @bors, our lovable integration bot, that your pull request has -been approved. The PR then enters the [merge queue][merge-queue], where @bors +This tells [@bors][bors], our lovable integration bot, that your pull request has +been approved. The PR then enters the [merge queue][merge-queue], where [@bors][bors] will run all the tests on every platform we support. If it all works out, -@bors will merge your code into `master` and close the pull request. +[@bors][bors] will merge your code into `master` and close the pull request. + +Depending on the scale of the change, you may see a slightly different form of `r+`: + + @bors r+ rollup + +The additional `rollup` tells [@bors][bors] that this change is eligible for to be +"rolled up". Changes that are rolled up are tested and merged at the same time, to +speed the process up. Typically only small changes that are expected not to conflict +with one another are rolled up. +[rust-highfive]: https://github.com/rust-highfive +[steveklabnik]: https://github.com/steveklabnik +[bors]: https://github.com/bors [merge-queue]: https://buildbot2.rust-lang.org/homu/queue/rust Speaking of tests, Rust has a comprehensive test suite. More information about @@ -375,10 +183,10 @@ it can be found [here][rctd]. Currently building Rust will also build the following external projects: -* [clippy](https://github.com/rust-lang-nursery/rust-clippy) -* [miri](https://github.com/solson/miri) -* [rustfmt](https://github.com/rust-lang-nursery/rustfmt) -* [rls](https://github.com/rust-lang-nursery/rls/) +* [clippy](https://github.com/rust-lang/rust-clippy) +* [miri](https://github.com/rust-lang/miri) +* [rustfmt](https://github.com/rust-lang/rustfmt) +* [rls](https://github.com/rust-lang/rls/) We allow breakage of these tools in the nightly channel. 
Maintainers of these projects will be notified of the breakages and should fix them as soon as @@ -404,10 +212,10 @@ before the PR is merged. [breaking-tools-built-with-the-compiler]: #breaking-tools-built-with-the-compiler Rust's build system builds a number of tools that make use of the -internals of the compiler. This includes -[Clippy](https://github.com/rust-lang-nursery/rust-clippy), -[RLS](https://github.com/rust-lang-nursery/rls) and -[rustfmt](https://github.com/rust-lang-nursery/rustfmt). If these tools +internals of the compiler. This includes +[Clippy](https://github.com/rust-lang/rust-clippy), +[RLS](https://github.com/rust-lang/rls) and +[rustfmt](https://github.com/rust-lang/rustfmt). If these tools break because of your changes, you may run into a sort of "chicken and egg" problem. These tools rely on the latest compiler to be built so you can't update them to reflect your changes to the compiler until those changes are merged into @@ -467,10 +275,10 @@ to complete a few more steps which are outlined with their rationale below. *(This error may change in the future to include more information.)* ``` -error: failed to resolve patches for `https://github.com/rust-lang-nursery/rustfmt` +error: failed to resolve patches for `https://github.com/rust-lang/rustfmt` Caused by: - patch for `rustfmt-nightly` in `https://github.com/rust-lang-nursery/rustfmt` did not resolve to any crates + patch for `rustfmt-nightly` in `https://github.com/rust-lang/rustfmt` did not resolve to any crates failed to run: ~/rust/build/x86_64-unknown-linux-gnu/stage0/bin/cargo build --manifest-path ~/rust/src/bootstrap/Cargo.toml ``` @@ -506,18 +314,8 @@ the submodule to. Running `./x.py build` should work now. Documentation improvements are very welcome. The source of `doc.rust-lang.org` is located in `src/doc` in the tree, and standard API documentation is generated -from the source code itself. - -Documentation pull requests function in the same way as other pull requests, -though you may see a slightly different form of `r+`: - - @bors: r+ 38fe8d2 rollup - -That additional `rollup` tells @bors that this change is eligible for a 'rollup'. -To save @bors some work, and to get small changes through more quickly, when -@bors attempts to merge a commit that's rollup-eligible, it will also merge -the other rollup-eligible patches too, and they'll get tested and merged at -the same time. +from the source code itself. Documentation pull requests function in the same way +as other pull requests. To find documentation-related issues, sort by the [T-doc label][tdoc]. @@ -532,6 +330,12 @@ to check small fixes. For example, `rustdoc src/doc/reference.md` will render reference to `doc/reference.html`. The CSS might be messed up, but you can verify that the HTML is right. +Additionally, contributions to the [rustc-guide] are always welcome. Contributions +can be made directly at [the +rust-lang/rustc-guide](https://github.com/rust-lang/rustc-guide) repo. The issue +tracker in that repo is also a great way to find things that need doing. There +are issues for beginners and advanced compiler devs alike! 
+ ## Issue Triage [issue-triage]: #issue-triage @@ -627,7 +431,7 @@ For people new to Rust, and just starting to contribute, or even for more seasoned developers, some useful places to look for information are: -* The [rustc guide] contains information about how various parts of the compiler work +* The [rustc guide] contains information about how various parts of the compiler work and how to contribute to the compiler * [Rust Forge][rustforge] contains additional documentation, including write-ups of how to achieve common tasks * The [Rust Internals forum][rif], a place to ask questions and discuss Rust's internals @@ -636,7 +440,8 @@ are: * Although out of date, [Tom Lee's great blog article][tlgba] is very helpful * [rustaceans.org][ro] is helpful, but mostly dedicated to IRC * The [Rust Compiler Testing Docs][rctd] -* For @bors, [this cheat sheet][cheatsheet] is helpful (Remember to replace `@homu` with `@bors` in the commands that you use.) +* For [@bors][bors], [this cheat sheet][cheatsheet] is helpful +(though you'll need to replace `@homu` with `@bors` in any commands) * **Google!** ([search only in Rust Documentation][gsearchdocs] to find types, traits, etc. quickly) * Don't be afraid to ask! The Rust community is friendly and helpful. diff --git a/COPYRIGHT b/COPYRIGHT index e2d0ed77224e3..dc9abf84b8e5a 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -23,7 +23,7 @@ The Rust Project includes packages written by third parties. The following third party packages are included, and carry their own copyright notices and license terms: -* LLVM. Code for this package is found in src/llvm. +* LLVM. Code for this package is found in src/llvm-project. Copyright (c) 2003-2013 University of Illinois at Urbana-Champaign. All rights reserved. @@ -73,8 +73,8 @@ their own copyright notices and license terms: OTHER DEALINGS WITH THE SOFTWARE. * Additional libraries included in LLVM carry separate - BSD-compatible licenses. See src/llvm/LICENSE.txt for - details. + BSD-compatible licenses. See src/llvm-project/llvm/LICENSE.TXT + for details. * compiler-rt, in src/compiler-rt is dual licensed under LLVM's license and MIT: @@ -229,35 +229,3 @@ their own copyright notices and license terms: NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - -* jemalloc, under src/jemalloc: - - Copyright (C) 2002-2014 Jason Evans - . All rights reserved. - Copyright (C) 2007-2012 Mozilla Foundation. - All rights reserved. - Copyright (C) 2009-2014 Facebook, Inc. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - 1. Redistributions of source code must retain the above copyright notice(s), - this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright notice(s), - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, - INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) - BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER - IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE - USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY - OF SUCH DAMAGE. diff --git a/Cargo.lock b/Cargo.lock index 7e03474565d85..8fd365b8462b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,3 +1,5 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. [[package]] name = "adler32" version = "1.0.3" @@ -15,9 +17,10 @@ dependencies = [ name = "alloc" version = "0.0.0" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_xorshift 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -41,32 +44,39 @@ dependencies = [ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "arc-swap" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "arena" version = "0.0.0" dependencies = [ "rustc_data_structures 0.0.0", + "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "arrayvec" -version = "0.4.7" +name = "argon2rs" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "assert_cli" -version = "0.6.2" +name = "arrayref" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "arrayvec" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "environment 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -74,7 +84,7 @@ name = "atty" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -84,20 +94,22 @@ name = "backtrace" version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 
0.1.27 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "backtrace-sys" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-std-workspace-core 1.0.0", ] [[package]] @@ -123,21 +135,39 @@ name = "bitflags" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", + "constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "block-buffer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bootstrap" version = "0.0.0" dependencies = [ "build_helper 0.1.0", - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)", @@ -153,7 +183,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "build-manifest" version = "0.1.0" dependencies = [ - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -167,9 
+197,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "build_helper" version = "0.1.0" +[[package]] +name = "byte-tools" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "bytecount" -version = "0.4.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "packed_simd 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -180,6 +215,15 @@ name = "byteorder" version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "bytes" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "bytesize" version = "1.0.0" @@ -187,58 +231,60 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "cargo" -version = "0.34.0" +version = "0.36.0" dependencies = [ "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "bufstream 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "bytesize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", - "crates-io 0.22.0", + "crates-io 0.24.0", "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", - "git2-curl 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)", - "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "git2-curl 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "ignore 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "im-rc 12.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "ignore 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "im-rc 12.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazycell 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.7.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "opener 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.15 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)", "pretty_env_logger 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "proptest 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)", + "proptest 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-workspace-hack 1.0.0", - "rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfix 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", "same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "tar 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -249,7 +295,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cargo_metadata" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 
(registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -260,7 +318,7 @@ version = "0.1.0" [[package]] name = "cc" -version = "1.0.25" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -270,7 +328,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chalk-engine" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -314,18 +372,17 @@ dependencies = [ name = "clippy" version = "0.0.212" dependencies = [ - "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "clippy-mini-macro-test 0.2.0", - "clippy_dev 0.0.1", "clippy_lints 0.0.212", - "compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", "derive-new 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-workspace-hack 1.0.0", - "rustc_tools_util 0.1.0", + "rustc_tools_util 0.1.1", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -333,31 +390,20 @@ dependencies = [ name = "clippy-mini-macro-test" version = "0.2.0" -[[package]] -name = "clippy_dev" -version = "0.0.1" -dependencies = [ - "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "clippy_lints" version = "0.0.212" dependencies = [ - "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "pulldown-cmark 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.10 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -375,10 +421,10 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.33" +version = "0.1.38" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -402,15 +448,15 @@ name = "commoncrypto-sys" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "compiler_builtins" -version = "0.1.2" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-std-workspace-core 1.0.0", ] @@ -423,36 +469,44 @@ dependencies = [ "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfix 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "compiletest_rs" -version = "0.3.17" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfix 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tester 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "constant_time_eq" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "core" version = "0.0.0" @@ -466,7 +520,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -476,11 +530,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "crates-io" -version = "0.22.0" +version = "0.24.0" dependencies = [ "curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "http 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -504,13 +559,13 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.2.6" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-epoch 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-epoch 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -523,6 +578,15 @@ dependencies = [ "crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "crossbeam-deque" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-epoch 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "crossbeam-epoch" version = "0.3.1" @@ -539,12 +603,12 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "scopeguard 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -558,11 +622,6 @@ dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "crossbeam-utils" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "crossbeam-utils" version = "0.6.2" @@ -578,7 +637,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl 0.10.15 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -589,9 +648,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", "schannel 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -602,11 +661,11 @@ name = "curl-sys" version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "libnghttp2-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -614,7 +673,7 @@ dependencies = [ [[package]] name = "datafrog" -version = "0.1.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -648,22 +707,40 @@ name = "difference" version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "digest" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "directories" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name 
= "dirs" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_users 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "dlmalloc" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-std-workspace-core 1.0.0", ] @@ -679,7 +756,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "strum 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -694,6 +771,14 @@ dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ena" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "env_logger" version = "0.5.13" @@ -718,11 +803,6 @@ dependencies = [ "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "environment" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "error-chain" version = "0.11.0" @@ -748,16 +828,16 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "backtrace 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "failure_derive" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -766,13 +846,18 @@ dependencies = [ "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "filetime" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -787,7 +872,7 @@ version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crc32fast 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", "miniz-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide_c_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -817,10 +902,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "fortanix-sgx-abi" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-std-workspace-core 1.0.0", ] @@ -829,7 +914,7 @@ name = "fs2" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -883,6 +968,14 @@ dependencies = [ "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "generic-array" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "getopts" version = "0.2.17" @@ -890,32 +983,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "git2" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "libgit2-sys 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "libgit2-sys 0.7.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "git2-curl" -version = "0.8.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)", - "git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)", + "git2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "glob" -version = "0.2.11" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -945,8 +1038,24 @@ dependencies = [ "pest_derive 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", 
"regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "handlebars" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "pest 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pest_derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -984,6 +1093,16 @@ dependencies = [ "syn 0.15.22 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "http" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "humantime" version = "1.2.0" @@ -1009,10 +1128,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "ignore" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1026,7 +1145,7 @@ dependencies = [ [[package]] name = "im-rc" -version = "12.2.0" +version = "12.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1048,6 +1167,15 @@ dependencies = [ "xz2 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "iovec" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "is-match" version = "0.1.0" @@ -1061,6 +1189,14 @@ dependencies = [ "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "itertools" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "itoa" version = "0.4.3" @@ -1068,20 +1204,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jemalloc-sys" -version = "0.1.8" +version = "0.3.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "jobserver" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1093,12 +1229,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" -version = "8.0.1" +version = "10.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1112,21 +1248,6 @@ dependencies = [ "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "languageserver-types" -version = "0.51.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "num-derive 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "lazy_static" version = "0.2.11" @@ -1144,7 +1265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.45" +version = "0.2.51" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-std-workspace-core 1.0.0", @@ -1152,15 +1273,15 @@ dependencies = [ [[package]] name = "libgit2-sys" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "libssh2-sys 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.25 
(registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1169,8 +1290,8 @@ name = "libnghttp2-sys" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1178,10 +1299,10 @@ name = "libssh2-sys" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1191,8 +1312,8 @@ name = "libz-sys" version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1212,26 +1333,43 @@ dependencies = [ [[package]] name = "log" -version = "0.3.9" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "log" -version = "0.4.6" +name = "log_settings" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "log_settings" +name = "lsp-codec" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "lsp-types" +version = "0.55.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "num-derive 0.2.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", + "url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1239,9 +1377,9 @@ name = "lzma-sys" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1267,11 +1405,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "phf 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", "phf_codegen 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1299,7 +1437,7 @@ dependencies = [ "open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1308,13 +1446,41 @@ dependencies = [ "toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "mdbook" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ammonia 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", + "elasticlunr-rs 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", + "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", + "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "toml-query 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "memchr" version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "version_check 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1323,7 +1489,7 @@ name = "memmap" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1334,7 +1500,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "minifier" -version = "0.0.20" +version = "0.0.29" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "macro-utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1345,8 +1511,8 @@ name = "miniz-sys" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1362,20 +1528,70 @@ name = "miniz_oxide_c_api" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "miniz_oxide 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] -name = "miow" -version = "0.3.3" +name = "mio" +version = "0.6.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] + "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fuchsia-zircon-sys 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazycell 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "mio-named-pipes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "mio-uds" +version = "0.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miow" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "miow" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "miri" @@ -1384,13 +1600,26 @@ dependencies = [ "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)", - "compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)", + "compiletest_rs 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)", "directories 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-workspace-hack 1.0.0", + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", "vergen 3.0.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "net2" +version = "0.2.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", 
+ "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "new_debug_unreachable" version = "1.0.1" @@ -1433,7 +1662,7 @@ name = "num_cpus" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1446,22 +1675,22 @@ name = "opener" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "openssl" -version = "0.10.15" +version = "0.10.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1474,16 +1703,16 @@ name = "openssl-src" version = "111.1.0+1.1.1a" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "openssl-sys" -version = "0.9.39" +version = "0.9.40" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "openssl-src 111.1.0+1.1.1a (registry+https://github.com/rust-lang/crates.io-index)", "pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "vcpkg 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1519,9 +1748,9 @@ dependencies = [ name = "panic_abort" version = "0.0.0" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1529,28 +1758,29 @@ name = "panic_unwind" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 
(registry+https://github.com/rust-lang/crates.io-index)", "unwind 0.0.0", ] [[package]] name = "parking_lot" -version = "0.6.4" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parking_lot_core" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1565,6 +1795,14 @@ name = "pest" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pest" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ucd-trie 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "pest_derive" version = "1.0.8" @@ -1575,6 +1813,37 @@ dependencies = [ "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "pest_derive" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "pest 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pest_generator 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pest_generator" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "pest 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "pest_meta 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.22 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "pest_meta" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "maplit 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "pest 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sha-1 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "petgraph" version = "0.4.13" @@ -1625,10 +1894,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "polonius-engine" -version = "0.5.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "datafrog 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "datafrog 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1657,14 +1926,6 @@ dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = 
"proc-macro2" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "proc-macro2" version = "0.4.24" @@ -1681,14 +1942,14 @@ version = "0.0.0" name = "profiler_builtins" version = "0.0.0" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] [[package]] name = "proptest" -version = "0.8.7" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1697,7 +1958,9 @@ dependencies = [ "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_chacha 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand_xorshift 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", "rusty-fork 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1736,14 +1999,6 @@ name = "quote" version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "quote" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "quote" version = "0.6.10" @@ -1754,7 +2009,7 @@ dependencies = [ [[package]] name = "racer" -version = "2.1.16" +version = "2.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1764,8 +2019,8 @@ dependencies = [ "humantime 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-syntax 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1774,19 +2029,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", -] - -[[package]] -name = "rand" -version = "0.5.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", - "fuchsia-zircon 0.3.3 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", - "rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1797,7 +2040,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)", "fuchsia-zircon 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "rand_chacha 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1817,14 +2060,6 @@ dependencies = [ "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rand_core" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "rand_core" version = "0.3.0" @@ -1879,7 +2114,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1897,6 +2132,17 @@ dependencies = [ "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "redox_users" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "0.2.11" @@ -1955,46 +2201,56 @@ dependencies = [ [[package]] name = "rls" -version = "1.31.6" +version = "1.35.0" dependencies = [ - "cargo 0.34.0", - "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo 0.36.0", + "cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "clippy_lints 0.0.212", - "crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "languageserver-types 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 
0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 10.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "lsp-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lsp-types 0.55.4 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "ordslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "racer 2.1.16 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "racer 2.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-analysis 0.16.10 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-analysis 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)", "rls-blacklist 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.18.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-rustc 0.6.0", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rls-vfs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-workspace-hack 1.0.0", - "rustc_tools_util 0.1.0", - "rustfmt-nightly 1.0.1", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_tools_util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustfmt-nightly 1.2.0", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", - "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-process 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-analysis" -version = "0.16.10" +version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "derive-new 
0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2002,8 +2258,8 @@ dependencies = [ "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", "json 0.11.13 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.18.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2014,27 +2270,26 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rls-data" -version = "0.18.1" +version = "0.18.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rls-rustc" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" +version = "0.6.0" [[package]] name = "rls-span" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2044,7 +2299,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2053,6 +2308,7 @@ version = "0.1.0" dependencies = [ "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)", "mdbook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "mdbook 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2063,23 +2319,25 @@ dependencies = [ "backtrace 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "fmt_macros 0.0.0", "graphviz 0.0.0", - "jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 
(registry+https://github.com/rust-lang/crates.io-index)", - "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_apfloat 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_fs_util 0.0.0", + "rustc_macros 0.1.0", "rustc_target 0.0.0", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "syntax 0.0.0", @@ -2089,20 +2347,20 @@ dependencies = [ [[package]] name = "rustc-ap-arena" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-ap-graphviz" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rustc-ap-rustc_cratesio_shim" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2112,53 +2370,55 @@ dependencies = [ [[package]] name = "rustc-ap-rustc_data_structures" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-graphviz 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_cratesio_shim 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-graphviz 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_cratesio_shim 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-ap-rustc_errors" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_cratesio_shim 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-syntax_pos 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_cratesio_shim 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-ap-rustc_target" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_cratesio_shim 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_cratesio_shim 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-ap-serialize" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2166,30 +2426,30 @@ dependencies = [ [[package]] name = "rustc-ap-syntax" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_errors 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_target 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-syntax_pos 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + 
"rustc-ap-rustc_errors 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-ap-syntax_pos" -version = "306.0.0" +version = "407.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-arena 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-arena 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2198,7 +2458,7 @@ name = "rustc-demangle" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-std-workspace-core 1.0.0", ] @@ -2214,6 +2474,7 @@ dependencies = [ name = "rustc-main" version = "0.0.0" dependencies = [ + "jemalloc-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_codegen_ssa 0.0.0", "rustc_driver 0.0.0", "rustc_target 0.0.0", @@ -2221,23 +2482,23 @@ dependencies = [ [[package]] name = "rustc-rayon" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ + "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustc-rayon-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2256,8 +2517,11 @@ dependencies = [ name = "rustc-workspace-hack" version = "1.0.0" dependencies = [ - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "byteorder 1.2.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "syn 0.15.22 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2293,8 +2557,8 @@ version = "0.0.0" dependencies = [ "alloc 0.0.0", "build_helper 0.1.0", - "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -2316,7 +2580,7 @@ dependencies = [ name = "rustc_codegen_llvm" version = "0.0.0" dependencies = [ - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-demangle 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2328,12 +2592,13 @@ name = "rustc_codegen_ssa" version = "0.0.0" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-demangle 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", @@ -2358,7 +2623,6 @@ dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", - "rustc_incremental 0.0.0", "rustc_metadata 0.0.0", "rustc_mir 0.0.0", "rustc_target 0.0.0", @@ -2380,13 +2644,15 @@ name = "rustc_data_structures" version = "0.0.0" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ena 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", + "jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon 0.1.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_cratesio_shim 0.0.0", "serialize 0.0.0", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2400,16 +2666,16 @@ dependencies = [ "arena 0.0.0", "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", - "jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", - "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", "rustc_borrowck 0.0.0", "rustc_codegen_utils 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_incremental 0.0.0", + "rustc_interface 0.0.0", "rustc_lint 0.0.0", "rustc_metadata 0.0.0", "rustc_mir 0.0.0", @@ -2421,7 +2687,7 @@ dependencies = [ "rustc_target 0.0.0", "rustc_traits 0.0.0", "rustc_typeck 0.0.0", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "syntax 0.0.0", @@ -2453,7 +2719,7 @@ version = "0.0.0" dependencies = [ "graphviz 0.0.0", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_fs_util 0.0.0", @@ -2462,6 +2728,36 @@ dependencies = [ "syntax_pos 0.0.0", ] +[[package]] +name = "rustc_interface" +version = "0.0.0" +dependencies = [ + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc 0.0.0", + "rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_allocator 0.0.0", + "rustc_borrowck 0.0.0", + "rustc_codegen_utils 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_incremental 0.0.0", + "rustc_lint 0.0.0", + "rustc_metadata 0.0.0", + "rustc_mir 0.0.0", + "rustc_passes 0.0.0", + "rustc_plugin 0.0.0", + "rustc_privacy 0.0.0", + "rustc_resolve 0.0.0", + "rustc_traits 0.0.0", + "rustc_typeck 0.0.0", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "serialize 0.0.0", + "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "syntax 0.0.0", + "syntax_ext 0.0.0", + "syntax_pos 0.0.0", +] + [[package]] name = "rustc_lint" version = "0.0.0" @@ -2479,7 +2775,7 @@ name = "rustc_llvm" version = "0.0.0" dependencies = [ "build_helper 0.1.0", - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2488,11 +2784,22 @@ version = "0.0.0" dependencies = [ "alloc 0.0.0", "build_helper 0.1.0", - "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] 
+[[package]] +name = "rustc_macros" +version = "0.1.0" +dependencies = [ + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 0.15.22 (registry+https://github.com/rust-lang/crates.io-index)", + "synstructure 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rustc_metadata" version = "0.0.0" @@ -2516,13 +2823,12 @@ name = "rustc_mir" version = "0.0.0" dependencies = [ "arena 0.0.0", - "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "log_settings 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", + "polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_apfloat 0.0.0", "rustc_data_structures 0.0.0", @@ -2540,8 +2846,8 @@ version = "0.0.0" dependencies = [ "alloc 0.0.0", "build_helper 0.1.0", - "cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -2555,13 +2861,10 @@ dependencies = [ "rustc_errors 0.0.0", "rustc_mir 0.0.0", "syntax 0.0.0", + "syntax_ext 0.0.0", "syntax_pos 0.0.0", ] -[[package]] -name = "rustc_platform_intrinsics" -version = "0.0.0" - [[package]] name = "rustc_plugin" version = "0.0.0" @@ -2577,6 +2880,7 @@ dependencies = [ name = "rustc_privacy" version = "0.0.0" dependencies = [ + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_typeck 0.0.0", @@ -2604,8 +2908,8 @@ name = "rustc_save_analysis" version = "0.0.0" dependencies = [ "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-data 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)", - "rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-data 0.18.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_codegen_utils 0.0.0", @@ -2629,14 +2933,19 @@ dependencies = [ [[package]] name = "rustc_tools_util" -version = "0.1.0" +version = "0.1.1" + +[[package]] +name = "rustc_tools_util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rustc_traits" version = "0.0.0" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", - "chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "chalk-engine 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "graphviz 0.0.0", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc 0.0.0", @@ -2653,8 +2962,8 @@ version = "0.0.0" dependencies = [ "alloc 0.0.0", "build_helper 0.1.0", - "cmake 0.1.33 
(registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", ] @@ -2667,7 +2976,6 @@ dependencies = [ "rustc 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", - "rustc_platform_intrinsics 0.0.0", "rustc_target 0.0.0", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "syntax 0.0.0", @@ -2686,8 +2994,8 @@ dependencies = [ name = "rustdoc" version = "0.0.0" dependencies = [ - "minifier 0.0.20 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "minifier 0.0.29 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2705,43 +3013,45 @@ dependencies = [ [[package]] name = "rustfix" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "rustfmt-nightly" -version = "1.0.1" +version = "1.2.0" dependencies = [ - "assert_cli 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "bytecount 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", + "cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", "derive-new 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "dirs 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-rustc_target 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-ap-syntax 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", 
- "rustc-ap-syntax_pos 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-rustc_target 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-ap-syntax_pos 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-workspace-hack 1.0.0", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", "unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2779,7 +3089,12 @@ dependencies = [ [[package]] name = "scoped-tls" -version = "0.1.2" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "scoped_threadpool" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -2793,7 +3108,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2803,8 +3118,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "serde" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", +] [[package]] name = "serde_derive" @@ -2821,7 +3139,7 @@ name = "serde_ignored" version = "0.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2831,7 +3149,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2841,6 +3159,17 @@ dependencies = [ "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sha-1" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "shell-escape" version = "0.1.4" @@ -2851,11 +3180,25 @@ name = "shlex" version = "0.1.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "signal-hook" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "arc-swap 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "siphasher" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "smallvec" version = "0.6.7" @@ -2870,7 +3213,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2885,13 +3228,13 @@ name = "std" version = "0.0.0" dependencies = [ "alloc 0.0.0", - "build_helper 0.1.0", - "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "backtrace-sys 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "dlmalloc 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "fortanix-sgx-abi 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "fortanix-sgx-abi 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "panic_abort 0.0.0", "panic_unwind 0.0.0", "profiler_builtins 0.0.0", @@ -2913,20 +3256,20 @@ dependencies = [ "new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", "precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", - "string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "string_cache_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "string_cache_codegen" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "phf_generator 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", "phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)", - "proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", - "quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)", "string_cache_shared 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3004,7 +3347,7 @@ dependencies = [ "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", "rustc_target 0.0.0", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", "syntax_pos 0.0.0", @@ -3031,7 +3374,7 @@ dependencies = [ "arena 0.0.0", "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_data_structures 0.0.0", - "scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "serialize 0.0.0", "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3042,7 +3385,7 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3053,7 +3396,7 @@ version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3074,6 +3417,15 @@ dependencies = [ name = "term" version = "0.0.0" +[[package]] +name = "term" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "term" version = "0.5.1" @@ -3096,7 +3448,7 @@ name = "termion" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3110,6 +3462,16 @@ dependencies = [ "term 0.0.0", ] +[[package]] +name = "tester" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "textwrap" version = "0.10.0" @@ -3130,7 +3492,7 @@ dependencies = [ name = "tidy" version = "0.1.0" dependencies = [ - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", 
"serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3140,17 +3502,213 @@ name = "time" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", "redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-current-thread 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-fs 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-timer 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-codec" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-current-thread" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-executor" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-fs" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-threadpool 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-io" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", 
+] + +[[package]] +name = "tokio-process" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-signal 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-reactor" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-signal" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "signal-hook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-tcp" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-threadpool" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-timer" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-udp" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tokio-uds" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", + "iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", + "mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "toml" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "toml" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3165,11 +3723,28 @@ dependencies = [ "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "toml-query" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", + "is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "typenum" 
version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "ucd-trie" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "ucd-util" version = "0.1.3" @@ -3208,6 +3783,11 @@ name = "unicode-xid" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "unicode_categories" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "unreachable" version = "1.0.0" @@ -3228,9 +3808,10 @@ dependencies = [ name = "unwind" version = "0.0.0" dependencies = [ - "compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)", + "compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "core 0.0.0", - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3248,7 +3829,7 @@ name = "url_serde" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -3279,7 +3860,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)", - "failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", + "failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3297,7 +3878,7 @@ name = "wait-timeout" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3356,12 +3937,21 @@ dependencies = [ "winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ws2_32-sys" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "xattr" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3382,72 +3972,83 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum aho-corasick 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1e9a933f4e58658d7b12defcf96dc5c720f20832deebe3e0a19efd3b6aaeeb9e" "checksum ammonia 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd4c682378117e4186a492b2252b9537990e1617f44aed9788b9a1149de45477" "checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b" +"checksum arc-swap 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1025aeae2b664ca0ea726a89d574fe8f4e77dd712d443236ad1de00379450cf6" 
+"checksum argon2rs 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "3f67b0b6a86dae6e67ff4ca2b6201396074996379fba2b92ff649126f37cb392" +"checksum arrayref 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0d382e583f07208808f6b1249e60848879ba3543f57c32277bf52d69c2f0f0ee" "checksum arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)" = "a1e964f9e24d588183fcb43503abda40d288c8657dfc27311516ce2f05675aef" -"checksum assert_cli 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "98589b0e465a6c510d95fceebd365bb79bedece7f6e18a480897f2015f85ec51" "checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652" "checksum backtrace 0.3.11 (registry+https://github.com/rust-lang/crates.io-index)" = "18b65ea1161bfb2dd6da6fade5edd4dbd08fba85012123dd333d2fd1b90b2782" -"checksum backtrace-sys 0.1.24 (registry+https://github.com/rust-lang/crates.io-index)" = "c66d56ac8dabd07f6aacdaf633f4b8262f5b3601a810a0dcddffd5c22c69daa0" +"checksum backtrace-sys 0.1.27 (registry+https://github.com/rust-lang/crates.io-index)" = "6ea90dd7b012b3d1a2cb6bec16670a0db2c95d4e931e84f4047e0460c1b34c8d" "checksum bit-set 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6f1efcc46c18245a69c38fcc5cc650f16d3a59d034f3106e9ed63748f695730a" "checksum bit-vec 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4440d5cb623bb7390ae27fec0bb6c61111969860f8e3ae198bfa0663645e67cf" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12" +"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +"checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab" "checksum bufstream 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" "checksum build_const 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39092a32794787acd8525ee150305ff051b0aa6cc2abaf193924f5ab05425f39" -"checksum bytecount 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b92204551573580e078dc80017f36a213eb77a0450e4ddd8cfa0f3f2d1f0178f" +"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40" +"checksum bytecount 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "be0fdd54b507df8f22012890aadd099979befdba27713c767993f8380112ca7c" "checksum byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d" +"checksum bytes 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)" = "40ade3d27603c2cb345eb0912aec461a6dec7e06a4ae48589904e808335c7afa" "checksum bytesize 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "716960a18f978640f25101b5cbf1c6f6b0d3192fab36a2d98ca96f0ecbe41010" "checksum cargo_metadata 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7d8dfe3adeb30f7938e6c1dd5327f29235d8ada3e898aeb08c343005ec2915a2" -"checksum cc 1.0.25 
(registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16" +"checksum cargo_metadata 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "585784cac9b05c93a53b17a0b24a5cdd1dfdda5256f030e089b549d2390cc720" +"checksum cc 1.0.35 (registry+https://github.com/rust-lang/crates.io-index)" = "5e5f3fee5eeb60324c2781f1e41286bdee933850fff9b3c672587fed5ec58c83" "checksum cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "082bb9b28e00d3c9d39cc03e64ce4cea0f1bb9b3fde493f0cbc008472d22bdf4" -"checksum chalk-engine 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6749eb72e7d4355d944a99f15fbaea701b978c18c5e184a025fcde942b0c9779" +"checksum chalk-engine 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17ec698a6f053a23bfbe646d9f2fde4b02abc19125595270a99e6f44ae0bdd1a" "checksum chalk-macros 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "295635afd6853aa9f20baeb7f0204862440c0fe994c5a253d5f479dac41d047e" "checksum chrono 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "45912881121cb26fad7c38c17ba7daa18764771836b34fab7d3fbd93ed633878" "checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e" "checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" -"checksum cmake 0.1.33 (registry+https://github.com/rust-lang/crates.io-index)" = "704fbf3bb5149daab0afb255dbea24a1f08d2f4099cedb9baab6d470d4c5eefb" +"checksum cmake 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)" = "96210eec534fc3fbfc0452a63769424eaa80205fda6cea98e5b61cb3d97bcec8" "checksum colored 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b0aa3473e85a3161b59845d6096b289bb577874cafeaf75ea1b1beaa6572c7fc" "checksum commoncrypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d056a8586ba25a1e4d61cb090900e495952c7886786fc55f909ab2f819b69007" "checksum commoncrypto-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1fed34f46747aa73dfaa578069fd8279d2818ade2b55f38f22a9401c7f4083e2" -"checksum compiler_builtins 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8ad611263b9f31bdb66e66227d3b781600fd1e68d5deee29b23f5e2ac9cb4892" -"checksum compiletest_rs 0.3.17 (registry+https://github.com/rust-lang/crates.io-index)" = "89747fe073b7838343bd2c2445e7a7c2e0d415598f8925f0fa9205b9cdfc48cb" +"checksum compiler_builtins 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4ada53ac629568219809178f988ca2aac9889e9a847379588c097d30ce185145" +"checksum compiletest_rs 0.3.21 (registry+https://github.com/rust-lang/crates.io-index)" = "17a60483b7d4d1534db7b77458a03d5d8a93a707432a04978dfe87ea43bb61b1" +"checksum constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8ff012e225ce166d4422e0e78419d901719760f62ae2b7969ca6b564d1b54a9e" "checksum core-foundation 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4e2640d6d0bf22e82bed1b73c6aef8d5dd31e5abe6666c57e6d45e2649f4f887" "checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" "checksum crc 1.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d663548de7f5cca343f1e0a48d14dcfb0e9eb4e079ec58883b7251539fa10aeb" "checksum crc32fast 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "e91d5240c6975ef33aeb5f148f35275c25eda8e8a5f95abe421978b05b8bf192" -"checksum crossbeam-channel 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7b85741761b7f160bc5e7e0c14986ef685b7f8bf9b7ad081c60c604bb4649827" +"checksum crossbeam-channel 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "5b2a9ea8f77c7f9efd317a8a5645f515d903a2d86ee14d2337a5facd1bd52c12" "checksum crossbeam-deque 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3" +"checksum crossbeam-deque 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13" "checksum crossbeam-epoch 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150" -"checksum crossbeam-epoch 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c90f1474584f38e270b5b613e898c8c328aa4f3dea85e0a27ac2e642f009416" +"checksum crossbeam-epoch 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f10a4f8f409aaac4b16a5474fb233624238fcdeefb9ba50d5ea059aab63ba31c" "checksum crossbeam-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9" -"checksum crossbeam-utils 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "677d453a17e8bd2b913fa38e8b9cf04bcdbb5be790aa294f2389661d72036015" "checksum crossbeam-utils 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e07fc155212827475223f0bcfae57e945e694fc90950ddf3f6695bbfd5555c72" "checksum crypto-hash 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "09de9ee0fc255ace04c7fa0763c9395a945c37c8292bb554f8d48361d1dcf1b4" "checksum curl 0.4.19 (registry+https://github.com/rust-lang/crates.io-index)" = "c7c9d851c825e0c033979d4516c9173bc19a78a96eb4d6ae51d4045440eafa16" "checksum curl-sys 0.4.15 (registry+https://github.com/rust-lang/crates.io-index)" = "721c204978be2143fab0a84b708c49d79d1f6100b8785610f456043a90708870" -"checksum datafrog 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "16d724bf4ffe77cdceeecd461009b5f8d9e23c5d645d68bedb4586bf43e7e142" +"checksum datafrog 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a0afaad2b26fa326569eb264b1363e8ae3357618c43982b3f285f0774ce76b69" "checksum derive-new 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "6ca414e896ae072546f4d789f452daaecf60ddee4c9df5dc6d5936d769e3d87c" "checksum derive_more 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f57d78cf3bd45270dad4e70c21ec77a960b36c7a841ff9db76aaa775a8fb871" "checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a" "checksum difference 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90" "checksum directories 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "72d337a64190607d4fcca2cb78982c5dd57f4916e19696b48a575fa746b6cb0f" -"checksum dlmalloc 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c46c65de42b063004b31c67a98abe071089b289ff0919c660ed7ff4f59317f8" +"checksum dirs 
1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "88972de891f6118092b643d85a0b28e0678e0f948d7f879aa32f2d5aafe97d2a" +"checksum dlmalloc 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "f283302e035e61c23f2b86b3093e8c6273a4c3125742d6087e96ade001ca5e63" "checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0" "checksum elasticlunr-rs 2.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a99a310cd1f9770e7bf8e48810c7bcbb0e078c8fb23a8c7bcf0da4c2bf61a455" "checksum ena 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f56c93cc076508c549d9bb747f79aa9b4eb098be7b8cad8830c3137ef52d1e00" +"checksum ena 0.13.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3dc01d68e08ca384955a3aeba9217102ca1aa85b6e168639bf27739f1d749d87" "checksum env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)" = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38" "checksum env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afb070faf94c85d17d50ca44f6ad076bce18ae92f0037d350947240a36e9d42e" -"checksum environment 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1f4b14e20978669064c33b4c1e0fb4083412e40fe56cbea2eae80fd7591503ee" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" "checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" -"checksum failure 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6dd377bcc1b1b7ce911967e3ec24fa19c3224394ec05b54aa7b083d498341ac7" -"checksum failure_derive 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "64c2d913fe8ed3b6c6518eedf4538255b989945c14c2a7d5cbff62a5e2120596" +"checksum failure 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "795bd83d3abeb9220f257e597aa0080a508b27533824adf336529648f6abf7e2" +"checksum failure_derive 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "ea1063915fd7ef4309e222a5a07cf9c319fb9c7836b1f89b85458672dbb127e1" +"checksum fake-simd 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" "checksum filetime 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "a2df5c1a8c4be27e7707789dc42ae65976e60b394afd293d1419ab915833e646" "checksum fixedbitset 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "86d4de0081402f5e88cdac65c8dcdcc73118c1a7a465e2a05f0da05843a8ea33" "checksum flate2 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2291c165c8e703ee54ef3055ad6188e3d51108e2ded18e9f2476e774fc5ad3d4" "checksum fnv 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" "checksum foreign-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" "checksum foreign-types-shared 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" -"checksum fortanix-sgx-abi 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "26105e20b4c3f7a319db1376b54ac9a46e5761e949405553375095d05a0cee4d" +"checksum fortanix-sgx-abi 0.3.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "3f8cbee5e872cf7db61a999a041f9bc4706ca7bf7df4cb914f53fabb1c1bc550" "checksum fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" "checksum fs_extra 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5f2a4a2034423744d2cc7ca2068453168dcdb82c438419e639a26bd87839c674" "checksum fst 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d94485a00b1827b861dd9d1a2cc9764f9044d4c535514c0760a5a2012ef3399f" @@ -3456,42 +4057,47 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum futf 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7c9c1ce3fa9336301af935ab852c437817d14cd33690446569392e65170aac3b" "checksum futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "1a70b146671de62ec8c8ed572219ca5d594d9b06c0b364d5e67b722fc559b48c" "checksum fwdansi 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "34dd4c507af68d37ffef962063dfa1944ce0dd4d5b82043dbab1dabe088610c3" +"checksum generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d" "checksum getopts 0.2.17 (registry+https://github.com/rust-lang/crates.io-index)" = "b900c08c1939860ce8b54dc6a89e26e00c04c380fd0e09796799bd7f12861e05" -"checksum git2 0.7.5 (registry+https://github.com/rust-lang/crates.io-index)" = "591f8be1674b421644b6c030969520bc3fa12114d2eb467471982ed3e9584e71" -"checksum git2-curl 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0173e317f8ba21f3fff0f71549fead5e42e67961dbd402bf69f42775f3cc78b4" -"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" +"checksum git2 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7339329bfa14a00223244311560d11f8f489b453fb90092af97f267a6090ab0" +"checksum git2-curl 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d58551e903ed7e2d6fe3a2f3c7efa3a784ec29b19d0fbb035aaf0497c183fbdd" +"checksum glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" "checksum globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "4743617a7464bbda3c8aec8558ff2f9429047e025771037df561d383337ff865" "checksum handlebars 0.32.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d89ec99d1594f285d4590fc32bac5f75cdab383f1123d504d27862c644a807dd" +"checksum handlebars 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d82e5750d8027a97b9640e3fefa66bbaf852a35228e1c90790efd13c4b09c166" "checksum heck 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "805026a5d0141ffc30abb3be3173848ad46a1b1664fe632428479619a3644d77" "checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" "checksum html5ever 0.22.5 (registry+https://github.com/rust-lang/crates.io-index)" = "c213fa6a618dc1da552f54f85cba74b05d8e883c92ec4e89067736938084c26e" +"checksum http 0.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "fe67e3678f2827030e89cc4b9e7ecd16d52f132c0b940ab5005f88e821500f6a" "checksum humantime 1.2.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "3ca7e5f2e110db35f93b837c81797f3714500b81d517bf20c431b16d3ca4f114" "checksum idna 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "38f09e0f0b1fb55fdee1f17470ad800da77af5186a1a76c026b679358b7e844e" "checksum if_chain 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4bac95d9aa0624e7b78187d6fb8ab012b41d9f6f54b1bcb61e61c4845f8357ec" -"checksum ignore 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "36ecfc5ad80f0b1226df948c562e2cddd446096be3f644c95106400eae8a5e01" -"checksum im-rc 12.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d4591152fd573cf453a890b5f9fdc5c328a751a0785539316739d5f85e5c468c" +"checksum ignore 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ad03ca67dc12474ecd91fdb94d758cbd20cb4e7a78ebe831df26a9b7511e1162" +"checksum im-rc 12.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9460397452f537fd51808056ff209f4c4c4c9d20d42ae952f517708726284972" +"checksum iovec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dbe6e417e7d0975db6512b90796e8ce223145ac4e33c377e4a42882a0e88bb08" "checksum is-match 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7e5b386aef33a1c677be65237cb9d32c3f3ef56bd035949710c4bb13083eb053" "checksum itertools 0.7.8 (registry+https://github.com/rust-lang/crates.io-index)" = "f58856976b776fedd95533137617a02fb25719f40e7d9b01c7043cd65474f450" +"checksum itertools 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b8467d9c1cebe26feb08c640139247fac215782d35371ade9a2136ed6085358" "checksum itoa 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1306f3464951f30e30d12373d31c79fbd52d236e5e896fd92f96ec7babbbe60b" -"checksum jemalloc-sys 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "bfc62c8e50e381768ce8ee0428ee53741929f7ebd73e4d83f669bcf7693e00ae" -"checksum jobserver 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "dd80e58f77e0cdea53ba96acc5e04479e5ffc5d869626a6beafe50fed867eace" +"checksum jemalloc-sys 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7bef0d4ce37578dfd80b466e3d8324bd9de788e249f1accebb0c472ea4b52bdc" +"checksum jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "b3d51e24009d966c8285d524dbaf6d60926636b2a89caee9ce0bd612494ddc16" "checksum json 0.11.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9ad0485404155f45cce53a40d4b2d6ac356418300daed05273d9e26f91c390be" -"checksum jsonrpc-core 8.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ddf83704f4e79979a424d1082dd2c1e52683058056c9280efa19ac5f6bc9033c" +"checksum jsonrpc-core 10.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7a5152c3fda235dfd68341b3edf4121bc4428642c93acbd6de88c26bf95fc5d7" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" -"checksum languageserver-types 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)" = "68de833188ada4e175d04a028f03f244f6370eedbcc75a05604d47d925933f69" "checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" "checksum lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a374c89b9db55895453a74c1e38861d9deec0b01b405a82516e9d5de4820dea1" "checksum lazycell 1.2.1 
(registry+https://github.com/rust-lang/crates.io-index)" = "b294d6fa9ee409a054354afc4352b0b9ef7ca222c69b8812cbea9e7d2bf3783f" -"checksum libc 0.2.45 (registry+https://github.com/rust-lang/crates.io-index)" = "2d2857ec59fadc0773853c664d2d18e7198e83883e7060b63c924cb077bd5c74" -"checksum libgit2-sys 0.7.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4916b5addc78ec36cc309acfcdf0b9f9d97ab7b84083118b248709c5b7029356" +"checksum libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)" = "bedcc7a809076656486ffe045abeeac163da1b558e963a31e29fbfbeba916917" +"checksum libgit2-sys 0.7.11 (registry+https://github.com/rust-lang/crates.io-index)" = "48441cb35dc255da8ae72825689a95368bf510659ae1ad55dc4aa88cb1789bf1" "checksum libnghttp2-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d75d7966bda4730b722d1eab8e668df445368a24394bae9fc1e8dc0ab3dbe4f4" "checksum libssh2-sys 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "126a1f4078368b163bfdee65fbab072af08a1b374a5551b21e87ade27b1fbf9d" "checksum libz-sys 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "2eb5e43362e38e2bca2fd5f5134c4d4564a23a5c28e9b95411652021a8675ebe" "checksum lock_api 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "949826a5ccf18c1b3a7c3d57692778d21768b79e46eb9dd07bfc4c2160036c54" -"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" "checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6" "checksum log_settings 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19af41f0565d7c19b2058153ad0b42d4d5ce89ec4dbf06ed6741114a8b63e7cd" +"checksum lsp-codec 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "169d737ad89cf8ddd82d1804d9122f54568c49377665157277cc90d747b1d31a" +"checksum lsp-types 0.55.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6392b5843615b8a2adeebe87b83fdd29567c0870baba3407a67e6dbfee4712f8" "checksum lzma-sys 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "d1eaa027402541975218bb0eec67d6b0412f6233af96e0d096d31dbdfd22e614" "checksum mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4" "checksum macro-utils 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f2c4deaccc2ead6a28c16c0ba82f07d52b6475397415ce40876e559b0b0ea510" @@ -3499,14 +4105,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum markup5ever 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "bfedc97d5a503e96816d10fedcd5b42f760b2e525ce2f7ec71f6a41780548475" "checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" "checksum mdbook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "90b5a8d7e341ceee5db3882a06078d42661ddcfa2b3687319cc5da76ec4e782f" +"checksum mdbook 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0ba0d44cb4089c741b9a91f3e5218298a40699c2f3a070a85014eed290c60819" "checksum memchr 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0a3eb002f0535929f1199681417029ebea04aadc0c7a4224b46be99c7f5d6a16" "checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff" 
"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3" -"checksum minifier 0.0.20 (registry+https://github.com/rust-lang/crates.io-index)" = "96c269bb45c39b333392b2b18ad71760b34ac65666591386b0e959ed58b3f474" +"checksum minifier 0.0.29 (registry+https://github.com/rust-lang/crates.io-index)" = "1f4950cb2617b1933e2da0446e864dfe0d6a22c22ff72297996c46e6a63b210b" "checksum miniz-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "0300eafb20369952951699b68243ab4334f4b10a88f411c221d444b36c40e649" "checksum miniz_oxide 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5ad30a47319c16cde58d0314f5d98202a80c9083b5f61178457403dfb14e509c" "checksum miniz_oxide_c_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "28edaef377517fd9fe3e085c37d892ce7acd1fbeab9239c5a36eec352d8a8b7e" +"checksum mio 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)" = "71646331f2619b1026cc302f87a2b8b648d5c6dd6937846a16cc8ce0f347f432" +"checksum mio-named-pipes 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" +"checksum mio-uds 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "966257a94e196b11bb43aca423754d87429960a768de9414f3691d6957abf125" +"checksum miow 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" "checksum miow 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "396aa0f2003d7df8395cb93e09871561ccc3e785f0acb369170e8cc74ddf9226" +"checksum net2 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)" = "42550d9fb7b6684a6d404d9fa7250c2eb2646df731d1c06afc06dcee9e1bcf88" "checksum new_debug_unreachable 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0cdc457076c78ab54d5e0d6fa7c47981757f1e34dc39ff92787f217dede586c4" "checksum nodrop 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)" = "9a2228dca57108069a5262f2ed8bd2e82496d2e074a06d1ccc7ce1687b6ae0a2" "checksum num-derive 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8af1847c907c2f04d7bfd572fb25bbb4385c637fe5be163cf2f8c5d778fe1e7d" @@ -3515,45 +4127,45 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c51a3322e4bca9d212ad9a158a02abc6934d005490c054a2778df73a70aa0a30" "checksum open 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c281318d992e4432cfa799969467003d05921582a7489a8325e37f8a450d5113" "checksum opener 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "04b1d6b086d9b3009550f9b6f81b10ad9428cf14f404b8e1a3a06f6f012c8ec9" -"checksum openssl 0.10.15 (registry+https://github.com/rust-lang/crates.io-index)" = "5e1309181cdcbdb51bc3b6bedb33dfac2a83b3d585033d3f6d9e22e8c1928613" +"checksum openssl 0.10.16 (registry+https://github.com/rust-lang/crates.io-index)" = "ec7bd7ca4cce6dbdc77e7c1230682740d307d1218a87fb0349a571272be749f9" "checksum openssl-probe 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" "checksum openssl-src 111.1.0+1.1.1a (registry+https://github.com/rust-lang/crates.io-index)" = "26bb632127731bf4ac49bf86a5dde12d2ca0918c2234fc39d79d4da2ccbc6da7" -"checksum openssl-sys 0.9.39 (registry+https://github.com/rust-lang/crates.io-index)" = 
"278c1ad40a89aa1e741a1eed089a2f60b18fab8089c3139b542140fc7d674106" +"checksum openssl-sys 0.9.40 (registry+https://github.com/rust-lang/crates.io-index)" = "1bb974e77de925ef426b6bc82fce15fd45bdcbeb5728bffcfc7cdeeb7ce1c2d6" "checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063" "checksum ordslice 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd20eec3dbe4376829cb7d80ae6ac45e0a766831dca50202ff2d40db46a8a024" "checksum owning_ref 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "cdf84f41639e037b484f93433aa3897863b561ed65c6e59c7073d7c561710f37" "checksum packed_simd 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "25d36de864f7218ec5633572a800109bbe5a1cc8d9d95a967f3daf93ea7e6ddc" -"checksum parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f0802bff09003b291ba756dc7e79313e51cc31667e94afbe847def490424cde5" -"checksum parking_lot_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "06a2b6aae052309c2fd2161ef58f5067bc17bb758377a0de9d4b279d603fdd8a" +"checksum parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ab41b4aed082705d1056416ae4468b6ea99d52599ecf3169b00088d43113e337" +"checksum parking_lot_core 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "94c8c7923936b28d546dfd14d4472eaf34c99b14e1c973a32b3e6d4eb04298c9" "checksum percent-encoding 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "31010dd2e1ac33d5b46a5b413495239882813e0369f8ed8a5e266f173602f831" "checksum pest 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0fce5d8b5cc33983fc74f78ad552b5522ab41442c4ca91606e4236eb4b5ceefc" +"checksum pest 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "54f0c72a98d8ab3c99560bfd16df8059cc10e1f9a8e83e6e3b97718dd766e9c3" "checksum pest_derive 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ca3294f437119209b084c797604295f40227cffa35c57220b1e99a6ff3bf8ee4" +"checksum pest_derive 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +"checksum pest_generator 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "63120576c4efd69615b5537d3d052257328a4ca82876771d6944424ccfd9f646" +"checksum pest_meta 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f5a3492a4ed208ffc247adcdcc7ba2a95be3104f58877d0d02f0df39bf3efb5e" "checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f" "checksum phf 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "7d37a244c75a9748e049225155f56dbcb98fe71b192fd25fd23cb914b5ad62f2" "checksum phf_codegen 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "4e4048fe7dd7a06b8127ecd6d3803149126e9b33c7558879846da3a63f734f2b" "checksum phf_generator 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "05a079dd052e7b674d21cb31cbb6c05efd56a2cd2827db7692e2f1a507ebd998" "checksum phf_shared 0.7.22 (registry+https://github.com/rust-lang/crates.io-index)" = "c2261d544c2bb6aa3b10022b0be371b9c7c64f762ef28c6f5d4f1ef6d97b5930" "checksum pkg-config 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "676e8eb2b1b4c9043511a9b7bea0915320d7e502b0a079fb03f9635a5252b18c" -"checksum polonius-engine 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a5b6b0a7f5f4278b991ffd14abce1d01b013121ad297460237ef0a2f08d43201" +"checksum polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2490c396085801abf88df91758bad806b0890354f0875d624e62ecf0579a8145" "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" "checksum pretty_assertions 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a029430f0d744bc3d15dd474d591bed2402b645d024583082b9f63bb936dac6" "checksum pretty_env_logger 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "df8b3f4e0475def7d9c2e5de8e5a1306949849761e107b360d03e98eafaffd61" -"checksum proc-macro2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1b06e2f335f48d24442b35a19df506a835fb3547bc3c06ef27340da9acf5cae7" "checksum proc-macro2 0.4.24 (registry+https://github.com/rust-lang/crates.io-index)" = "77619697826f31a02ae974457af0b29b723e5619e113e9397b8b82c6bd253f09" -"checksum proptest 0.8.7 (registry+https://github.com/rust-lang/crates.io-index)" = "926d0604475349f463fe44130aae73f2294b5309ab2ca0310b998bd334ef191f" +"checksum proptest 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "24f5844db2f839e97e3021980975f6ebf8691d9b9b2ca67ed3feb38dc3edb52c" "checksum pulldown-cmark 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d6fdf85cda6cadfae5428a54661d431330b312bc767ddbc57adbedc24da66e32" "checksum pulldown-cmark 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eef52fac62d0ea7b9b4dc7da092aa64ea7ec3d90af6679422d3d7e0e14b6ee15" "checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0" "checksum quine-mc_cluskey 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "07589615d719a60c8dd8a4622e7946465dfef20d1a428f969e3443e7386d5f45" "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" -"checksum quote 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9949cfe66888ffe1d53e6ec9d9f3b70714083854be20fd5e271b232a017401e8" "checksum quote 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "53fa22a1994bd0f9372d7a816207d8a2677ad0325b073f5c5332760f0fb62b5c" -"checksum racer 2.1.16 (registry+https://github.com/rust-lang/crates.io-index)" = "fbfcf2686b50f75a279cb42d9c6d253a1e68a475b415ea4baf7fb177ce94839a" +"checksum racer 2.1.21 (registry+https://github.com/rust-lang/crates.io-index)" = "37c88638777cc178684cf648ca0e1dad56646ce105b8593dfe665c436300adc3" "checksum rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8356f47b32624fef5b3301c1be97e5944ecdd595409cc5da11d05f211db6cfbd" -"checksum rand 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e464cd887e869cddcae8792a4ee31d23c7edd516700695608f5b98c67ee0131c" "checksum rand 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae9d223d52ae411a33cf7e54ec6034ec165df296ccd23533d671a28252b6f66a" "checksum rand_chacha 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "771b009e3a508cb67e8823dda454aaa5368c7bc1c16829fb77d3e980440dd34a" -"checksum rand_core 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1961a422c4d189dfb50ffa9320bf1f2a9bd54ecb92792fb9477f99a1045f3372" "checksum rand_core 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"0905b6b7079ec73b314d4c748701f6931eb79fd97c668caa3f1899b22b32c6db" "checksum rand_hc 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" "checksum rand_isaac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" @@ -3563,53 +4175,58 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum rayon-core 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9d24ad214285a7729b174ed6d3bcfcb80177807f959d95fafd5bfc5c4f201ac8" "checksum redox_syscall 0.1.43 (registry+https://github.com/rust-lang/crates.io-index)" = "679da7508e9a6390aeaf7fbd02a800fdc64b73fe2204dd2c8ae66d22d9d5ad5d" "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" +"checksum redox_users 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "214a97e49be64fd2c86f568dd0cb2c757d2cc53de95b273b6ad0a1c908482f26" "checksum regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" "checksum regex 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "37e7cbbd370869ce2e8dff25c7018702d10b21a20ef7135316f8daecd6c25b7f" "checksum regex-syntax 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)" = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" "checksum regex-syntax 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4e47a2ed29da7a9e1960e1639e7a982e6edc6d49be308a3b02daf511504a16d1" "checksum remove_dir_all 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3488ba1b9a2084d38645c4c08276a1752dcbf2c7130d74f1569681ad5d2799c5" -"checksum rls-analysis 0.16.10 (registry+https://github.com/rust-lang/crates.io-index)" = "2de1187cceaf16d7642cc78835a2890b55b35ed9e8a8e3c6348a6297d8dd0fb1" +"checksum rls-analysis 0.16.12 (registry+https://github.com/rust-lang/crates.io-index)" = "ae18d8ad01dec3b2014f4d7ae3c607d7adbcff79e5d3b48ea42ea71c10d43a71" "checksum rls-blacklist 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b8ce1fdac03e138c4617ff87b194e1ff57a39bb985a044ccbd8673d30701e411" -"checksum rls-data 0.18.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3a209ce46bb52813cbe0786a7baadc0c1a3f5543ef93f179eda3b841ed72cf2e" -"checksum rls-rustc 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9dba7390427aefa953608429701e3665192ca810ba8ae09301e001b7c7bed0" -"checksum rls-span 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d7c7046dc6a92f2ae02ed302746db4382e75131b9ce20ce967259f6b5867a6a" +"checksum rls-data 0.18.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5f80b84551b32e26affaf7f12374913b5061730c0dcd185d9e8fa5a15e36e65c" +"checksum rls-span 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "33d66f1d6c6ccd5c98029f162544131698f6ebb61d8c697681cac409dcd08805" "checksum rls-vfs 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "72d56425bd5aa86d9d4372b76f0381d3b4bda9c0220e71956c9fcc929f45c1f1" -"checksum rustc-ap-arena 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cbfb540c1347a3993060896b18e0d64084203fa37aaffdc5e5c31264f275d476" -"checksum rustc-ap-graphviz 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "790ac657d5bf69be9ef56f6810e8a0238b07e8460a88526e11d41f8214eb6c4e" 
-"checksum rustc-ap-rustc_cratesio_shim 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b721cf32b543f3ee90240d7b757ca4a45930fe9981458a50678b4ccd75c472e2" -"checksum rustc-ap-rustc_data_structures 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4fa11df199d45ce948b07792ca593f59c1d19d2cb05d35c6b0a02271e772a416" -"checksum rustc-ap-rustc_errors 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b7ead3163ef995bbba520b88739e1d60f9ccf74fdacdda985067644c8134e827" -"checksum rustc-ap-rustc_target 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "688fef9cc27837755019b72b4f13e7a3d3e5012473475f377b75dbb1f07beb5f" -"checksum rustc-ap-serialize 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b2c0e8161e956647592a737074736e6ce05ea36b70c770ea8cca3eb9cb33737" -"checksum rustc-ap-syntax 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1adc189e5e4500a4167b9afa04e67067f40d0039e0e05870c977bebb561f065a" -"checksum rustc-ap-syntax_pos 306.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4d42c430dbb0be4377bfe6aa5099074c63ac8796b24098562c2e2154aecc5652" +"checksum rustc-ap-arena 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5aab2fb5e5becf1c9183f6c63b8714817a3e780a20b4fe6b3920751c98a18225" +"checksum rustc-ap-graphviz 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0235ff613d4f96176ea56748010b5d8e978605cc47856ba9bb5372f4f38e9c03" +"checksum rustc-ap-rustc_cratesio_shim 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "63e04a90b0dd8597da83633961698c61a2948f50c9d4b9a71e8afafc0ba0f158" +"checksum rustc-ap-rustc_data_structures 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c03988d65fc5130787df32e8ea91738f78a8ed62b7a5bdd77f10e5cceb531d8e" +"checksum rustc-ap-rustc_errors 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8b33b9dc34f9fa50bf7e6fd14f2f3c1adc69833acf43c10f3e9795bd4d613712" +"checksum rustc-ap-rustc_target 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6de75caef2c7acba11994614266d60238653657677934817ab368d169333cba" +"checksum rustc-ap-serialize 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cf09c60aaee892b0fd107544cfe607d8d463e7f33da34aa823566b8fd2b17f53" +"checksum rustc-ap-syntax 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "69f38cc120ff317678bbda8c4f58c1bbc1de64b615383ab01480482dde5e95a1" +"checksum rustc-ap-syntax_pos 407.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "20a0a201141c5c416b1924b079eeefc7b013e34ece0740ce4997f358b3684a7f" "checksum rustc-demangle 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "82ae957aa1b3055d8e086486723c0ccd3d7b8fa190ae8fa2e35543b6171c810e" "checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8" -"checksum rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8c6d5a683c6ba4ed37959097e88d71c9e8e26659a3cb5be8b389078e7ad45306" -"checksum rustc-rayon-core 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "40f06724db71e18d68b3b946fdf890ca8c921d9edccc1404fdfdb537b0d12649" +"checksum rustc-rayon 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d98c51d9cbbe810c8b6693236d3412d8cd60513ff27a3e1b6af483dca0af544" +"checksum rustc-rayon-core 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"526e7b6d2707a5b9bec3927d424ad70fa3cfc68e0ac1b75e46cdbbc95adc5108" "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" +"checksum rustc_tools_util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b3c5a95edfa0c893236ae4778bb7c4752760e4c0d245e19b5eff33c5aa5eb9dc" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -"checksum rustfix 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "756567f00f7d89c9f89a5c401b8b1caaa122e27240b9eaadd0bb52ee0b680b1b" +"checksum rustfix 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "af7c21531a91512a4a51b490be6ba1c8eff34fdda0dc5bf87dc28d86748aac56" "checksum rusty-fork 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9591f190d2852720b679c21f66ad929f9f1d7bb09d1193c26167586029d8489c" "checksum ryu 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "eb9e9b8cde282a9fe6a42dd4681319bfb63f121b8a8ee9439c6f4107e58a46f7" "checksum same-file 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8f20c4be53a8a1ff4c1f1b2bd14570d2f634628709752f0702ecdd2b3f9a5267" "checksum schannel 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "0e1a231dc10abf6749cfa5d7767f25888d484201accbd919b66ab5413c502d56" -"checksum scoped-tls 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "332ffa32bf586782a3efaeb58f127980944bbc8c4d6913a86107ac2a5ab24b28" +"checksum scoped-tls 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" +"checksum scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8" "checksum scopeguard 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "94258f53601af11e6a49f722422f6e3425c52b06245a5cf9bc09908b174f5e27" "checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" -"checksum serde 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)" = "c91eb5b0190ae87b4e2e39cbba6e3bed3ac6186935fe265f0426156c4c49961b" +"checksum serde 1.0.82 (registry+https://github.com/rust-lang/crates.io-index)" = "6fa52f19aee12441d5ad11c9a00459122bd8f98707cadf9778c540674f1935b6" "checksum serde_derive 1.0.81 (registry+https://github.com/rust-lang/crates.io-index)" = "477b13b646f5b5b56fc95bedfc3b550d12141ce84f466f6c44b9a17589923885" "checksum serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "190e9765dcedb56be63b6e0993a006c7e3b071a016a304736e4a315dc01fb142" "checksum serde_json 1.0.33 (registry+https://github.com/rust-lang/crates.io-index)" = "c37ccd6be3ed1fdf419ee848f7c758eb31b054d7cd3ae3600e3bae0adf569811" +"checksum sha-1 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9d1f3b5de8a167ab06834a7c883bd197f2191e1dda1a22d9ccfeedbf9aded" "checksum shell-escape 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "170a13e64f2a51b77a45702ba77287f5c6829375b04a69cf2222acd17d0cfab9" "checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" +"checksum signal-hook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "1f272d1b7586bec132ed427f532dd418d8beca1ca7f2caf7df35569b1415a4b4" "checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537" +"checksum slab 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" "checksum smallvec 0.6.7 (registry+https://github.com/rust-lang/crates.io-index)" = "b73ea3738b47563803ef814925e69be00799a8c07420be8b996f8e98fb2336db" "checksum socket2 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "c4d11a52082057d87cb5caa31ad812f4504b97ab44732cd8359df2e9ff9f48e7" "checksum stable_deref_trait 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ffbc596e092fe5f598b12ef46cc03754085ac2f4d8c739ad61c4ae266cc3b3fa" "checksum string_cache 0.7.3 (registry+https://github.com/rust-lang/crates.io-index)" = "25d70109977172b127fe834e5449e5ab1740b9ba49fa18a2020f509174f25423" -"checksum string_cache_codegen 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "35293b05cf1494e8ddd042a7df6756bf18d07f42d234f32e71dce8a7aabb0191" +"checksum string_cache_codegen 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1eea1eee654ef80933142157fdad9dd8bc43cf7c74e999e369263496f04ff4da" "checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc" "checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550" "checksum strum 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c3a2071519ab6a48f465808c4c1ffdd00dfc8e93111d02b4fc5abab177676e" @@ -3621,15 +4238,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum tar 0.4.20 (registry+https://github.com/rust-lang/crates.io-index)" = "a303ba60a099fcd2aaa646b14d2724591a96a75283e4b7ed3d1a1658909d9ae2" "checksum tempfile 3.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "7e91405c14320e5c79b3d148e1c86f40749a36e490642202a31689cb1a3452b2" "checksum tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9de21546595a0873061940d994bbbc5c35f024ae4fd61ec5c5b159115684f508" +"checksum term 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "fa63644f74ce96fbeb9b794f66aff2a52d601cbd5e80f4b97123e3899f4570f1" "checksum term 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5e6b677dd1e8214ea1ef4297f85dbcbed8e8cdddb561040cc998ca2551c37561" "checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" +"checksum tester 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5e812cb26c597f86a49b26dbb58b878bd2a2b4b93fc069dc39499228fe556ff6" "checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6" "checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" "checksum time 0.1.40 
(registry+https://github.com/rust-lang/crates.io-index)" = "d825be0eb33fda1a7e68012d51e9c7f451dc1a69391e7fdc197060bb8c56667b" +"checksum tokio 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4790d0be6f4ba6ae4f48190efa2ed7780c9e3567796abdb285003cf39840d9c5" +"checksum tokio-codec 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c501eceaf96f0e1793cf26beb63da3d11c738c4a943fdf3746d81d64684c39f" +"checksum tokio-current-thread 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "331c8acc267855ec06eb0c94618dcbbfea45bed2d20b77252940095273fb58f6" +"checksum tokio-executor 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "30c6dbf2d1ad1de300b393910e8a3aa272b724a400b6531da03eed99e329fbf0" +"checksum tokio-fs 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0e9cbbc8a3698b7ab652340f46633364f9eaa928ddaaee79d8b8f356dd79a09d" +"checksum tokio-io 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b53aeb9d3f5ccf2ebb29e19788f96987fa1355f8fe45ea193928eaaaf3ae820f" +"checksum tokio-process 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "88e1281e412013f1ff5787def044a9577a0bed059f451e835f1643201f8b777d" +"checksum tokio-reactor 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "afbcdb0f0d2a1e4c440af82d7bbf0bf91a8a8c0575bcd20c05d15be7e9d3a02f" +"checksum tokio-signal 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "dd6dc5276ea05ce379a16de90083ec80836440d5ef8a6a39545a3207373b8296" +"checksum tokio-tcp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1d14b10654be682ac43efee27401d792507e30fd8d26389e1da3b185de2e4119" +"checksum tokio-threadpool 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "17465013014410310f9f61fa10bf4724803c149ea1d51efece131c38efca93aa" +"checksum tokio-timer 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "4f37f0111d76cc5da132fe9bc0590b9b9cfd079bc7e75ac3846278430a299ff8" +"checksum tokio-udp 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "66268575b80f4a4a710ef83d087fdfeeabdce9b74c797535fbac18a2cb906e92" +"checksum tokio-uds 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "037ffc3ba0e12a0ab4aca92e5234e0dedeb48fddf6ccd260f1f150a36a9f2445" "checksum toml 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)" = "758664fc71a3a69038656bee8b6be6477d2a6c315a6b81f7081f591bffa4111f" +"checksum toml 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "87c5890a989fa47ecdc7bcb4c63a77a82c18f306714104b1decfd722db17b39e" "checksum toml-query 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6854664bfc6df0360c695480836ee90e2d0c965f06db291d10be9344792d43e8" +"checksum toml-query 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab234a943a2363ad774020e2f9474a38a85bc4396bace01a96380144aef17db3" "checksum typenum 1.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "612d636f949607bdf9b123b4a6f6d966dedf3ff669f7f045890d3a4a73948169" +"checksum ucd-trie 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "71a9c5b1fe77426cf144cc30e49e955270f5086e31a6441dfa8b32efc09b9d77" "checksum ucd-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "535c204ee4d8434478593480b8f86ab45ec9aae0e83c568ca81abf0fd0e88f86" "checksum unicode-bidi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" "checksum unicode-normalization 0.1.7 
(registry+https://github.com/rust-lang/crates.io-index)" = "6a0180bc61fc5a987082bfa111f4cc95c4caff7f9799f3e46df09163a937aa25" @@ -3637,6 +4273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526" "checksum unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +"checksum unicode_categories 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" "checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a" "checksum url_serde 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "74e7d099f1ee52f823d4bdd60c93c3602043c728f5db3b97bdb548467f7bddea" @@ -3656,6 +4293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" "checksum winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "afc5508759c5bf4285e61feb862b6083c8480aec864fa17a81fdec6f69b461ab" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" "checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba" +"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" "checksum xz2 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "df8bf41d3030c3577c9458fd6640a05afbf43b150d0b531b16bd77d3f794f27a" "checksum yaml-rust 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e66366e18dc58b46801afbf2ca7661a9f59cc8c5962c29892b6039b4f86fa992" diff --git a/Cargo.toml b/Cargo.toml index 667c55791bd17..cb3c0ee194fe2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ members = [ "src/tools/rustdoc-themes", ] exclude = [ - "src/tools/rls/test_data", "build", # HACK(eddyb) This hardcodes the fact that our CI uses `/checkout/obj`. 
"obj", @@ -71,4 +70,3 @@ rustc-std-workspace-core = { path = 'src/tools/rustc-std-workspace-core' } [patch."https://github.com/rust-lang/rust-clippy"] clippy_lints = { path = "src/tools/clippy/clippy_lints" } -rustc_tools_util = { path = "src/tools/clippy/rustc_tools_util" } diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000000000..ab3e292f1e8c5 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,14 @@ +node('master') { + stage('Checkout') { + checkout scm + sh 'git submodule update --init --recursive' + } + + stage('Build') { + dir('build') { + sh '../x.py build' + } + } +} + + diff --git a/README.md b/README.md index dc013a1ad2be6..327af708dc040 100644 --- a/README.md +++ b/README.md @@ -1,247 +1,70 @@ -# The Rust Programming Language +# Rust with AVR support -This is the main source code repository for [Rust]. It contains the compiler, -standard library, and documentation. +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/avr-rust) -[Rust]: https://www.rust-lang.org +This project adds support for the [AVR](https://en.wikipedia.org/wiki/Atmel_AVR) +microcontroller to Rust. -## Quick Start -[quick-start]: #quick-start +It uses the [AVR-LLVM backend](http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AVR/). -Read ["Installation"] from [The Book]. +## Caveats -["Installation"]: https://doc.rust-lang.org/book/ch01-01-installation.html -[The Book]: https://doc.rust-lang.org/book/index.html +While the stock libcore may be compiled, certain code patterns may +still exercise code in LLVM that is broken or that produces +miscompiled code. Looking for existing issues or submitting a new +issue is appreciated! -## Building from Source -[building-from-source]: #building-from-source +## Building and installation -### Building on *nix -1. Make sure you have installed the dependencies: +This will compile Rust with AVR support. This will not create a +fully-fledged cross-compiler, however, as it does not compile any libraries +such as `libcore` or `liblibc`. To do this, the `--target=avr-unknown-unknown` +flag must be passed to `configure`, which is not fully supported yet due to bugs. - * `g++` 4.7 or later or `clang++` 3.x or later - * `python` 2.7 (but not 3.x) - * GNU `make` 3.81 or later - * `cmake` 3.4.3 or later - * `curl` - * `git` +First make sure you've installed all dependencies for building, as specified in +the main Rust repository [here](https://github.com/rust-lang/rust/#building-from-source). +Then use the following commands: -2. Clone the [source] with `git`: +``` bash +# Grab the avr-rust sources +git clone https://github.com/avr-rust/rust.git - ```sh - $ git clone https://github.com/rust-lang/rust.git - $ cd rust - ``` +# Create a directory to place built files in +mkdir build && cd build -[source]: https://github.com/rust-lang/rust +# Generate Makefile using settings suitable for an experimental compiler +../rust/configure \ + --enable-debug \ + --disable-docs \ + --enable-llvm-assertions \ + --enable-debug-assertions \ + --enable-optimize \ + --enable-llvm-release-debuginfo \ + --experimental-targets=AVR \ + --prefix=/opt/avr-rust -3. Build and install: +# Build the compiler, optionally install it to /opt/avr-rust +make +make install - ```sh - $ git submodule update --init --recursive --progress - $ ./x.py build && sudo ./x.py install - ``` +# Register the toolchain with rustup +rustup toolchain link avr-toolchain $(realpath $(find . 
-name 'stage1')) - > ***Note:*** Install locations can be adjusted by copying the config file - > from `./config.toml.example` to `./config.toml`, and - > adjusting the `prefix` option under `[install]`. Various other options, such - > as enabling debug information, are also supported, and are documented in - > the config file. - - When complete, `sudo ./x.py install` will place several programs into - `/usr/local/bin`: `rustc`, the Rust compiler, and `rustdoc`, the - API-documentation tool. This install does not include [Cargo], - Rust's package manager, which you may also want to build. - -[Cargo]: https://github.com/rust-lang/cargo - -### Building on Windows -[building-on-windows]: #building-on-windows - -There are two prominent ABIs in use on Windows: the native (MSVC) ABI used by -Visual Studio, and the GNU ABI used by the GCC toolchain. Which version of Rust -you need depends largely on what C/C++ libraries you want to interoperate with: -for interop with software produced by Visual Studio use the MSVC build of Rust; -for interop with GNU software built using the MinGW/MSYS2 toolchain use the GNU -build. - -#### MinGW -[windows-mingw]: #windows-mingw - -[MSYS2][msys2] can be used to easily build Rust on Windows: - -[msys2]: https://msys2.github.io/ - -1. Grab the latest [MSYS2 installer][msys2] and go through the installer. - -2. Run `mingw32_shell.bat` or `mingw64_shell.bat` from wherever you installed - MSYS2 (i.e. `C:\msys64`), depending on whether you want 32-bit or 64-bit - Rust. (As of the latest version of MSYS2 you have to run `msys2_shell.cmd - -mingw32` or `msys2_shell.cmd -mingw64` from the command line instead) - -3. From this terminal, install the required tools: - - ```sh - # Update package mirrors (may be needed if you have a fresh install of MSYS2) - $ pacman -Sy pacman-mirrors - - # Install build tools needed for Rust. If you're building a 32-bit compiler, - # then replace "x86_64" below with "i686". If you've already got git, python, - # or CMake installed and in PATH you can remove them from this list. Note - # that it is important that you do **not** use the 'python2' and 'cmake' - # packages from the 'msys2' subsystem. The build has historically been known - # to fail with these packages. - $ pacman -S git \ - make \ - diffutils \ - tar \ - mingw-w64-x86_64-python2 \ - mingw-w64-x86_64-cmake \ - mingw-w64-x86_64-gcc - ``` - -4. Navigate to Rust's source code (or clone it), then build it: - - ```sh - $ ./x.py build && ./x.py install - ``` - -#### MSVC -[windows-msvc]: #windows-msvc - -MSVC builds of Rust additionally require an installation of Visual Studio 2013 -(or later) so `rustc` can use its linker. Make sure to check the “C++ tools” -option. - -With these dependencies installed, you can build the compiler in a `cmd.exe` -shell with: - -```sh -> python x.py build -``` - -Currently, building Rust only works with some known versions of Visual Studio. If -you have a more recent version installed the build system doesn't understand -then you may need to force rustbuild to use an older version. This can be done -by manually calling the appropriate vcvars file before running the bootstrap. - -```batch -> CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\vcvars64.bat" -> python x.py build -``` - -#### Specifying an ABI -[specifying-an-abi]: #specifying-an-abi - -Each specific ABI can also be used from either environment (for example, using -the GNU ABI in PowerShell) by using an explicit build triple. 
The available -Windows build triples are: -- GNU ABI (using GCC) - - `i686-pc-windows-gnu` - - `x86_64-pc-windows-gnu` -- The MSVC ABI - - `i686-pc-windows-msvc` - - `x86_64-pc-windows-msvc` - -The build triple can be specified by either specifying `--build=` when -invoking `x.py` commands, or by copying the `config.toml` file (as described -in Building From Source), and modifying the `build` option under the `[build]` -section. - -### Configure and Make -[configure-and-make]: #configure-and-make - -While it's not the recommended build system, this project also provides a -configure script and makefile (the latter of which just invokes `x.py`). - -```sh -$ ./configure -$ make && sudo make install -``` - -When using the configure script, the generated `config.mk` file may override the -`config.toml` file. To go back to the `config.toml` file, delete the generated -`config.mk` file. - -## Building Documentation -[building-documentation]: #building-documentation - -If you’d like to build the documentation, it’s almost the same: - -```sh -$ ./x.py doc +# Optionally enable the avr toolchain globally +rustup default avr-toolchain ``` -The generated documentation will appear under `doc` in the `build` directory for -the ABI used. I.e., if the ABI was `x86_64-pc-windows-msvc`, the directory will be -`build\x86_64-pc-windows-msvc\doc`. - -## Notes -[notes]: #notes - -Since the Rust compiler is written in Rust, it must be built by a -precompiled "snapshot" version of itself (made in an earlier stage of -development). As such, source builds require a connection to the Internet, to -fetch snapshots, and an OS that can execute the available snapshot binaries. - -Snapshot binaries are currently built and tested on several platforms: - -| Platform / Architecture | x86 | x86_64 | -|--------------------------------|-----|--------| -| Windows (7, 8, Server 2008 R2) | ✓ | ✓ | -| Linux (2.6.18 or later) | ✓ | ✓ | -| OSX (10.7 Lion or later) | ✓ | ✓ | - -You may find that other platforms work, but these are our officially -supported build environments that are most likely to work. - -Rust currently needs between 600MiB and 1.5GiB of RAM to build, depending on platform. -If it hits swap, it will take a very long time to build. - -There is more advice about hacking on Rust in [CONTRIBUTING.md]. - -[CONTRIBUTING.md]: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md - -## Getting Help -[getting-help]: #getting-help - -The Rust community congregates in a few places: - -* [Stack Overflow] - Direct questions about using the language. -* [users.rust-lang.org] - General discussion and broader questions. -* [/r/rust] - News and general discussion. - -[Stack Overflow]: https://stackoverflow.com/questions/tagged/rust -[/r/rust]: https://reddit.com/r/rust -[users.rust-lang.org]: https://users.rust-lang.org/ - -## Contributing -[contributing]: #contributing - -To contribute to Rust, please see [CONTRIBUTING](CONTRIBUTING.md). - -Rust has an [IRC] culture and most real-time collaboration happens in a -variety of channels on Mozilla's IRC network, irc.mozilla.org. The -most popular channel is [#rust], a venue for general discussion about -Rust. And a good place to ask for help would be [#rust-beginners]. - -The [rustc guide] might be a good place to start if you want to find out how -various parts of the compiler work. +## Usage -Also, you may find the [rustdocs for the compiler itself][rustdocs] useful. 
+### With Xargo (recommended) -[IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat -[#rust]: irc://irc.mozilla.org/rust -[#rust-beginners]: irc://irc.mozilla.org/rust-beginners -[rustc guide]: https://rust-lang.github.io/rustc-guide/about-this-guide.html -[rustdocs]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ +Take a look at the example [blink](https://github.com/avr-rust/blink) program. -## License -[license]: #license +### Vanilla `rustc` -Rust is primarily distributed under the terms of both the MIT license -and the Apache License (Version 2.0), with portions covered by various -BSD-like licenses. +AVR support is enabled by passing the `--target avr-unknown-unknown` flag to `rustc`. -See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and -[COPYRIGHT](COPYRIGHT) for details. +Note that the Rust `libcore` library (essentially required for every Rust program) +must be compiled manually before it can be used, as it is not (yet) built for AVR during +compiler compilation. Work is underway to allow `libcore` +to be compiled for AVR automatically. diff --git a/RELEASES.md b/RELEASES.md index da09af3edfe8a..fcaaa73bdc0f4 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,9 +1,620 @@ +Version 1.34.0 (2019-04-11) +========================== + +Language +-------- +- [You can now use `#[deprecated = "reason"]`][58166] as a shorthand for + `#[deprecated(note = "reason")]`. This was previously allowed by mistake + but had no effect. +- [You can now accept token streams in `#[attr()]`, `#[attr[]]`, and + `#[attr{}]` procedural macros.][57367] +- [You can now write `extern crate self as foo;`][57407] to import your + crate's root into the extern prelude. + + +Compiler +-------- +- [You can now target `riscv64imac-unknown-none-elf` and + `riscv64gc-unknown-none-elf`.][58406] +- [You can now enable linker plugin LTO optimisations with + `-C linker-plugin-lto`.][58057] This allows rustc to compile your Rust code + into LLVM bitcode, allowing LLVM to perform LTO optimisations across C/C++ FFI + boundaries. +- [You can now target `powerpc64-unknown-freebsd`.][57809] + + +Libraries +--------- +- [The trait bounds have been removed on some of `HashMap`'s and + `HashSet`'s basic methods.][58370] Most notably you no longer require + the `Hash` trait to create an iterator. +- [The `Ord` trait bounds have been removed on some of `BinaryHeap`'s basic + methods.][58421] Most notably you no longer require the `Ord` trait to create + an iterator. +- [The methods `overflowing_neg` and `wrapping_neg` are now `const` functions + for all numeric types.][58044] +- [Indexing a `str` is now generic over all types that + implement `SliceIndex`.][57604] +- [`str::trim`, `str::trim_matches`, `str::trim_{start, end}`, and + `str::trim_{start, end}_matches` are now `#[must_use]`][57106] and will + produce a warning if their return value is unused. +- [The methods `checked_pow`, `saturating_pow`, `wrapping_pow`, and + `overflowing_pow` are now available for all numeric types.][57873] These are + equivalent to methods such as `wrapping_add` for the `pow` operation (see the sketch below).
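The release notes stop short of a demonstration here, so the following is a small editorial sketch (not part of the original notes) of how the newly stabilized `pow` methods listed in the bullet above behave on `u8`:

```rust
fn main() {
    // checked_pow returns None instead of panicking on overflow.
    assert_eq!(2u8.checked_pow(7), Some(128));
    assert_eq!(2u8.checked_pow(8), None);

    // saturating_pow clamps the result at the type's maximum value.
    assert_eq!(2u8.saturating_pow(8), u8::max_value());

    // wrapping_pow wraps around on overflow, just like wrapping_add does for addition.
    assert_eq!(2u8.wrapping_pow(9), 0);

    // overflowing_pow returns the wrapped value together with an overflow flag.
    assert_eq!(2u8.overflowing_pow(9), (0, true));
}
```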
+ + +Stabilized APIs +--------------- + +#### std & core +* [`Any::type_id`] +* [`Error::type_id`] +* [`atomic::AtomicI16`] +* [`atomic::AtomicI32`] +* [`atomic::AtomicI64`] +* [`atomic::AtomicI8`] +* [`atomic::AtomicU16`] +* [`atomic::AtomicU32`] +* [`atomic::AtomicU64`] +* [`atomic::AtomicU8`] +* [`convert::Infallible`] +* [`convert::TryFrom`] +* [`convert::TryInto`] +* [`iter::from_fn`] +* [`iter::successors`] +* [`num::NonZeroI128`] +* [`num::NonZeroI16`] +* [`num::NonZeroI32`] +* [`num::NonZeroI64`] +* [`num::NonZeroI8`] +* [`num::NonZeroIsize`] +* [`slice::sort_by_cached_key`] +* [`str::escape_debug`] +* [`str::escape_default`] +* [`str::escape_unicode`] +* [`str::split_ascii_whitespace`] + +#### std +* [`Instant::checked_add`] +* [`Instant::checked_sub`] +* [`SystemTime::checked_add`] +* [`SystemTime::checked_sub`] + +Cargo +----- +- [You can now use alternative registries to crates.io.][cargo/6654] + +Misc +---- +- [You can now use the `?` operator in your documentation tests without manually + adding `fn main() -> Result<(), _> {}`.][56470] + +Compatibility Notes +------------------- +- [`Command::before_exec` is now deprecated in favor of the + unsafe method `Command::pre_exec`.][58059] +- [Use of `ATOMIC_{BOOL, ISIZE, USIZE}_INIT` is now deprecated.][57425] As you + can now use `const` functions in `static` variables. + +[58370]: https://github.com/rust-lang/rust/pull/58370/ +[58406]: https://github.com/rust-lang/rust/pull/58406/ +[58421]: https://github.com/rust-lang/rust/pull/58421/ +[58166]: https://github.com/rust-lang/rust/pull/58166/ +[58044]: https://github.com/rust-lang/rust/pull/58044/ +[58057]: https://github.com/rust-lang/rust/pull/58057/ +[58059]: https://github.com/rust-lang/rust/pull/58059/ +[57809]: https://github.com/rust-lang/rust/pull/57809/ +[57873]: https://github.com/rust-lang/rust/pull/57873/ +[57604]: https://github.com/rust-lang/rust/pull/57604/ +[57367]: https://github.com/rust-lang/rust/pull/57367/ +[57407]: https://github.com/rust-lang/rust/pull/57407/ +[57425]: https://github.com/rust-lang/rust/pull/57425/ +[57106]: https://github.com/rust-lang/rust/pull/57106/ +[56470]: https://github.com/rust-lang/rust/pull/56470/ +[cargo/6654]: https://github.com/rust-lang/cargo/pull/6654/ +[`Any::type_id`]: https://doc.rust-lang.org/std/any/trait.Any.html#tymethod.type_id +[`Error::type_id`]: https://doc.rust-lang.org/std/error/trait.Error.html#tymethod.type_id +[`atomic::AtomicI16`]: https://doc.rust-lang.org/std/atomic/struct.AtomicI16.html +[`atomic::AtomicI32`]: https://doc.rust-lang.org/std/atomic/struct.AtomicI32.html +[`atomic::AtomicI64`]: https://doc.rust-lang.org/std/atomic/struct.AtomicI64.html +[`atomic::AtomicI8`]: https://doc.rust-lang.org/std/atomic/struct.AtomicI8.html +[`atomic::AtomicU16`]: https://doc.rust-lang.org/std/atomic/struct.AtomicU16.html +[`atomic::AtomicU32`]: https://doc.rust-lang.org/std/atomic/struct.AtomicU32.html +[`atomic::AtomicU64`]: https://doc.rust-lang.org/std/atomic/struct.AtomicU64.html +[`atomic::AtomicU8`]: https://doc.rust-lang.org/std/atomic/struct.AtomicU8.html +[`convert::Infallible`]: https://doc.rust-lang.org/std/convert/enum.Infallible.html +[`convert::TryFrom`]: https://doc.rust-lang.org/std/convert/trait.TryFrom.html +[`convert::TryInto`]: https://doc.rust-lang.org/std/convert/trait.TryInto.html +[`iter::from_fn`]: https://doc.rust-lang.org/std/iter/fn.from_fn.html +[`iter::successors`]: https://doc.rust-lang.org/std/iter/fn.successors.html +[`num::NonZeroI128`]: 
https://doc.rust-lang.org/std/num/struct.NonZeroI128.html +[`num::NonZeroI16`]: https://doc.rust-lang.org/std/num/struct.NonZeroI16.html +[`num::NonZeroI32`]: https://doc.rust-lang.org/std/num/struct.NonZeroI32.html +[`num::NonZeroI64`]: https://doc.rust-lang.org/std/num/struct.NonZeroI64.html +[`num::NonZeroI8`]: https://doc.rust-lang.org/std/num/struct.NonZeroI8.html +[`num::NonZeroIsize`]: https://doc.rust-lang.org/std/num/struct.NonZeroIsize.html +[`slice::sort_by_cached_key`]: https://doc.rust-lang.org/std/primitive.slice.html#method.sort_by_cached_key +[`str::escape_debug`]: https://doc.rust-lang.org/std/primitive.str.html#method.escape_debug +[`str::escape_default`]: https://doc.rust-lang.org/std/primitive.str.html#method.escape_default +[`str::escape_unicode`]: https://doc.rust-lang.org/std/primitive.str.html#method.escape_unicode +[`str::split_ascii_whitespace`]: https://doc.rust-lang.org/std/primitive.str.html#method.split_ascii_whitespace +[`Instant::checked_add`]: https://doc.rust-lang.org/std/time/struct.Instant.html#method.checked_add +[`Instant::checked_sub`]: https://doc.rust-lang.org/std/time/struct.Instant.html#method.checked_sub +[`SystemTime::checked_add`]: https://doc.rust-lang.org/std/time/struct.SystemTime.html#method.checked_add +[`SystemTime::checked_sub`]: https://doc.rust-lang.org/std/time/struct.SystemTime.html#method.checked_sub + + +Version 1.33.0 (2019-02-28) +========================== + +Language +-------- +- [You can now use the `cfg(target_vendor)` attribute.][57465] E.g. + `#[cfg(target_vendor="apple")] fn main() { println!("Hello Apple!"); }` +- [Integer patterns such as in a match expression can now be exhaustive.][56362] + E.g. You can have a match statement on a `u8` that covers `0..=255` and + you would no longer be required to have a `_ => unreachable!()` case. +- [You can now have multiple patterns in `if let` and `while let` + expressions.][57532] You can do this with the same syntax as a `match` + expression. E.g. + ```rust + enum Creature { + Crab(String), + Lobster(String), + Person(String), + } + + fn main() { + let state = Creature::Crab(String::from("Ferris")); + + if let Creature::Crab(name) | Creature::Person(name) = state { + println!("This creature's name is: {}", name); + } + } + ``` +- [You can now have irrefutable `if let` and `while let` patterns.][57535] Using + this feature will by default produce a warning as this behaviour can be + unintuitive. E.g. `if let _ = 5 {}` +- [You can now use `let` bindings, assignments, expression statements, + and irrefutable pattern destructuring in const functions.][57175] +- [You can now call unsafe const functions.][57067] E.g. + ```rust + const unsafe fn foo() -> i32 { 5 } + const fn bar() -> i32 { + unsafe { foo() } + } + ``` +- [You can now specify multiple attributes in a `cfg_attr` attribute.][57332] + E.g. `#[cfg_attr(all(), must_use, optimize)]` +- [You can now specify a specific alignment with the `#[repr(packed)]` + attribute.][57049] E.g. `#[repr(packed(2))] struct Foo(i16, i32);` is a struct + with an alignment of 2 bytes and a size of 6 bytes. +- [You can now import an item from a module as an `_`.][56303] This allows you to + import a trait's impls, and not have the name in the namespace. E.g. + ```rust + use std::io::Read as _; + + // Allowed as there is only one `Read` in the module. + pub trait Read {} + ``` +- [You may now use `Rc`, `Arc`, and `Pin` as method receivers][56805] (see the sketch below).
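As a rough, editorial sketch of the `Rc<Self>` receiver mentioned in the bullet above (the `Counter` type and `shared_value` method are invented here purely for illustration):

```rust
use std::rc::Rc;

struct Counter {
    value: u32,
}

impl Counter {
    // `self: Rc<Self>` is accepted as a method receiver on stable as of 1.33.0.
    fn shared_value(self: Rc<Self>) -> u32 {
        self.value
    }
}

fn main() {
    let counter = Rc::new(Counter { value: 7 });
    assert_eq!(counter.shared_value(), 7);
}
```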
+ +Compiler +-------- +- [You can now set a linker flavor for `rustc` with the `-Clinker-flavor` + command line argument.][56351] +- [The minimum required LLVM version has been bumped to 6.0.][56642] +- [Added support for the PowerPC64 architecture on FreeBSD.][57615] +- [The `x86_64-fortanix-unknown-sgx` target has been upgraded to + tier 2 support.][57130] Visit the [platform support][platform-support] page for + information on Rust's platform support. +- [Added support for the `thumbv7neon-linux-androideabi` and + `thumbv7neon-unknown-linux-gnueabihf` targets.][56947] +- [Added support for the `x86_64-unknown-uefi` target.][56769] + +Libraries +--------- +- [The methods `overflowing_{add, sub, mul, shl, shr}` are now `const` + functions for all numeric types.][57566] +- [The methods `rotate_left`, `rotate_right`, and `wrapping_{add, sub, mul, shl, shr}` + are now `const` functions for all numeric types.][57105] +- [The methods `is_positive` and `is_negative` are now `const` functions for + all signed numeric types.][57105] +- [The `get` method for all `NonZero` types is now `const`.][57167] +- [The methods `count_ones`, `count_zeros`, `leading_zeros`, `trailing_zeros`, + `swap_bytes`, `from_be`, `from_le`, `to_be`, `to_le` are now `const` for all + numeric types.][57234] +- [`Ipv4Addr::new` is now a `const` function.][57234] + +Stabilized APIs +--------------- +- [`unix::FileExt::read_exact_at`] +- [`unix::FileExt::write_all_at`] +- [`Option::transpose`] +- [`Result::transpose`] +- [`convert::identity`] +- [`pin::Pin`] +- [`marker::Unpin`] +- [`marker::PhantomPinned`] +- [`Vec::resize_with`] +- [`VecDeque::resize_with`] +- [`Duration::as_millis`] +- [`Duration::as_micros`] +- [`Duration::as_nanos`] + + +Cargo +----- +- [You can now publish crates that require a feature flag to compile with + `cargo publish --features` or `cargo publish --all-features`.][cargo/6453] +- [Cargo should now rebuild a crate if a file was modified during the initial + build.][cargo/6484] + +Compatibility Notes +------------------- +- The methods `str::{trim_left, trim_right, trim_left_matches, trim_right_matches}` + are now deprecated in the standard library, and their usage will now produce a warning. + Please use the `str::{trim_start, trim_end, trim_start_matches, trim_end_matches}` + methods instead. +- The `Error::cause` method has been deprecated in favor of `Error::source`, which supports + downcasting. +- [Libtest no longer creates a new thread for each test when + `--test-threads=1`. 
It also runs the tests in deterministic order][56243] + +[55982]: https://github.com/rust-lang/rust/pull/55982/ +[56243]: https://github.com/rust-lang/rust/pull/56243 +[56303]: https://github.com/rust-lang/rust/pull/56303/ +[56351]: https://github.com/rust-lang/rust/pull/56351/ +[56362]: https://github.com/rust-lang/rust/pull/56362 +[56642]: https://github.com/rust-lang/rust/pull/56642/ +[56769]: https://github.com/rust-lang/rust/pull/56769/ +[56805]: https://github.com/rust-lang/rust/pull/56805 +[56947]: https://github.com/rust-lang/rust/pull/56947/ +[57049]: https://github.com/rust-lang/rust/pull/57049/ +[57067]: https://github.com/rust-lang/rust/pull/57067/ +[57105]: https://github.com/rust-lang/rust/pull/57105 +[57130]: https://github.com/rust-lang/rust/pull/57130/ +[57167]: https://github.com/rust-lang/rust/pull/57167/ +[57175]: https://github.com/rust-lang/rust/pull/57175/ +[57234]: https://github.com/rust-lang/rust/pull/57234/ +[57332]: https://github.com/rust-lang/rust/pull/57332/ +[57465]: https://github.com/rust-lang/rust/pull/57465/ +[57532]: https://github.com/rust-lang/rust/pull/57532/ +[57535]: https://github.com/rust-lang/rust/pull/57535/ +[57566]: https://github.com/rust-lang/rust/pull/57566/ +[57615]: https://github.com/rust-lang/rust/pull/57615/ +[cargo/6453]: https://github.com/rust-lang/cargo/pull/6453/ +[cargo/6484]: https://github.com/rust-lang/cargo/pull/6484/ +[`unix::FileExt::read_exact_at`]: https://doc.rust-lang.org/std/os/unix/fs/trait.FileExt.html#method.read_exact_at +[`unix::FileExt::write_all_at`]: https://doc.rust-lang.org/std/os/unix/fs/trait.FileExt.html#method.write_all_at +[`Option::transpose`]: https://doc.rust-lang.org/std/option/enum.Option.html#method.transpose +[`Result::transpose`]: https://doc.rust-lang.org/std/result/enum.Result.html#method.transpose +[`convert::identity`]: https://doc.rust-lang.org/std/convert/fn.identity.html +[`pin::Pin`]: https://doc.rust-lang.org/std/pin/struct.Pin.html +[`marker::Unpin`]: https://doc.rust-lang.org/stable/std/marker/trait.Unpin.html +[`marker::PhantomPinned`]: https://doc.rust-lang.org/nightly/std/marker/struct.PhantomPinned.html +[`Vec::resize_with`]: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.resize_with +[`VecDeque::resize_with`]: https://doc.rust-lang.org/std/collections/struct.VecDeque.html#method.resize_with +[`Duration::as_millis`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.as_millis +[`Duration::as_micros`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.as_micros +[`Duration::as_nanos`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.as_nanos +[platform-support]: https://forge.rust-lang.org/platform-support.html + +Version 1.32.0 (2019-01-17) +========================== + +Language +-------- +#### 2018 edition +- [You can now use the `?` operator in macro definitions.][56245] The `?` + operator allows you to specify zero or one repetitions similar to the `*` and + `+` operators. +- [Module paths with no leading keyword like `super`, `self`, or `crate`, will + now always resolve to the item (`enum`, `struct`, etc.) available in the + module if present, before resolving to a external crate or an item the prelude.][56759] + E.g. + ```rust + enum Color { Red, Green, Blue } + + use Color::*; + ``` + +#### All editions +- [You can now match against `PhantomData` types.][55837] +- [You can now match against literals in macros with the `literal` + specifier.][56072] This will match against a literal of any type. + E.g. 
`1`, `'A'`, `"Hello World"` +- [Self can now be used as a constructor and pattern for unit and tuple structs.][56365] E.g. + ```rust + struct Point(i32, i32); + + impl Point { + pub fn new(x: i32, y: i32) -> Self { + Self(x, y) + } + + pub fn is_origin(&self) -> bool { + match self { + Self(0, 0) => true, + _ => false, + } + } + } + ``` +- [Self can also now be used in type definitions.][56366] E.g. + ```rust + enum List<T> + where + Self: PartialOrd // can write `Self` instead of `List<T>` + { + Nil, + Cons(T, Box<Self>) // likewise here + } + ``` +- [You can now mark traits with `#[must_use]`.][55663] This provides a warning if + an `impl Trait` or `dyn Trait` is returned and unused in the program. + +Compiler +-------- +- [The default allocator has changed from jemalloc to the default allocator on + your system.][55238] The compiler itself on Linux & macOS will still use + jemalloc, but programs compiled with it will use the system allocator. +- [Added the `aarch64-pc-windows-msvc` target.][55702] + +Libraries +--------- +- [`PathBuf` now implements `FromStr`.][55148] +- [`Box<[T]>` now implements `FromIterator`.][55843] +- [The `dbg!` macro has been stabilized.][56395] This macro enables you to + easily debug expressions in your Rust program. E.g. + ```rust + let a = 2; + let b = dbg!(a * 2) + 1; + // ^-- prints: [src/main.rs:4] a * 2 = 4 + assert_eq!(b, 5); + ``` + +The following APIs are now `const` functions and can be used in a +`const` context. + +- [`Cell::as_ptr`] +- [`UnsafeCell::get`] +- [`char::is_ascii`] +- [`iter::empty`] +- [`ManuallyDrop::new`] +- [`ManuallyDrop::into_inner`] +- [`RangeInclusive::start`] +- [`RangeInclusive::end`] +- [`NonNull::as_ptr`] +- [`slice::as_ptr`] +- [`str::as_ptr`] +- [`Duration::as_secs`] +- [`Duration::subsec_millis`] +- [`Duration::subsec_micros`] +- [`Duration::subsec_nanos`] +- [`CStr::as_ptr`] +- [`Ipv4Addr::is_unspecified`] +- [`Ipv6Addr::new`] +- [`Ipv6Addr::octets`] + +Stabilized APIs +--------------- +- [`i8::to_be_bytes`] +- [`i8::to_le_bytes`] +- [`i8::to_ne_bytes`] +- [`i8::from_be_bytes`] +- [`i8::from_le_bytes`] +- [`i8::from_ne_bytes`] +- [`i16::to_be_bytes`] +- [`i16::to_le_bytes`] +- [`i16::to_ne_bytes`] +- [`i16::from_be_bytes`] +- [`i16::from_le_bytes`] +- [`i16::from_ne_bytes`] +- [`i32::to_be_bytes`] +- [`i32::to_le_bytes`] +- [`i32::to_ne_bytes`] +- [`i32::from_be_bytes`] +- [`i32::from_le_bytes`] +- [`i32::from_ne_bytes`] +- [`i64::to_be_bytes`] +- [`i64::to_le_bytes`] +- [`i64::to_ne_bytes`] +- [`i64::from_be_bytes`] +- [`i64::from_le_bytes`] +- [`i64::from_ne_bytes`] +- [`i128::to_be_bytes`] +- [`i128::to_le_bytes`] +- [`i128::to_ne_bytes`] +- [`i128::from_be_bytes`] +- [`i128::from_le_bytes`] +- [`i128::from_ne_bytes`] +- [`isize::to_be_bytes`] +- [`isize::to_le_bytes`] +- [`isize::to_ne_bytes`] +- [`isize::from_be_bytes`] +- [`isize::from_le_bytes`] +- [`isize::from_ne_bytes`] +- [`u8::to_be_bytes`] +- [`u8::to_le_bytes`] +- [`u8::to_ne_bytes`] +- [`u8::from_be_bytes`] +- [`u8::from_le_bytes`] +- [`u8::from_ne_bytes`] +- [`u16::to_be_bytes`] +- [`u16::to_le_bytes`] +- [`u16::to_ne_bytes`] +- [`u16::from_be_bytes`] +- [`u16::from_le_bytes`] +- [`u16::from_ne_bytes`] +- [`u32::to_be_bytes`] +- [`u32::to_le_bytes`] +- [`u32::to_ne_bytes`] +- [`u32::from_be_bytes`] +- [`u32::from_le_bytes`] +- [`u32::from_ne_bytes`] +- [`u64::to_be_bytes`] +- [`u64::to_le_bytes`] +- [`u64::to_ne_bytes`] +- [`u64::from_be_bytes`] +- [`u64::from_le_bytes`] +- [`u64::from_ne_bytes`] +- [`u128::to_be_bytes`] +- [`u128::to_le_bytes`] +- 
[`u128::to_ne_bytes`] +- [`u128::from_be_bytes`] +- [`u128::from_le_bytes`] +- [`u128::from_ne_bytes`] +- [`usize::to_be_bytes`] +- [`usize::to_le_bytes`] +- [`usize::to_ne_bytes`] +- [`usize::from_be_bytes`] +- [`usize::from_le_bytes`] +- [`usize::from_ne_bytes`] + +Cargo +----- +- [You can now run `cargo c` as an alias for `cargo check`.][cargo/6218] +- [Usernames are now allowed in alt registry URLs.][cargo/6242] + +Misc +---- +- [`libproc_macro` has been added to the `rust-src` distribution.][55280] + +Compatibility Notes +------------------- +- [The argument types for AVX's + `_mm256_stream_si256`, `_mm256_stream_pd`, `_mm256_stream_ps`][55610] have + been changed from `*const` to `*mut` as the previous implementation + was unsound. + + +[55148]: https://github.com/rust-lang/rust/pull/55148/ +[55238]: https://github.com/rust-lang/rust/pull/55238/ +[55280]: https://github.com/rust-lang/rust/pull/55280/ +[55610]: https://github.com/rust-lang/rust/pull/55610/ +[55663]: https://github.com/rust-lang/rust/pull/55663/ +[55702]: https://github.com/rust-lang/rust/pull/55702/ +[55837]: https://github.com/rust-lang/rust/pull/55837/ +[55843]: https://github.com/rust-lang/rust/pull/55843/ +[56072]: https://github.com/rust-lang/rust/pull/56072/ +[56245]: https://github.com/rust-lang/rust/pull/56245/ +[56365]: https://github.com/rust-lang/rust/pull/56365/ +[56366]: https://github.com/rust-lang/rust/pull/56366/ +[56395]: https://github.com/rust-lang/rust/pull/56395/ +[56759]: https://github.com/rust-lang/rust/pull/56759/ +[cargo/6218]: https://github.com/rust-lang/cargo/pull/6218/ +[cargo/6242]: https://github.com/rust-lang/cargo/pull/6242/ +[`CStr::as_ptr`]: https://doc.rust-lang.org/std/ffi/struct.CStr.html#method.as_ptr +[`Cell::as_ptr`]: https://doc.rust-lang.org/std/cell/struct.Cell.html#method.as_ptr +[`Duration::as_secs`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.as_secs +[`Duration::subsec_micros`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.subsec_micros +[`Duration::subsec_millis`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.subsec_millis +[`Duration::subsec_nanos`]: https://doc.rust-lang.org/std/time/struct.Duration.html#method.subsec_nanos +[`Ipv4Addr::is_unspecified`]: https://doc.rust-lang.org/std/net/struct.Ipv4Addr.html#method.is_unspecified +[`Ipv6Addr::new`]: https://doc.rust-lang.org/std/net/struct.Ipv6Addr.html#method.new +[`Ipv6Addr::octets`]: https://doc.rust-lang.org/std/net/struct.Ipv6Addr.html#method.octets +[`ManuallyDrop::into_inner`]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html#method.into_inner +[`ManuallyDrop::new`]: https://doc.rust-lang.org/std/mem/struct.ManuallyDrop.html#method.new +[`NonNull::as_ptr`]: https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.as_ptr +[`RangeInclusive::end`]: https://doc.rust-lang.org/std/ops/struct.RangeInclusive.html#method.end +[`RangeInclusive::start`]: https://doc.rust-lang.org/std/ops/struct.RangeInclusive.html#method.start +[`UnsafeCell::get`]: https://doc.rust-lang.org/std/cell/struct.UnsafeCell.html#method.get +[`slice::as_ptr`]: https://doc.rust-lang.org/std/primitive.slice.html#method.as_ptr +[`char::is_ascii`]: https://doc.rust-lang.org/std/primitive.char.html#method.is_ascii +[`i128::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i128.html#method.from_be_bytes +[`i128::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i128.html#method.from_le_bytes +[`i128::from_ne_bytes`]: 
https://doc.rust-lang.org/stable/std/primitive.i128.html#method.from_ne_bytes +[`i128::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i128.html#method.to_be_bytes +[`i128::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i128.html#method.to_le_bytes +[`i128::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i128.html#method.to_ne_bytes +[`i16::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.from_be_bytes +[`i16::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.from_le_bytes +[`i16::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.from_ne_bytes +[`i16::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.to_be_bytes +[`i16::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.to_le_bytes +[`i16::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i16.html#method.to_ne_bytes +[`i32::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.from_be_bytes +[`i32::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.from_le_bytes +[`i32::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.from_ne_bytes +[`i32::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.to_be_bytes +[`i32::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.to_le_bytes +[`i32::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i32.html#method.to_ne_bytes +[`i64::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.from_be_bytes +[`i64::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.from_le_bytes +[`i64::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.from_ne_bytes +[`i64::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.to_be_bytes +[`i64::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.to_le_bytes +[`i64::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i64.html#method.to_ne_bytes +[`i8::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.from_be_bytes +[`i8::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.from_le_bytes +[`i8::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.from_ne_bytes +[`i8::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.to_be_bytes +[`i8::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.to_le_bytes +[`i8::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.i8.html#method.to_ne_bytes +[`isize::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.from_be_bytes +[`isize::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.from_le_bytes +[`isize::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.from_ne_bytes +[`isize::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.to_be_bytes +[`isize::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.to_le_bytes +[`isize::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.isize.html#method.to_ne_bytes +[`iter::empty`]: https://doc.rust-lang.org/std/iter/fn.empty.html +[`str::as_ptr`]: https://doc.rust-lang.org/std/primitive.str.html#method.as_ptr 
+[`u128::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.from_be_bytes +[`u128::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.from_le_bytes +[`u128::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.from_ne_bytes +[`u128::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.to_be_bytes +[`u128::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.to_le_bytes +[`u128::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u128.html#method.to_ne_bytes +[`u16::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.from_be_bytes +[`u16::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.from_le_bytes +[`u16::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.from_ne_bytes +[`u16::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.to_be_bytes +[`u16::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.to_le_bytes +[`u16::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u16.html#method.to_ne_bytes +[`u32::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.from_be_bytes +[`u32::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.from_le_bytes +[`u32::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.from_ne_bytes +[`u32::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.to_be_bytes +[`u32::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.to_le_bytes +[`u32::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u32.html#method.to_ne_bytes +[`u64::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.from_be_bytes +[`u64::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.from_le_bytes +[`u64::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.from_ne_bytes +[`u64::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.to_be_bytes +[`u64::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.to_le_bytes +[`u64::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u64.html#method.to_ne_bytes +[`u8::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.from_be_bytes +[`u8::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.from_le_bytes +[`u8::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.from_ne_bytes +[`u8::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.to_be_bytes +[`u8::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.to_le_bytes +[`u8::to_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.u8.html#method.to_ne_bytes +[`usize::from_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.usize.html#method.from_be_bytes +[`usize::from_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.usize.html#method.from_le_bytes +[`usize::from_ne_bytes`]: https://doc.rust-lang.org/stable/std/primitive.usize.html#method.from_ne_bytes +[`usize::to_be_bytes`]: https://doc.rust-lang.org/stable/std/primitive.usize.html#method.to_be_bytes +[`usize::to_le_bytes`]: https://doc.rust-lang.org/stable/std/primitive.usize.html#method.to_le_bytes +[`usize::to_ne_bytes`]: 
https://doc.rust-lang.org/stable/std/primitive.usize.html#method.to_ne_bytes + + +Version 1.31.1 (2018-12-20) +=========================== + +- [Fix Rust failing to build on `powerpc-unknown-netbsd`][56562] +- [Fix broken go-to-definition in RLS][rls/1171] +- [Fix infinite loop on hover in RLS][rls/1170] + +[56562]: https://github.com/rust-lang/rust/pull/56562 +[rls/1171]: https://github.com/rust-lang/rls/issues/1171 +[rls/1170]: https://github.com/rust-lang/rls/pull/1170 + Version 1.31.0 (2018-12-06) ========================== Language -------- -- 🎉 [This version marks the release of the 2018 edition of Rust.][54057] 🎉 +- 🎉 [This version marks the release of the 2018 edition of Rust.][54057] 🎉 - [New lifetime elision rules now allow for eliding lifetimes in functions and impl headers.][54778] E.g. `impl<'a> Reader for BufReader<'a> {}` can now be `impl Reader for BufReader<'_> {}`. Lifetimes are still required to be defined diff --git a/RUST_README.md b/RUST_README.md new file mode 100644 index 0000000000000..55e6e8d7f1889 --- /dev/null +++ b/RUST_README.md @@ -0,0 +1,263 @@ +# The Rust Programming Language + +This is the main source code repository for [Rust]. It contains the compiler, +standard library, and documentation. + +[Rust]: https://www.rust-lang.org + +## Quick Start +[quick-start]: #quick-start + +Read ["Installation"] from [The Book]. + +["Installation"]: https://doc.rust-lang.org/book/ch01-01-installation.html +[The Book]: https://doc.rust-lang.org/book/index.html + +## Installing from Source +[building-from-source]: #building-from-source + +_Note: If you wish to contribute to the compiler, you should read +[this chapter](https://rust-lang.github.io/rustc-guide/how-to-build-and-run.html) +of the rustc-guide instead._ + +### Building on *nix +1. Make sure you have installed the dependencies: + + * `g++` 4.7 or later or `clang++` 3.x or later + * `python` 2.7 (but not 3.x) + * GNU `make` 3.81 or later + * `cmake` 3.4.3 or later + * `curl` + * `git` + +2. Clone the [source] with `git`: + + ```sh + $ git clone https://github.com/rust-lang/rust.git + $ cd rust + ``` + +[source]: https://github.com/rust-lang/rust + +3. Build and install: + + ```sh + $ ./x.py build && sudo ./x.py install + ``` + + If after running `sudo ./x.py install` you see an error message like + + ``` + error: failed to load source for a dependency on 'cc' + ``` + + then run these two commands and then try `sudo ./x.py install` again: + + ``` + $ cargo install cargo-vendor + ``` + + ``` + $ cargo vendor + ``` + + > ***Note:*** Install locations can be adjusted by copying the config file + > from `./config.toml.example` to `./config.toml`, and + > adjusting the `prefix` option under `[install]`. Various other options, such + > as enabling debug information, are also supported, and are documented in + > the config file. + + When complete, `sudo ./x.py install` will place several programs into + `/usr/local/bin`: `rustc`, the Rust compiler, and `rustdoc`, the + API-documentation tool. This install does not include [Cargo], + Rust's package manager, which you may also want to build. + +[Cargo]: https://github.com/rust-lang/cargo + +### Building on Windows +[building-on-windows]: #building-on-windows + +There are two prominent ABIs in use on Windows: the native (MSVC) ABI used by +Visual Studio, and the GNU ABI used by the GCC toolchain. 
Which version of Rust +you need depends largely on what C/C++ libraries you want to interoperate with: +for interop with software produced by Visual Studio use the MSVC build of Rust; +for interop with GNU software built using the MinGW/MSYS2 toolchain use the GNU +build. + +#### MinGW +[windows-mingw]: #windows-mingw + +[MSYS2][msys2] can be used to easily build Rust on Windows: + +[msys2]: https://msys2.github.io/ + +1. Grab the latest [MSYS2 installer][msys2] and go through the installer. + +2. Run `mingw32_shell.bat` or `mingw64_shell.bat` from wherever you installed + MSYS2 (i.e. `C:\msys64`), depending on whether you want 32-bit or 64-bit + Rust. (As of the latest version of MSYS2 you have to run `msys2_shell.cmd + -mingw32` or `msys2_shell.cmd -mingw64` from the command line instead) + +3. From this terminal, install the required tools: + + ```sh + # Update package mirrors (may be needed if you have a fresh install of MSYS2) + $ pacman -Sy pacman-mirrors + + # Install build tools needed for Rust. If you're building a 32-bit compiler, + # then replace "x86_64" below with "i686". If you've already got git, python, + # or CMake installed and in PATH you can remove them from this list. Note + # that it is important that you do **not** use the 'python2' and 'cmake' + # packages from the 'msys2' subsystem. The build has historically been known + # to fail with these packages. + $ pacman -S git \ + make \ + diffutils \ + tar \ + mingw-w64-x86_64-python2 \ + mingw-w64-x86_64-cmake \ + mingw-w64-x86_64-gcc + ``` + +4. Navigate to Rust's source code (or clone it), then build it: + + ```sh + $ ./x.py build && ./x.py install + ``` + +#### MSVC +[windows-msvc]: #windows-msvc + +MSVC builds of Rust additionally require an installation of Visual Studio 2013 +(or later) so `rustc` can use its linker. Make sure to check the “C++ tools” +option. + +With these dependencies installed, you can build the compiler in a `cmd.exe` +shell with: + +```sh +> python x.py build +``` + +Currently, building Rust only works with some known versions of Visual Studio. If +you have a more recent version installed the build system doesn't understand +then you may need to force rustbuild to use an older version. This can be done +by manually calling the appropriate vcvars file before running the bootstrap. + +```batch +> CALL "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64\vcvars64.bat" +> python x.py build +``` + +#### Specifying an ABI +[specifying-an-abi]: #specifying-an-abi + +Each specific ABI can also be used from either environment (for example, using +the GNU ABI in PowerShell) by using an explicit build triple. The available +Windows build triples are: +- GNU ABI (using GCC) + - `i686-pc-windows-gnu` + - `x86_64-pc-windows-gnu` +- The MSVC ABI + - `i686-pc-windows-msvc` + - `x86_64-pc-windows-msvc` + +The build triple can be specified by either specifying `--build=` when +invoking `x.py` commands, or by copying the `config.toml` file (as described +in Building From Source), and modifying the `build` option under the `[build]` +section. + +### Configure and Make +[configure-and-make]: #configure-and-make + +While it's not the recommended build system, this project also provides a +configure script and makefile (the latter of which just invokes `x.py`). + +```sh +$ ./configure +$ make && sudo make install +``` + +When using the configure script, the generated `config.mk` file may override the +`config.toml` file. To go back to the `config.toml` file, delete the generated +`config.mk` file. 
+ +## Building Documentation +[building-documentation]: #building-documentation + +If you’d like to build the documentation, it’s almost the same: + +```sh +$ ./x.py doc +``` + +The generated documentation will appear under `doc` in the `build` directory for +the ABI used. I.e., if the ABI was `x86_64-pc-windows-msvc`, the directory will be +`build\x86_64-pc-windows-msvc\doc`. + +## Notes +[notes]: #notes + +Since the Rust compiler is written in Rust, it must be built by a +precompiled "snapshot" version of itself (made in an earlier stage of +development). As such, source builds require a connection to the Internet, to +fetch snapshots, and an OS that can execute the available snapshot binaries. + +Snapshot binaries are currently built and tested on several platforms: + +| Platform / Architecture | x86 | x86_64 | +|--------------------------|-----|--------| +| Windows (7, 8, 10, ...) | ✓ | ✓ | +| Linux (2.6.18 or later) | ✓ | ✓ | +| OSX (10.7 Lion or later) | ✓ | ✓ | + +You may find that other platforms work, but these are our officially +supported build environments that are most likely to work. + +There is more advice about hacking on Rust in [CONTRIBUTING.md]. + +[CONTRIBUTING.md]: https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.md + +## Getting Help +[getting-help]: #getting-help + +The Rust community congregates in a few places: + +* [Stack Overflow] - Direct questions about using the language. +* [users.rust-lang.org] - General discussion and broader questions. +* [/r/rust] - News and general discussion. + +[Stack Overflow]: https://stackoverflow.com/questions/tagged/rust +[/r/rust]: https://reddit.com/r/rust +[users.rust-lang.org]: https://users.rust-lang.org/ + +## Contributing +[contributing]: #contributing + +To contribute to Rust, please see [CONTRIBUTING](CONTRIBUTING.md). + +Rust has an [IRC] culture and most real-time collaboration happens in a +variety of channels on Mozilla's IRC network, irc.mozilla.org. The +most popular channel is [#rust], a venue for general discussion about +Rust. And a good place to ask for help would be [#rust-beginners]. + +The [rustc guide] might be a good place to start if you want to find out how +various parts of the compiler work. + +Also, you may find the [rustdocs for the compiler itself][rustdocs] useful. + +[IRC]: https://en.wikipedia.org/wiki/Internet_Relay_Chat +[#rust]: irc://irc.mozilla.org/rust +[#rust-beginners]: irc://irc.mozilla.org/rust-beginners +[rustc guide]: https://rust-lang.github.io/rustc-guide/about-this-guide.html +[rustdocs]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc/ + +## License +[license]: #license + +Rust is primarily distributed under the terms of both the MIT license +and the Apache License (Version 2.0), with portions covered by various +BSD-like licenses. + +See [LICENSE-APACHE](LICENSE-APACHE), [LICENSE-MIT](LICENSE-MIT), and +[COPYRIGHT](COPYRIGHT) for details. diff --git a/appveyor.yml b/appveyor.yml index f043b8bfefca7..be960dc13af57 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,46 +1,46 @@ environment: + # This is required for at least an AArch64 compiler in one image, and is also + # going to soon be required for compiling LLVM. + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 Preview # By default schannel checks revocation of certificates unlike some other SSL # backends, but we've historically had problems on CI where a revocation # server goes down presumably. 
See #43333 for more info CARGO_HTTP_CHECK_REVOKE: false - # Recommended by AppVeyor this moves our builds to GCE which incurs a 3-4 - # minute startup overhead, but that's paltry compared to our overall build - # times so we're will to eat the cost. This is intended to give us better - # performance I believe! - appveyor_build_worker_cloud: gce - matrix: # 32/64 bit MSVC tests - - MSYS_BITS: 64 + - CI_JOB_NAME: x86_64-msvc + MSYS_BITS: 64 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --enable-profiler SCRIPT: python x.py test - CI_JOB_NAME: x86_64-msvc - - MSYS_BITS: 32 + # FIXME(#59637) + NO_DEBUG_ASSERTIONS: 1 + NO_LLVM_ASSERTIONS: 1 + - CI_JOB_NAME: i686-msvc-1 + MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc SCRIPT: make appveyor-subset-1 - CI_JOB_NAME: i686-msvc-1 - - MSYS_BITS: 32 + - CI_JOB_NAME: i686-msvc-2 + MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-msvc SCRIPT: make appveyor-subset-2 - CI_JOB_NAME: i686-msvc-2 # MSVC aux tests - - MSYS_BITS: 64 + - CI_JOB_NAME: x86_64-msvc-aux + MSYS_BITS: 64 RUST_CHECK_TARGET: check-aux EXCLUDE_CARGO=1 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc - CI_JOB_NAME: x86_64-msvc-aux - - MSYS_BITS: 64 + - CI_JOB_NAME: x86_64-msvc-cargo + MSYS_BITS: 64 SCRIPT: python x.py test src/tools/cargotest src/tools/cargo RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc - CI_JOB_NAME: x86_64-msvc-cargo # MSVC tools tests - - MSYS_BITS: 64 + - CI_JOB_NAME: x86_64-msvc-tools + MSYS_BITS: 64 SCRIPT: src/ci/docker/x86_64-gnu-tools/checktools.sh x.py /tmp/toolstates.json windows RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --save-toolstates=/tmp/toolstates.json --enable-test-miri - CI_JOB_NAME: x86_64-msvc-tools # 32/64-bit MinGW builds. # @@ -55,30 +55,37 @@ environment: # bucket, but they cleraly didn't originate there! The downloads originally # came from the mingw-w64 SourceForge download site. Unfortunately # SourceForge is notoriously flaky, so we mirror it on our own infrastructure. 
- - MSYS_BITS: 32 + - CI_JOB_NAME: i686-mingw-1 + MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu SCRIPT: make appveyor-subset-1 MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z MINGW_DIR: mingw32 - CI_JOB_NAME: i686-mingw-1 - - MSYS_BITS: 32 + # FIXME(#59637) + NO_DEBUG_ASSERTIONS: 1 + NO_LLVM_ASSERTIONS: 1 + - CI_JOB_NAME: i686-mingw-2 + MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu SCRIPT: make appveyor-subset-2 MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: i686-6.3.0-release-posix-dwarf-rt_v5-rev2.7z MINGW_DIR: mingw32 - CI_JOB_NAME: i686-mingw-2 - - MSYS_BITS: 64 + - CI_JOB_NAME: x86_64-mingw + MSYS_BITS: 64 SCRIPT: python x.py test RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror MINGW_ARCHIVE: x86_64-6.3.0-release-posix-seh-rt_v5-rev2.7z MINGW_DIR: mingw64 - CI_JOB_NAME: x86_64-mingw + # FIXME(#59637) + NO_DEBUG_ASSERTIONS: 1 + NO_LLVM_ASSERTIONS: 1 # 32/64 bit MSVC and GNU deployment - - RUST_CONFIGURE_ARGS: > + - CI_JOB_NAME: dist-x86_64-msvc + RUST_CONFIGURE_ARGS: > --build=x86_64-pc-windows-msvc --target=x86_64-pc-windows-msvc,aarch64-pc-windows-msvc --enable-full-tools @@ -86,9 +93,8 @@ environment: SCRIPT: python x.py dist DIST_REQUIRE_ALL_TOOLS: 1 DEPLOY: 1 - CI_JOB_NAME: dist-x86_64-msvc - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 Preview - - RUST_CONFIGURE_ARGS: > + - CI_JOB_NAME: dist-i686-msvc + RUST_CONFIGURE_ARGS: > --build=i686-pc-windows-msvc --target=i586-pc-windows-msvc --enable-full-tools @@ -96,8 +102,8 @@ environment: SCRIPT: python x.py dist DIST_REQUIRE_ALL_TOOLS: 1 DEPLOY: 1 - CI_JOB_NAME: dist-i686-msvc - - MSYS_BITS: 32 + - CI_JOB_NAME: dist-i686-mingw + MSYS_BITS: 32 RUST_CONFIGURE_ARGS: --build=i686-pc-windows-gnu --enable-full-tools SCRIPT: python x.py dist MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror @@ -105,8 +111,8 @@ environment: MINGW_DIR: mingw32 DIST_REQUIRE_ALL_TOOLS: 1 DEPLOY: 1 - CI_JOB_NAME: dist-i686-mingw - - MSYS_BITS: 64 + - CI_JOB_NAME: dist-x86_64-mingw + MSYS_BITS: 64 SCRIPT: python x.py dist RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-gnu --enable-full-tools MINGW_URL: https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror @@ -114,14 +120,13 @@ environment: MINGW_DIR: mingw64 DIST_REQUIRE_ALL_TOOLS: 1 DEPLOY: 1 - CI_JOB_NAME: dist-x86_64-mingw # "alternate" deployment, see .travis.yml for more info - - MSYS_BITS: 64 + - CI_JOB_NAME: dist-x86_64-msvc-alt + MSYS_BITS: 64 RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --enable-extended --enable-profiler SCRIPT: python x.py dist DEPLOY_ALT: 1 - CI_JOB_NAME: dist-x86_64-msvc-alt matrix: fast_finish: true @@ -130,6 +135,8 @@ clone_depth: 2 build: false install: + # Print which AppVeyor agent version we're running on. + - appveyor version # If we need to download a custom MinGW, do so here and set the path # appropriately. 
# diff --git a/config.toml.example b/config.toml.example index f75e220de47e7..cdd9cc45064de 100644 --- a/config.toml.example +++ b/config.toml.example @@ -14,10 +14,6 @@ # ============================================================================= [llvm] -# Indicates whether rustc will support compilation with LLVM -# note: rustc does not compile without LLVM at the moment -#enabled = true - # Indicates whether the LLVM build is a Release or Debug build #optimize = true @@ -61,14 +57,14 @@ # support. You'll need to write a target specification at least, and most # likely, teach rustc about the C ABI of the target. Get in touch with the # Rust team and file an issue if you need assistance in porting! -#targets = "X86;ARM;AArch64;Mips;PowerPC;SystemZ;JSBackend;MSP430;Sparc;NVPTX;Hexagon" +#targets = "X86;ARM;AArch64;Mips;PowerPC;SystemZ;MSP430;Sparc;NVPTX;Hexagon" # LLVM experimental targets to build support for. These targets are specified in # the same format as above, but since these targets are experimental, they are # not built by default and the experimental Rust compilation targets that depend # on them will not work unless the user opts in to building them. By default the # `WebAssembly` and `RISCV` targets are enabled when compiling LLVM from scratch. -#experimental-targets = "WebAssembly;RISCV" +#experimental-targets = "AVR;WebAssembly;RISCV" # Cap the number of parallel linker invocations when compiling LLVM. # This can be useful when building LLVM with debug info, which significantly @@ -90,6 +86,23 @@ # with clang-cl, so this is special in that it only compiles LLVM with clang-cl #clang-cl = '/path/to/clang-cl.exe' +# Pass extra compiler and linker flags to the LLVM CMake build. +#cflags = "-fextra-flag" +#cxxflags = "-fextra-flag" +#ldflags = "-Wl,extra-flag" + +# Use libc++ when building LLVM instead of libstdc++. This is the default on +# platforms that already use libc++ as their default C++ library, but this option +# allows you to use libc++ even on platforms where it's not. You need to ensure +# that your host compiler ships with libc++. +#use-libcxx = true + +# The value specified here will be passed as `-DLLVM_USE_LINKER` to CMake. +#use-linker = "lld" + +# Whether or not to specify `-DLLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN=YES` +#allow-old-toolchain = false + # ============================================================================= # General build configuration options # ============================================================================= @@ -147,6 +160,9 @@ # Python interpreter to use for various tasks throughout the build, notably # rustdoc tests, the lldb python interpreter, and some dist bits and pieces. # Note that Python 2 is currently required. +# +# Defaults to python2.7, then python2. If neither executable can be found, then +# it defaults to the Python interpreter used to execute x.py. #python = "python2.7" # Force Cargo to check that Cargo.lock describes the precise dependency @@ -171,7 +187,7 @@ # Installs chosen set of extended tools if enables. By default builds all. # If chosen tool failed to build the installation fails. -#tools = ["cargo", "rls", "rustfmt", "analysis", "src"] +#tools = ["cargo", "rls", "clippy", "rustfmt", "analysis", "src"] # Verbosity level: 0 == not verbose, 1 == verbose, 2 == very verbose #verbose = 0 @@ -282,7 +298,7 @@ #codegen-units-std = 1 # Whether or not debug assertions are enabled for the compiler and standard -# library. Also enables compilation of debug! and trace! logging macros. +# library. 
#debug-assertions = false # Whether or not debuginfo is emitted @@ -306,8 +322,8 @@ # Whether to always use incremental compilation when building rustc #incremental = false -# Build rustc with experimental parallelization -#experimental-parallel-queries = false +# Build a multi-threaded rustc +#parallel-compiler = false # The default linker that will be hard-coded into the generated compiler for # targets that don't specify linker explicitly in their target specifications. @@ -405,6 +421,9 @@ # development of NLL #test-compare-mode = false +# Use LLVM libunwind as the implementation for Rust's unwinder. +#llvm-libunwind = false + # ============================================================================= # Options for specific targets # @@ -461,6 +480,9 @@ # linked binaries #musl-root = "..." +# The root location of the `wasm32-unknown-wasi` sysroot. +#wasi-root = "..." + # Used in testing for configuring where the QEMU images are located, you # probably don't want to use this. #qemu-rootfs = "..." diff --git a/src/README.md b/src/README.md index 65228915866ea..14e773286bc6a 100644 --- a/src/README.md +++ b/src/README.md @@ -8,7 +8,6 @@ For more information on how various parts of the compiler work, see the [rustc g There is also useful content in the following READMEs, which are gradually being moved over to the guide: - https://github.com/rust-lang/rust/tree/master/src/librustc/ty/query - https://github.com/rust-lang/rust/tree/master/src/librustc/dep_graph -- https://github.com/rust-lang/rust/blob/master/src/librustc/infer/region_constraints - https://github.com/rust-lang/rust/tree/master/src/librustc/infer/higher_ranked - https://github.com/rust-lang/rust/tree/master/src/librustc/infer/lexical_region_resolve diff --git a/src/bootstrap/Cargo.toml b/src/bootstrap/Cargo.toml index 0f7b6c22e1cc5..d4ffd4ad39675 100644 --- a/src/bootstrap/Cargo.toml +++ b/src/bootstrap/Cargo.toml @@ -36,11 +36,11 @@ test = false [dependencies] build_helper = { path = "../build_helper" } -cmake = "0.1.23" +cmake = "0.1.38" filetime = "0.2" num_cpus = "1.0" getopts = "0.2" -cc = "1.0.1" +cc = "1.0.35" libc = "0.2" serde = "1.0.8" serde_derive = "1.0.8" diff --git a/src/bootstrap/bin/llvm-config-wrapper.rs b/src/bootstrap/bin/llvm-config-wrapper.rs index b1703f8c728e2..5e3625eb22e52 100644 --- a/src/bootstrap/bin/llvm-config-wrapper.rs +++ b/src/bootstrap/bin/llvm-config-wrapper.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // The sheer existence of this file is an awful hack. See the comments in // `src/bootstrap/native.rs` for why this is needed when compiling LLD. diff --git a/src/bootstrap/bin/main.rs b/src/bootstrap/bin/main.rs index d02bc7972ae9a..0732cb83f39c6 100644 --- a/src/bootstrap/bin/main.rs +++ b/src/bootstrap/bin/main.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! rustbuild, the Rust build system //! //! 
This is the entry point for the build system used to compile the `rustc` @@ -17,8 +7,6 @@ #![deny(warnings)] -extern crate bootstrap; - use std::env; use bootstrap::{Config, Build}; diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index d18a48e5d2270..86ce5fd01a812 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Shim which is passed to Cargo as "rustc" when running the bootstrap. //! //! This shim will take care of some various tasks that our build process @@ -27,8 +17,6 @@ #![deny(warnings)] -extern crate bootstrap; - use std::env; use std::ffi::OsString; use std::io; @@ -119,6 +107,14 @@ fn main() { // actually downloaded, so we just always pass the `--sysroot` option. cmd.arg("--sysroot").arg(&sysroot); + cmd.arg("-Zexternal-macro-backtrace"); + + // Link crates to the proc macro crate for the target, but use a host proc macro crate + // to actually run the macros + if env::var_os("RUST_DUAL_PROC_MACROS").is_some() { + cmd.arg("-Zdual-proc-macros"); + } + // When we build Rust dylibs they're all intended for intermediate // usage, so make sure we pass the -Cprefer-dynamic flag instead of // linking all deps statically into the dylib. @@ -126,8 +122,8 @@ fn main() { cmd.arg("-Cprefer-dynamic"); } - // Help the libc crate compile by assisting it in finding the MUSL - // native libraries. + // Help the libc crate compile by assisting it in finding various + // sysroot native libraries. if let Some(s) = env::var_os("MUSL_ROOT") { if target.contains("musl") { let mut root = OsString::from("native="); @@ -136,6 +132,12 @@ fn main() { cmd.arg("-L").arg(&root); } } + if let Some(s) = env::var_os("WASI_ROOT") { + let mut root = OsString::from("native="); + root.push(&s); + root.push("/lib/wasm32-wasi"); + cmd.arg("-L").arg(&root); + } // Override linker if necessary. if let Ok(target_linker) = env::var("RUSTC_TARGET_LINKER") { @@ -185,6 +187,33 @@ fn main() { cmd.arg("-C").arg(format!("debug-assertions={}", debug_assertions)); } + // Build all crates in the `std` facade with `-Z emit-stack-sizes` to add stack usage + // information. + // + // When you use this `-Z` flag with Cargo you get stack usage information on all crates + // compiled from source, and when you are using LTO you also get information on pre-compiled + // crates like `core` and `std`, even if they were not compiled with `-Z emit-stack-sizes`. + // However, there's an exception: `compiler_builtins`. This crate is special and doesn't + // participate in LTO because it's always linked as a separate object file. For this reason + // it's impossible to get stack usage information about `compiler-builtins` using + // `RUSTFLAGS` + Cargo, or `cargo rustc`. + // + // To make the stack usage information of all crates under the `std` facade available to + // Cargo based stack usage analysis tools, in both LTO and non-LTO mode, we compile them + // with the `-Z emit-stack-sizes` flag. The `RUSTC_EMIT_STACK_SIZES` var helps us apply this + // flag only to the crates in the `std` facade. The `-Z` flag is known to currently work + // with targets that produce ELF files so we limit its use flag to those targets. 
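A small standalone sketch of the pattern the rustc shim uses above for MUSL_ROOT and WASI_ROOT: when the corresponding env var is set, a `-L native=<root>/<subdir>` search path is appended to the real compiler invocation. This version only prints the argument it would add; the helper name is illustrative.

    use std::env;
    use std::ffi::OsString;

    // When a sysroot env var (e.g. WASI_ROOT) is present, build the value for
    // a `-L native=...` flag pointing into it.
    fn native_search_path(var: &str, subdir: &str) -> Option<OsString> {
        env::var_os(var).map(|root| {
            let mut arg = OsString::from("native=");
            arg.push(&root);
            arg.push("/");
            arg.push(subdir);
            arg
        })
    }

    fn main() {
        if let Some(arg) = native_search_path("WASI_ROOT", "lib/wasm32-wasi") {
            println!("-L {}", arg.to_string_lossy());
        }
    }
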
+ // + // NOTE(japaric) if this ever causes problem with an LLVM upgrade or any PR feel free to + // remove it or comment it out + if env::var_os("RUSTC_EMIT_STACK_SIZES").is_some() + && (target.contains("-linux-") + || target.contains("-none-eabi") + || target.ends_with("-none-elf")) + { + cmd.arg("-Zemit-stack-sizes"); + } + if let Ok(s) = env::var("RUSTC_CODEGEN_UNITS") { cmd.arg("-C").arg(format!("codegen-units={}", s)); } @@ -268,13 +297,6 @@ fn main() { } } - // Force all crates compiled by this compiler to (a) be unstable and (b) - // allow the `rustc_private` feature to link to other unstable crates - // also in the sysroot. - if env::var_os("RUSTC_FORCE_UNSTABLE").is_some() { - cmd.arg("-Z").arg("force-unstable-if-unmarked"); - } - if let Ok(map) = env::var("RUSTC_DEBUGINFO_MAP") { cmd.arg("--remap-path-prefix").arg(&map); } @@ -294,8 +316,21 @@ fn main() { } } - if env::var_os("RUSTC_PARALLEL_QUERIES").is_some() { - cmd.arg("--cfg").arg("parallel_queries"); + // This is required for internal lints. + if stage != "0" { + cmd.arg("-Zunstable-options"); + } + + // Force all crates compiled by this compiler to (a) be unstable and (b) + // allow the `rustc_private` feature to link to other unstable crates + // also in the sysroot. We also do this for host crates, since those + // may be proc macros, in which case we might ship them. + if env::var_os("RUSTC_FORCE_UNSTABLE").is_some() && (stage != "0" || target.is_some()) { + cmd.arg("-Z").arg("force-unstable-if-unmarked"); + } + + if env::var_os("RUSTC_PARALLEL_COMPILER").is_some() { + cmd.arg("--cfg").arg("parallel_compiler"); } if env::var_os("RUSTC_DENY_WARNINGS").is_some() && env::var_os("RUSTC_EXTERNAL_TOOL").is_none() diff --git a/src/bootstrap/bin/rustdoc.rs b/src/bootstrap/bin/rustdoc.rs index bb5a21e3e405f..1c9f6e1ab285c 100644 --- a/src/bootstrap/bin/rustdoc.rs +++ b/src/bootstrap/bin/rustdoc.rs @@ -1,21 +1,9 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Shim which is passed to Cargo as "rustdoc" when running the bootstrap. //! //! See comments in `src/bootstrap/rustc.rs` for more information. #![deny(warnings)] -extern crate bootstrap; - use std::env; use std::process::Command; use std::path::PathBuf; @@ -26,6 +14,7 @@ fn main() { let libdir = env::var_os("RUSTDOC_LIBDIR").expect("RUSTDOC_LIBDIR was not set"); let stage = env::var("RUSTC_STAGE").expect("RUSTC_STAGE was not set"); let sysroot = env::var_os("RUSTC_SYSROOT").expect("RUSTC_SYSROOT was not set"); + let mut has_unstable = false; use std::str::FromStr; @@ -46,7 +35,7 @@ fn main() { .arg("--cfg") .arg("dox") .arg("--sysroot") - .arg(sysroot) + .arg(&sysroot) .env(bootstrap::util::dylib_path_var(), env::join_paths(&dylib_path).unwrap()); @@ -64,13 +53,43 @@ fn main() { // it up so we can make rustdoc print this into the docs if let Some(version) = env::var_os("RUSTDOC_CRATE_VERSION") { // This "unstable-options" can be removed when `--crate-version` is stabilized - cmd.arg("-Z") - .arg("unstable-options") - .arg("--crate-version").arg(version); + if !has_unstable { + cmd.arg("-Z") + .arg("unstable-options"); + } + cmd.arg("--crate-version").arg(version); + has_unstable = true; + } + + // Needed to be able to run all rustdoc tests. 
+ if let Some(_) = env::var_os("RUSTDOC_GENERATE_REDIRECT_PAGES") { + // This "unstable-options" can be removed when `--generate-redirect-pages` is stabilized + if !has_unstable { + cmd.arg("-Z") + .arg("unstable-options"); + } + cmd.arg("--generate-redirect-pages"); + has_unstable = true; + } + + // Needed to be able to run all rustdoc tests. + if let Some(ref x) = env::var_os("RUSTDOC_RESOURCE_SUFFIX") { + // This "unstable-options" can be removed when `--resource-suffix` is stabilized + if !has_unstable { + cmd.arg("-Z") + .arg("unstable-options"); + } + cmd.arg("--resource-suffix").arg(x); } if verbose > 1 { - eprintln!("rustdoc command: {:?}", cmd); + eprintln!( + "rustdoc command: {:?}={:?} {:?}", + bootstrap::util::dylib_path_var(), + env::join_paths(&dylib_path).unwrap(), + cmd, + ); + eprintln!("sysroot: {:?}", sysroot); eprintln!("libdir: {:?}", libdir); } diff --git a/src/bootstrap/bin/sccache-plus-cl.rs b/src/bootstrap/bin/sccache-plus-cl.rs index 0a20ac7e492dc..f40eec83ddf32 100644 --- a/src/bootstrap/bin/sccache-plus-cl.rs +++ b/src/bootstrap/bin/sccache-plus-cl.rs @@ -1,15 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern crate cc; - use std::env; use std::process::{self, Command}; diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index d143dffb24be5..8af7aa4856c38 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -1,13 +1,3 @@ -# Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
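The rustdoc shim changes above all share one shape: several optional flags are unstable, and `-Z unstable-options` must be emitted exactly once before the first of them. A standalone sketch of that bookkeeping; the `Args` type here is illustrative, the real shim mutates a `Command` instead.

    use std::env;

    // Collect rustdoc arguments, inserting `-Z unstable-options` only once,
    // immediately before the first unstable flag that is actually needed.
    struct Args {
        args: Vec<String>,
        has_unstable: bool,
    }

    impl Args {
        fn new() -> Self {
            Args { args: Vec::new(), has_unstable: false }
        }

        fn push_unstable(&mut self, flag: &str) {
            if !self.has_unstable {
                self.args.push("-Z".to_string());
                self.args.push("unstable-options".to_string());
                self.has_unstable = true;
            }
            self.args.push(flag.to_string());
        }
    }

    fn main() {
        let mut args = Args::new();
        if env::var_os("RUSTDOC_CRATE_VERSION").is_some() {
            args.push_unstable("--crate-version");
        }
        if env::var_os("RUSTDOC_GENERATE_REDIRECT_PAGES").is_some() {
            args.push_unstable("--generate-redirect-pages");
        }
        println!("{:?}", args.args);
    }
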
- from __future__ import absolute_import, division, print_function import argparse import contextlib @@ -240,6 +230,9 @@ def default_build_triple(): err = "unknown OS type: {}".format(ostype) sys.exit(err) + if cputype == 'powerpc' and ostype == 'unknown-freebsd': + cputype = subprocess.check_output( + ['uname', '-p']).strip().decode(default_encoding) cputype_mapper = { 'BePC': 'i686', 'aarch64': 'aarch64', @@ -269,6 +262,10 @@ def default_build_triple(): cputype = 'arm' if ostype == 'linux-android': ostype = 'linux-androideabi' + elif ostype == 'unknown-freebsd': + cputype = subprocess.check_output( + ['uname', '-p']).strip().decode(default_encoding) + ostype = 'unknown-freebsd' elif cputype == 'armv6l': cputype = 'arm' if ostype == 'linux-android': @@ -681,7 +678,7 @@ def update_submodule(self, module, checked_out, recorded_submodules): run(["git", "submodule", "-q", "sync", module], cwd=self.rust_root, verbose=self.verbose) run(["git", "submodule", "update", - "--init", "--recursive", module], + "--init", "--recursive", "--progress", module], cwd=self.rust_root, verbose=self.verbose) run(["git", "reset", "-q", "--hard"], cwd=module_path, verbose=self.verbose) @@ -708,21 +705,13 @@ def update_submodules(self): filtered_submodules = [] submodules_names = [] for module in submodules: - if module.endswith("llvm"): - if self.get_toml('llvm-config'): + if module.endswith("llvm-project"): + if self.get_toml('llvm-config') and self.get_toml('lld') != 'true': continue if module.endswith("llvm-emscripten"): backends = self.get_toml('codegen-backends') if backends is None or not 'emscripten' in backends: continue - if module.endswith("lld"): - config = self.get_toml('lld') - if config is None or config == 'false': - continue - if module.endswith("lldb") or module.endswith("clang"): - config = self.get_toml('lldb') - if config is None or config == 'false': - continue check = self.check_submodule(module, slow_submodules) filtered_submodules.append((module, check)) submodules_names.append(module) @@ -845,7 +834,7 @@ def main(): # x.py help ... if len(sys.argv) > 1 and sys.argv[1] == 'help': - sys.argv = sys.argv[:1] + [sys.argv[2], '-h'] + sys.argv[3:] + sys.argv = [sys.argv[0], '-h'] + sys.argv[2:] help_triggered = ( '-h' in sys.argv) or ('--help' in sys.argv) or (len(sys.argv) == 1) diff --git a/src/bootstrap/bootstrap_test.py b/src/bootstrap/bootstrap_test.py index 4db7e2ec016f0..689298f32d03c 100644 --- a/src/bootstrap/bootstrap_test.py +++ b/src/bootstrap/bootstrap_test.py @@ -1,13 +1,3 @@ -# Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - """Bootstrap tests""" from __future__ import absolute_import, division, print_function diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 405fc871eef76..522466314d660 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
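The `x.py help` fix above replaces an argv rewrite that indexed `sys.argv[2]` (and so failed on a bare `x.py help`) with one that keeps only the program name, inserts `-h`, and forwards the rest. The same logic re-expressed in Rust, purely for illustration of the new behaviour:

    // Rewrite `x.py help <command> ...` into `x.py -h <command> ...`,
    // mirroring the new `sys.argv = [sys.argv[0], '-h'] + sys.argv[2:]`.
    fn rewrite_help(argv: Vec<String>) -> Vec<String> {
        if argv.len() > 1 && argv[1] == "help" {
            let mut out = vec![argv[0].clone(), "-h".to_string()];
            out.extend(argv.into_iter().skip(2));
            out
        } else {
            argv
        }
    }

    fn main() {
        let argv: Vec<String> = vec!["x.py".into(), "help".into(), "build".into()];
        assert_eq!(rewrite_help(argv), vec!["x.py", "-h", "build"]);
        // A bare `x.py help` no longer reaches past the end of argv:
        assert_eq!(rewrite_help(vec!["x.py".into(), "help".into()]), vec!["x.py", "-h"]);
    }
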
- use std::any::Any; use std::cell::{Cell, RefCell}; use std::collections::BTreeSet; @@ -31,7 +21,7 @@ use crate::install; use crate::native; use crate::test; use crate::tool; -use crate::util::{add_lib_path, exe, libdir}; +use crate::util::{self, add_lib_path, exe, libdir}; use crate::{Build, DocTests, Mode, GitRepo}; pub use crate::Compiler; @@ -70,23 +60,23 @@ pub trait Step: 'static + Clone + Debug + PartialEq + Eq + Hash { /// Run this rule for all hosts without cross compiling. const ONLY_HOSTS: bool = false; - /// Primary function to execute this rule. Can call `builder.ensure(...)` + /// Primary function to execute this rule. Can call `builder.ensure()` /// with other steps to run those. - fn run(self, builder: &Builder) -> Self::Output; + fn run(self, builder: &Builder<'_>) -> Self::Output; /// When bootstrap is passed a set of paths, this controls whether this rule /// will execute. However, it does not get called in a "default" context - /// when we are not passed any paths; in that case, make_run is called + /// when we are not passed any paths; in that case, `make_run` is called /// directly. - fn should_run(run: ShouldRun) -> ShouldRun; + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_>; - /// Build up a "root" rule, either as a default rule or from a path passed + /// Builds up a "root" rule, either as a default rule or from a path passed /// to us. /// /// When path is `None`, we are executing in a context where no paths were /// passed. When `./x.py build` is run, for example, this rule could get /// called if it is in the correct list below with a path of `None`. - fn make_run(_run: RunConfig) { + fn make_run(_run: RunConfig<'_>) { // It is reasonable to not have an implementation of make_run for rules // who do not want to get called from the root context. This means that // they are likely dependencies (e.g., sysroot creation) or similar, and @@ -105,8 +95,8 @@ pub struct RunConfig<'a> { struct StepDescription { default: bool, only_hosts: bool, - should_run: fn(ShouldRun) -> ShouldRun, - make_run: fn(RunConfig), + should_run: fn(ShouldRun<'_>) -> ShouldRun<'_>, + make_run: fn(RunConfig<'_>), name: &'static str, } @@ -134,7 +124,7 @@ impl PathSet { } } - fn path(&self, builder: &Builder) -> PathBuf { + fn path(&self, builder: &Builder<'_>) -> PathBuf { match self { PathSet::Set(set) => set .iter() @@ -157,7 +147,7 @@ impl StepDescription { } } - fn maybe_run(&self, builder: &Builder, pathset: &PathSet) { + fn maybe_run(&self, builder: &Builder<'_>, pathset: &PathSet) { if builder.config.exclude.iter().any(|e| pathset.has(e)) { eprintln!("Skipping {:?} because it is excluded", pathset); return; @@ -193,7 +183,7 @@ impl StepDescription { } } - fn run(v: &[StepDescription], builder: &Builder, paths: &[PathBuf]) { + fn run(v: &[StepDescription], builder: &Builder<'_>, paths: &[PathBuf]) { let should_runs = v .iter() .map(|desc| (desc.should_run)(ShouldRun::new(builder))) @@ -255,7 +245,7 @@ pub struct ShouldRun<'a> { } impl<'a> ShouldRun<'a> { - fn new(builder: &'a Builder) -> ShouldRun<'a> { + fn new(builder: &'a Builder<'_>) -> ShouldRun<'a> { ShouldRun { builder, paths: BTreeSet::new(), @@ -336,7 +326,7 @@ pub enum Kind { impl<'a> Builder<'a> { fn get_step_descriptions(kind: Kind) -> Vec { macro_rules! describe { - ($($rule:ty),+ $(,)*) => {{ + ($($rule:ty),+ $(,)?) 
=> {{ vec![$(StepDescription::from::<$rule>()),+] }}; } @@ -384,18 +374,16 @@ impl<'a> Builder<'a> { test::MirOpt, test::Codegen, test::CodegenUnits, + test::Assembly, test::Incremental, test::Debuginfo, test::UiFullDeps, test::RunPassFullDeps, - test::RunFailFullDeps, test::Rustdoc, test::Pretty, test::RunPassPretty, test::RunFailPretty, test::RunPassValgrindPretty, - test::RunPassFullDepsPretty, - test::RunFailFullDepsPretty, test::Crate, test::CrateLibrustc, test::CrateRustdoc, @@ -413,17 +401,20 @@ impl<'a> Builder<'a> { test::TheBook, test::UnstableBook, test::RustcBook, + test::EmbeddedBook, + test::EditionGuide, test::Rustfmt, test::Miri, test::Clippy, test::CompiletestTest, - test::RustdocJS, + test::RustdocJSStd, + test::RustdocJSNotStd, test::RustdocTheme, + test::RustdocUi, // Run bootstrap close to the end as it's unlikely to fail test::Bootstrap, // Run run-make last, since these won't pass without make on Windows test::RunMake, - test::RustdocUi ), Kind::Bench => describe!(test::Crate, test::CrateLibrustc), Kind::Doc => describe!( @@ -443,6 +434,7 @@ impl<'a> Builder<'a> { doc::RustByExample, doc::RustcBook, doc::CargoBook, + doc::EmbeddedBook, doc::EditionGuide, ), Kind::Dist => describe!( @@ -459,6 +451,7 @@ impl<'a> Builder<'a> { dist::Rls, dist::Rustfmt, dist::Clippy, + dist::Miri, dist::LlvmTools, dist::Lldb, dist::Extended, @@ -471,6 +464,7 @@ impl<'a> Builder<'a> { install::Rls, install::Rustfmt, install::Clippy, + install::Miri, install::Analysis, install::Src, install::Rustc @@ -520,7 +514,7 @@ impl<'a> Builder<'a> { Some(help) } - pub fn new(build: &Build) -> Builder { + pub fn new(build: &Build) -> Builder<'_> { let (kind, paths) = match build.config.cmd { Subcommand::Build { ref paths } => (Kind::Build, &paths[..]), Subcommand::Check { ref paths } => (Kind::Check, &paths[..]), @@ -600,11 +594,11 @@ impl<'a> Builder<'a> { impl Step for Libdir { type Output = Interned; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } - fn run(self, builder: &Builder) -> Interned { + fn run(self, builder: &Builder<'_>) -> Interned { let compiler = self.compiler; let config = &builder.build.config; let lib = if compiler.stage >= 1 && config.libdir_relative().is_some() { @@ -640,7 +634,28 @@ impl<'a> Builder<'a> { if compiler.is_snapshot(self) { self.rustc_snapshot_libdir() } else { - self.sysroot(compiler).join(libdir(&compiler.host)) + match self.config.libdir_relative() { + Some(relative_libdir) if compiler.stage >= 1 + => self.sysroot(compiler).join(relative_libdir), + _ => self.sysroot(compiler).join(libdir(&compiler.host)) + } + } + } + + /// Returns the compiler's relative libdir where it stores the dynamic libraries that + /// it itself links against. + /// + /// For example this returns `lib` on Unix and `bin` on + /// Windows. + pub fn libdir_relative(&self, compiler: Compiler) -> &Path { + if compiler.is_snapshot(self) { + libdir(&self.config.build).as_ref() + } else { + match self.config.libdir_relative() { + Some(relative_libdir) if compiler.stage >= 1 + => relative_libdir, + _ => libdir(&compiler.host).as_ref() + } } } @@ -657,7 +672,7 @@ impl<'a> Builder<'a> { add_lib_path(vec![self.rustc_libdir(compiler)], cmd); } - /// Get a path to the compiler specified. + /// Gets a path to the compiler specified. 
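A sketch of the stage-dependent libdir selection added above: a `libdir` configured in config.toml only takes effect for stage 1+ compilers, while stage 0 keeps the platform default (`lib` on Unix, `bin` on Windows). The function and argument names below are illustrative, not the Builder API.

    use std::path::{Path, PathBuf};

    // Pick the libdir relative to the sysroot: honour a configured override
    // only from stage 1 onwards, otherwise use the platform default.
    fn relative_libdir(stage: u32, configured: Option<&Path>) -> PathBuf {
        let default = if cfg!(windows) { "bin" } else { "lib" };
        match configured {
            Some(dir) if stage >= 1 => dir.to_path_buf(),
            _ => PathBuf::from(default),
        }
    }

    fn main() {
        // Stage 1 honours the override, stage 0 ignores it.
        assert_eq!(relative_libdir(1, Some(Path::new("lib64"))), PathBuf::from("lib64"));
        println!("{}", relative_libdir(0, Some(Path::new("lib64"))).display());
    }
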
pub fn rustc(&self, compiler: Compiler) -> PathBuf { if compiler.is_snapshot(self) { self.initial_rustc.clone() @@ -668,21 +683,28 @@ impl<'a> Builder<'a> { } } - pub fn rustdoc(&self, host: Interned) -> PathBuf { - self.ensure(tool::Rustdoc { host }) + /// Gets the paths to all of the compiler's codegen backends. + fn codegen_backends(&self, compiler: Compiler) -> impl Iterator { + fs::read_dir(self.sysroot_codegen_backends(compiler)) + .into_iter() + .flatten() + .filter_map(Result::ok) + .map(|entry| entry.path()) + } + + pub fn rustdoc(&self, compiler: Compiler) -> PathBuf { + self.ensure(tool::Rustdoc { compiler }) } - pub fn rustdoc_cmd(&self, host: Interned) -> Command { + pub fn rustdoc_cmd(&self, compiler: Compiler) -> Command { let mut cmd = Command::new(&self.out.join("bootstrap/debug/rustdoc")); - let compiler = self.compiler(self.top_stage, host); cmd.env("RUSTC_STAGE", compiler.stage.to_string()) .env("RUSTC_SYSROOT", self.sysroot(compiler)) - .env( - "RUSTDOC_LIBDIR", - self.sysroot_libdir(compiler, self.config.build), - ) + // Note that this is *not* the sysroot_libdir because rustdoc must be linked + // equivalently to rustc. + .env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler)) .env("CFG_RELEASE_CHANNEL", &self.config.channel) - .env("RUSTDOC_REAL", self.rustdoc(host)) + .env("RUSTDOC_REAL", self.rustdoc(compiler)) .env("RUSTDOC_CRATE_VERSION", self.rust_version()) .env("RUSTC_BOOTSTRAP", "1"); @@ -690,7 +712,7 @@ impl<'a> Builder<'a> { cmd.env_remove("MAKEFLAGS"); cmd.env_remove("MFLAGS"); - if let Some(linker) = self.linker(host) { + if let Some(linker) = self.linker(compiler.host) { cmd.env("RUSTC_TARGET_LINKER", linker); } cmd @@ -752,12 +774,15 @@ impl<'a> Builder<'a> { // This is the intended out directory for compiler documentation. my_out = self.compiler_doc_out(target); } - let rustdoc = self.rustdoc(compiler.host); + let rustdoc = self.rustdoc(compiler); self.clear_if_dirty(&my_out, &rustdoc); } else if cmd != "test" { match mode { Mode::Std => { self.clear_if_dirty(&my_out, &self.rustc(compiler)); + for backend in self.codegen_backends(compiler) { + self.clear_if_dirty(&my_out, &backend); + } }, Mode::Test => { self.clear_if_dirty(&my_out, &libstd_stamp); @@ -790,6 +815,13 @@ impl<'a> Builder<'a> { .env("CARGO_TARGET_DIR", out_dir) .arg(cmd); + // See comment in librustc_llvm/build.rs for why this is necessary, largely llvm-config + // needs to not accidentally link to libLLVM in stage0/lib. + cargo.env("REAL_LIBRARY_PATH_VAR", &util::dylib_path_var()); + if let Some(e) = env::var_os(util::dylib_path_var()) { + cargo.env("REAL_LIBRARY_PATH", e); + } + if cmd != "install" { cargo.arg("--target") .arg(target); @@ -803,6 +835,17 @@ impl<'a> Builder<'a> { cargo.env("RUST_CHECK", "1"); } + match mode { + Mode::Std | Mode::Test | Mode::ToolBootstrap | Mode::ToolStd | Mode::ToolTest=> {}, + Mode::Rustc | Mode::Codegen | Mode::ToolRustc => { + // Build proc macros both for the host and the target + if target != compiler.host && cmd != "check" { + cargo.arg("-Zdual-proc-macros"); + cargo.env("RUST_DUAL_PROC_MACROS", "1"); + } + }, + } + cargo.arg("-j").arg(self.jobs().to_string()); // Remove make-related flags to ensure Cargo can correctly set things up cargo.env_remove("MAKEFLAGS"); @@ -864,7 +907,7 @@ impl<'a> Builder<'a> { } else { &maybe_sysroot }; - let libdir = sysroot.join(libdir(&compiler.host)); + let libdir = self.rustc_libdir(compiler); // Customize the compiler we're running. 
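The new `codegen_backends` helper above uses a compact iterator chain over `fs::read_dir`. The same pattern in isolation, tolerating a missing directory and skipping unreadable entries:

    use std::fs;
    use std::path::{Path, PathBuf};

    // List the paths in a directory, yielding nothing if the directory cannot
    // be read and silently skipping entries that error.
    fn dir_entries(dir: &Path) -> impl Iterator<Item = PathBuf> {
        fs::read_dir(dir)
            .into_iter()            // Ok(read_dir) -> one item, Err(_) -> none
            .flatten()              // iterate the entries of that ReadDir
            .filter_map(Result::ok) // drop entries that failed to read
            .map(|entry| entry.path())
    }

    fn main() {
        for path in dir_entries(Path::new(".")) {
            println!("{}", path.display());
        }
    }
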
Specify the compiler to cargo // as our shim and then pass it some various options used to configure @@ -888,7 +931,7 @@ impl<'a> Builder<'a> { .env( "RUSTDOC_REAL", if cmd == "doc" || cmd == "rustdoc" || (cmd == "test" && want_rustdoc) { - self.rustdoc(compiler.host) + self.rustdoc(compiler) } else { PathBuf::from("/path/to/nowhere/rustdoc/not/required") }, @@ -906,7 +949,7 @@ impl<'a> Builder<'a> { cargo.env("RUSTC_ERROR_FORMAT", error_format); } if cmd != "build" && cmd != "check" && cmd != "rustc" && want_rustdoc { - cargo.env("RUSTDOC_LIBDIR", self.sysroot_libdir(compiler, self.config.build)); + cargo.env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler)); } if mode.is_tool() { @@ -975,10 +1018,7 @@ impl<'a> Builder<'a> { // For other crates, however, we know that we've already got a standard // library up and running, so we can use the normal compiler to compile // build scripts in that situation. - // - // If LLVM support is disabled we need to use the snapshot compiler to compile - // build scripts, as the new compiler doesn't support executables. - if mode == Mode::Std || !self.config.llvm_enabled { + if mode == Mode::Std { cargo .env("RUSTC_SNAPSHOT", &self.initial_rustc) .env("RUSTC_SNAPSHOT_LIBDIR", self.rustc_snapshot_libdir()); @@ -990,6 +1030,9 @@ impl<'a> Builder<'a> { if self.config.incremental { cargo.env("CARGO_INCREMENTAL", "1"); + } else { + // Don't rely on any default setting for incr. comp. in Cargo + cargo.env("CARGO_INCREMENTAL", "0"); } if let Some(ref on_fail) = self.config.on_fail { @@ -1006,8 +1049,7 @@ impl<'a> Builder<'a> { cargo.env("RUSTC_VERBOSE", self.verbosity.to_string()); - // in std, we want to avoid denying warnings for stage 0 as that makes cfg's painful. - if self.config.deny_warnings && !(mode == Mode::Std && stage == 0) { + if self.config.deny_warnings { cargo.env("RUSTC_DENY_WARNINGS", "1"); } @@ -1040,29 +1082,24 @@ impl<'a> Builder<'a> { } }; let cc = ccacheify(&self.cc(target)); - cargo.env(format!("CC_{}", target), &cc).env("CC", &cc); + cargo.env(format!("CC_{}", target), &cc); let cflags = self.cflags(target, GitRepo::Rustc).join(" "); cargo - .env(format!("CFLAGS_{}", target), cflags.clone()) - .env("CFLAGS", cflags.clone()); + .env(format!("CFLAGS_{}", target), cflags.clone()); if let Some(ar) = self.ar(target) { let ranlib = format!("{} s", ar.display()); cargo .env(format!("AR_{}", target), ar) - .env("AR", ar) - .env(format!("RANLIB_{}", target), ranlib.clone()) - .env("RANLIB", ranlib); + .env(format!("RANLIB_{}", target), ranlib); } if let Ok(cxx) = self.cxx(target) { let cxx = ccacheify(&cxx); cargo .env(format!("CXX_{}", target), &cxx) - .env("CXX", &cxx) - .env(format!("CXXFLAGS_{}", target), cflags.clone()) - .env("CXXFLAGS", cflags); + .env(format!("CXXFLAGS_{}", target), cflags); } } @@ -1841,6 +1878,7 @@ mod __test { doc_tests: DocTests::No, bless: false, compare_mode: None, + rustfix_coverage: false, }; let build = Build::new(config); @@ -1882,6 +1920,7 @@ mod __test { doc_tests: DocTests::No, bless: false, compare_mode: None, + rustfix_coverage: false, }; let build = Build::new(config); diff --git a/src/bootstrap/cache.rs b/src/bootstrap/cache.rs index 165cffa4587d3..239959682cb00 100644 --- a/src/bootstrap/cache.rs +++ b/src/bootstrap/cache.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - use std::any::{Any, TypeId}; use std::borrow::Borrow; use std::cell::RefCell; @@ -78,20 +68,20 @@ unsafe impl Send for Interned {} unsafe impl Sync for Interned {} impl fmt::Display for Interned { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &str = &*self; f.write_str(s) } } impl fmt::Debug for Interned { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &str = &*self; f.write_fmt(format_args!("{:?}", s)) } } impl fmt::Debug for Interned { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let s: &Path = &*self; f.write_fmt(format_args!("{:?}", s)) } @@ -237,10 +227,10 @@ lazy_static! { pub static ref INTERNER: Interner = Interner::default(); } -/// This is essentially a HashMap which allows storing any type in its input and +/// This is essentially a `HashMap` which allows storing any type in its input and /// any type in its output. It is a write-once cache; values are never evicted, /// which means that references to the value can safely be returned from the -/// get() method. +/// `get()` method. #[derive(Debug)] pub struct Cache( RefCell or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! C-compiler probing and detection. //! //! This module will fill out the `cc` and `cxx` maps of `Build` by looking for @@ -37,7 +27,6 @@ use std::path::{Path, PathBuf}; use std::process::Command; use build_helper::output; -use cc; use crate::{Build, GitRepo}; use crate::config::Target; @@ -143,7 +132,10 @@ fn set_compiler(cfg: &mut cc::Build, // compiler already takes into account the triple in question. t if t.contains("android") => { if let Some(ndk) = config.and_then(|c| c.ndk.as_ref()) { - let target = target.replace("armv7", "arm"); + let target = target.replace("armv7neon", "arm") + .replace("armv7", "arm") + .replace("thumbv7neon", "arm") + .replace("thumbv7", "arm"); let compiler = format!("{}-{}", target, compiler.clang()); cfg.compiler(ndk.join("bin").join(compiler)); } @@ -164,7 +156,7 @@ fn set_compiler(cfg: &mut cc::Build, None => return, }; match output[i + 3..].chars().next().unwrap() { - '0' ... '6' => {} + '0' ..= '6' => {} _ => return, } let alternative = format!("e{}", gnu_compiler); diff --git a/src/bootstrap/channel.rs b/src/bootstrap/channel.rs index 8756ec944c257..e42b073322e28 100644 --- a/src/bootstrap/channel.rs +++ b/src/bootstrap/channel.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Build configuration for Rust's release channels. //! //! 
Implements the stable/beta/nightly channel distinctions by setting various @@ -21,10 +11,9 @@ use std::process::Command; use build_helper::output; use crate::Build; -use crate::config::Config; // The version number -pub const CFG_RELEASE_NUM: &str = "1.33.0"; +pub const CFG_RELEASE_NUM: &str = "1.35.0"; pub struct GitInfo { inner: Option, @@ -37,20 +26,20 @@ struct Info { } impl GitInfo { - pub fn new(config: &Config, dir: &Path) -> GitInfo { + pub fn new(ignore_git: bool, dir: &Path) -> GitInfo { // See if this even begins to look like a git dir - if config.ignore_git || !dir.join(".git").exists() { + if ignore_git || !dir.join(".git").exists() { return GitInfo { inner: None } } // Make sure git commands work - let out = Command::new("git") - .arg("rev-parse") - .current_dir(dir) - .output() - .expect("failed to spawn git"); - if !out.status.success() { - return GitInfo { inner: None } + match Command::new("git") + .arg("rev-parse") + .current_dir(dir) + .output() + { + Ok(ref out) if out.status.success() => {} + _ => return GitInfo { inner: None }, } // Ok, let's scrape some info diff --git a/src/bootstrap/check.rs b/src/bootstrap/check.rs index 84e7c40e4559e..a30b465698e2a 100644 --- a/src/bootstrap/check.rs +++ b/src/bootstrap/check.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of compiling the compiler and standard library, in "check" mode. use crate::compile::{run_cargo, std_cargo, test_cargo, rustc_cargo, rustc_cargo_env, @@ -27,17 +17,17 @@ impl Step for Std { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("std") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Std { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let compiler = builder.compiler(0, builder.config.build); @@ -48,12 +38,12 @@ impl Step for Std { builder.info(&format!("Checking std artifacts ({} -> {})", &compiler.host, target)); run_cargo(builder, &mut cargo, - vec![], &libstd_stamp(builder, compiler, target), true); let libdir = builder.sysroot_libdir(compiler, target); - add_to_sysroot(&builder, &libdir, &libstd_stamp(builder, compiler, target)); + let hostdir = builder.sysroot_libdir(compiler, compiler.host); + add_to_sysroot(&builder, &libdir, &hostdir, &libstd_stamp(builder, compiler, target)); } } @@ -67,22 +57,22 @@ impl Step for Rustc { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("rustc-main") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustc { target: run.target, }); } - /// Build the compiler. + /// Builds the compiler. /// /// This will build the compiler for a particular stage of the build using /// the `compiler` targeting the `target` architecture. The artifacts /// created will also be linked into the sysroot directory. 
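The `GitInfo::new` change above stops unwrapping the spawned `git` process and instead treats both "git is not installed" and "this is not a git checkout" as the same "no version info" case. A self-contained sketch of that defensive probing:

    use std::path::Path;
    use std::process::Command;

    // Return the current commit hash, or None if git is unavailable, the
    // directory is not a checkout, or the command fails. Illustrative helper.
    fn git_commit(dir: &Path) -> Option<String> {
        if !dir.join(".git").exists() {
            return None;
        }
        match Command::new("git")
            .arg("rev-parse")
            .arg("HEAD")
            .current_dir(dir)
            .output()
        {
            Ok(out) if out.status.success() => {
                Some(String::from_utf8_lossy(&out.stdout).trim().to_string())
            }
            _ => None,
        }
    }

    fn main() {
        println!("{:?}", git_commit(Path::new(".")));
    }
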
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(0, builder.config.build); let target = self.target; @@ -95,12 +85,12 @@ impl Step for Rustc { builder.info(&format!("Checking compiler artifacts ({} -> {})", &compiler.host, target)); run_cargo(builder, &mut cargo, - vec![], &librustc_stamp(builder, compiler, target), true); let libdir = builder.sysroot_libdir(compiler, target); - add_to_sysroot(&builder, &libdir, &librustc_stamp(builder, compiler, target)); + let hostdir = builder.sysroot_libdir(compiler, compiler.host); + add_to_sysroot(&builder, &libdir, &hostdir, &librustc_stamp(builder, compiler, target)); } } @@ -115,11 +105,11 @@ impl Step for CodegenBackend { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("rustc_codegen_llvm") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let backend = run.builder.config.rust_codegen_backends.get(0); let backend = backend.cloned().unwrap_or_else(|| { INTERNER.intern_str("llvm") @@ -130,7 +120,7 @@ impl Step for CodegenBackend { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(0, builder.config.build); let target = self.target; let backend = self.backend; @@ -146,7 +136,6 @@ impl Step for CodegenBackend { let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage)); run_cargo(builder, &mut cargo, - vec![], &codegen_backend_stamp(builder, compiler, target, backend), true); } @@ -161,17 +150,17 @@ impl Step for Test { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("test") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Test { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(0, builder.config.build); let target = self.target; @@ -184,12 +173,12 @@ impl Step for Test { builder.info(&format!("Checking test artifacts ({} -> {})", &compiler.host, target)); run_cargo(builder, &mut cargo, - vec![], &libtest_stamp(builder, compiler, target), true); let libdir = builder.sysroot_libdir(compiler, target); - add_to_sysroot(builder, &libdir, &libtest_stamp(builder, compiler, target)); + let hostdir = builder.sysroot_libdir(compiler, compiler.host); + add_to_sysroot(builder, &libdir, &hostdir, &libtest_stamp(builder, compiler, target)); } } @@ -203,17 +192,17 @@ impl Step for Rustdoc { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/rustdoc") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustdoc { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(0, builder.config.build); let target = self.target; @@ -232,37 +221,49 @@ impl Step for Rustdoc { println!("Checking rustdoc artifacts ({} -> {})", &compiler.host, target); run_cargo(builder, &mut cargo, - vec![], &rustdoc_stamp(builder, compiler, target), true); let libdir = builder.sysroot_libdir(compiler, target); - add_to_sysroot(&builder, &libdir, &rustdoc_stamp(builder, compiler, target)); + let hostdir = 
builder.sysroot_libdir(compiler, compiler.host); + add_to_sysroot(&builder, &libdir, &hostdir, &rustdoc_stamp(builder, compiler, target)); builder.cargo(compiler, Mode::ToolRustc, target, "clean"); } } /// Cargo's output path for the standard library in a given stage, compiled /// by a particular compiler for the specified target. -pub fn libstd_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn libstd_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Std, target).join(".libstd-check.stamp") } /// Cargo's output path for libtest in a given stage, compiled by a particular /// compiler for the specified target. -pub fn libtest_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn libtest_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Test, target).join(".libtest-check.stamp") } /// Cargo's output path for librustc in a given stage, compiled by a particular /// compiler for the specified target. -pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn librustc_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Rustc, target).join(".librustc-check.stamp") } /// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular /// compiler for the specified target and backend. -fn codegen_backend_stamp(builder: &Builder, +fn codegen_backend_stamp(builder: &Builder<'_>, compiler: Compiler, target: Interned, backend: Interned) -> PathBuf { @@ -272,7 +273,11 @@ fn codegen_backend_stamp(builder: &Builder, /// Cargo's output path for rustdoc in a given stage, compiled by a particular /// compiler for the specified target. -pub fn rustdoc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn rustdoc_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::ToolRustc, target) .join(".rustdoc-check.stamp") } diff --git a/src/bootstrap/clean.rs b/src/bootstrap/clean.rs index dc42159d068b1..b52e1a7b0e681 100644 --- a/src/bootstrap/clean.rs +++ b/src/bootstrap/clean.rs @@ -1,19 +1,9 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of `make clean` in rustbuild. //! //! Responsible for cleaning out a build directory of all old and stale //! artifacts to prepare for a fresh build. Currently doesn't remove the //! `build/cache` directory (download cache) or the `build/$target/llvm` -//! directory unless the --all flag is present. +//! directory unless the `--all` flag is present. use std::fs; use std::io::{self, ErrorKind}; diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index 61a04b9720602..08316b71ea85b 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of compiling various phases of the compiler and standard //! library. //! @@ -29,7 +19,8 @@ use build_helper::{output, mtime, up_to_date}; use filetime::FileTime; use serde_json; -use crate::util::{exe, libdir, is_dylib}; +use crate::dist; +use crate::util::{exe, is_dylib}; use crate::{Compiler, Mode, GitRepo}; use crate::native; @@ -46,23 +37,23 @@ impl Step for Std { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("std") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Std { compiler: run.builder.compiler(run.builder.top_stage, run.host), target: run.target, }); } - /// Build the standard library. + /// Builds the standard library. /// /// This will build the standard library for a particular stage of the build /// using the `compiler` targeting the `target` architecture. The artifacts /// created will also be linked into the sysroot directory. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let compiler = self.compiler; @@ -87,11 +78,8 @@ impl Step for Std { builder.info(&format!("Uplifting stage1 std ({} -> {})", from.host, target)); // Even if we're not building std this stage, the new sysroot must - // still contain the musl startup objects. - if target.contains("musl") { - let libdir = builder.sysroot_libdir(compiler, target); - copy_musl_third_party_objects(builder, target, &libdir); - } + // still contain the third party objects needed by various targets. + copy_third_party_objects(builder, &compiler, target); builder.ensure(StdLink { compiler: from, @@ -101,10 +89,7 @@ impl Step for Std { return; } - if target.contains("musl") { - let libdir = builder.sysroot_libdir(compiler, target); - copy_musl_third_party_objects(builder, target, &libdir); - } + copy_third_party_objects(builder, &compiler, target); let mut cargo = builder.cargo(compiler, Mode::Std, target, "build"); std_cargo(builder, &compiler, target, &mut cargo); @@ -112,9 +97,10 @@ impl Step for Std { let _folder = builder.fold_output(|| format!("stage{}-std", compiler.stage)); builder.info(&format!("Building stage{} std artifacts ({} -> {})", compiler.stage, &compiler.host, target)); + // compile with `-Z emit-stack-sizes`; see bootstrap/src/rustc.rs for more details + cargo.env("RUSTC_EMIT_STACK_SIZES", "1"); run_cargo(builder, &mut cargo, - vec![], &libstd_stamp(builder, compiler, target), false); @@ -126,23 +112,49 @@ impl Step for Std { } } -/// Copies the crt(1,i,n).o startup objects -/// -/// Since musl supports fully static linking, we can cross link for it even -/// with a glibc-targeting toolchain, given we have the appropriate startup -/// files. As those shipped with glibc won't work, copy the ones provided by -/// musl so we have them on linux-gnu hosts. -fn copy_musl_third_party_objects(builder: &Builder, - target: Interned, - into: &Path) { - for &obj in &["crt1.o", "crti.o", "crtn.o"] { - builder.copy(&builder.musl_root(target).unwrap().join("lib").join(obj), &into.join(obj)); +/// Copies third pary objects needed by various targets. 
+fn copy_third_party_objects(builder: &Builder<'_>, compiler: &Compiler, target: Interned) { + let libdir = builder.sysroot_libdir(*compiler, target); + + // Copies the crt(1,i,n).o startup objects + // + // Since musl supports fully static linking, we can cross link for it even + // with a glibc-targeting toolchain, given we have the appropriate startup + // files. As those shipped with glibc won't work, copy the ones provided by + // musl so we have them on linux-gnu hosts. + if target.contains("musl") { + for &obj in &["crt1.o", "crti.o", "crtn.o"] { + builder.copy( + &builder.musl_root(target).unwrap().join("lib").join(obj), + &libdir.join(obj), + ); + } + } else if target.ends_with("-wasi") { + for &obj in &["crt1.o"] { + builder.copy( + &builder.wasi_root(target).unwrap().join("lib/wasm32-wasi").join(obj), + &libdir.join(obj), + ); + } + } + + // Copies libunwind.a compiled to be linked wit x86_64-fortanix-unknown-sgx. + // + // This target needs to be linked to Fortanix's port of llvm's libunwind. + // libunwind requires support for rwlock and printing to stderr, + // which is provided by std for this target. + if target == "x86_64-fortanix-unknown-sgx" { + let src_path_env = "X86_FORTANIX_SGX_LIBS"; + let obj = "libunwind.a"; + let src = env::var(src_path_env).expect(&format!("{} not found in env", src_path_env)); + let src = Path::new(&src).join(obj); + builder.copy(&src, &libdir.join(obj)); } } /// Configure cargo to compile the standard library, adding appropriate env vars /// and such. -pub fn std_cargo(builder: &Builder, +pub fn std_cargo(builder: &Builder<'_>, compiler: &Compiler, target: Interned, cargo: &mut Command) { @@ -185,6 +197,12 @@ pub fn std_cargo(builder: &Builder, cargo.env("MUSL_ROOT", p); } } + + if target.ends_with("-wasi") { + if let Some(p) = builder.wasi_root(target) { + cargo.env("WASI_ROOT", p); + } + } } } @@ -198,7 +216,7 @@ struct StdLink { impl Step for StdLink { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -210,7 +228,7 @@ impl Step for StdLink { /// Note that this assumes that `compiler` has already generated the libstd /// libraries for `target`, and this method will find them in the relevant /// output directory. 
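The `copy_third_party_objects` helper above dispatches on the target triple to decide which extra objects belong in the sysroot. A standalone sketch of just that dispatch; the file names come from the diff, while the source locations (musl root, wasi root, the SGX env var) are omitted here.

    // Which third-party objects a target needs copied into its sysroot libdir.
    fn third_party_objects(target: &str) -> &'static [&'static str] {
        if target.contains("musl") {
            // musl startup objects so glibc-hosted toolchains can cross link
            &["crt1.o", "crti.o", "crtn.o"]
        } else if target.ends_with("-wasi") {
            &["crt1.o"]
        } else if target == "x86_64-fortanix-unknown-sgx" {
            // Fortanix's port of LLVM's libunwind
            &["libunwind.a"]
        } else {
            &[]
        }
    }

    fn main() {
        assert_eq!(
            third_party_objects("x86_64-unknown-linux-musl"),
            &["crt1.o", "crti.o", "crtn.o"][..]
        );
        assert_eq!(third_party_objects("wasm32-unknown-wasi"), &["crt1.o"][..]);
    }
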
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; @@ -221,7 +239,8 @@ impl Step for StdLink { target_compiler.host, target)); let libdir = builder.sysroot_libdir(target_compiler, target); - add_to_sysroot(builder, &libdir, &libstd_stamp(builder, compiler, target)); + let hostdir = builder.sysroot_libdir(target_compiler, compiler.host); + add_to_sysroot(builder, &libdir, &hostdir, &libstd_stamp(builder, compiler, target)); if builder.config.sanitizers && compiler.stage != 0 && target == "x86_64-apple-darwin" { // The sanitizers are only built in stage1 or above, so the dylibs will @@ -234,7 +253,12 @@ impl Step for StdLink { } } -fn copy_apple_sanitizer_dylibs(builder: &Builder, native_dir: &Path, platform: &str, into: &Path) { +fn copy_apple_sanitizer_dylibs( + builder: &Builder<'_>, + native_dir: &Path, + platform: &str, + into: &Path, +) { for &sanitizer in &["asan", "tsan"] { let filename = format!("lib__rustc__clang_rt.{}_{}_dynamic.dylib", sanitizer, platform); let mut src_path = native_dir.join(sanitizer); @@ -255,24 +279,24 @@ pub struct StartupObjects { impl Step for StartupObjects { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/rtstartup") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(StartupObjects { compiler: run.builder.compiler(run.builder.top_stage, run.host), target: run.target, }); } - /// Build and prepare startup objects like rsbegin.o and rsend.o + /// Builds and prepare startup objects like rsbegin.o and rsend.o /// /// These are primarily used on Windows right now for linking executables/dlls. /// They don't require any library support as they're just plain old object /// files, so we just use the nightly snapshot compiler to always build them (as /// no other compilers are guaranteed to be available). - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let for_compiler = self.compiler; let target = self.target; if !target.contains("pc-windows-gnu") { @@ -320,23 +344,23 @@ impl Step for Test { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("test") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Test { compiler: run.builder.compiler(run.builder.top_stage, run.host), target: run.target, }); } - /// Build libtest. + /// Builds libtest. /// /// This will build libtest and supporting libraries for a particular stage of /// the build using the `compiler` targeting the `target` architecture. The /// artifacts created will also be linked into the sysroot directory. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let compiler = self.compiler; @@ -373,9 +397,10 @@ impl Step for Test { let _folder = builder.fold_output(|| format!("stage{}-test", compiler.stage)); builder.info(&format!("Building stage{} test artifacts ({} -> {})", compiler.stage, &compiler.host, target)); + // compile with `-Z emit-stack-sizes`; see bootstrap/src/rustc.rs for more details + cargo.env("RUSTC_EMIT_STACK_SIZES", "1"); run_cargo(builder, &mut cargo, - vec![], &libtest_stamp(builder, compiler, target), false); @@ -388,7 +413,7 @@ impl Step for Test { } /// Same as `std_cargo`, but for libtest -pub fn test_cargo(builder: &Builder, +pub fn test_cargo(builder: &Builder<'_>, _compiler: &Compiler, _target: Interned, cargo: &mut Command) { @@ -409,12 +434,12 @@ pub struct TestLink { impl Step for TestLink { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } /// Same as `std_link`, only for libtest - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; @@ -424,8 +449,12 @@ impl Step for TestLink { &compiler.host, target_compiler.host, target)); - add_to_sysroot(builder, &builder.sysroot_libdir(target_compiler, target), - &libtest_stamp(builder, compiler, target)); + add_to_sysroot( + builder, + &builder.sysroot_libdir(target_compiler, target), + &builder.sysroot_libdir(target_compiler, compiler.host), + &libtest_stamp(builder, compiler, target) + ); builder.cargo(target_compiler, Mode::ToolTest, target, "clean"); } @@ -442,23 +471,23 @@ impl Step for Rustc { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("rustc-main") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.host), target: run.target, }); } - /// Build the compiler. + /// Builds the compiler. /// /// This will build the compiler for a particular stage of the build using /// the `compiler` targeting the `target` architecture. The artifacts /// created will also be linked into the sysroot directory. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; @@ -489,8 +518,8 @@ impl Step for Rustc { return; } - // Ensure that build scripts have a std to link against. - builder.ensure(Std { + // Ensure that build scripts and proc macros have a std / libproc_macro to link against. 
+ builder.ensure(Test { compiler: builder.compiler(self.compiler.stage, builder.config.build), target: builder.config.build, }); @@ -503,7 +532,6 @@ impl Step for Rustc { compiler.stage, &compiler.host, target)); run_cargo(builder, &mut cargo, - vec![], &librustc_stamp(builder, compiler, target), false); @@ -515,14 +543,14 @@ impl Step for Rustc { } } -pub fn rustc_cargo(builder: &Builder, cargo: &mut Command) { +pub fn rustc_cargo(builder: &Builder<'_>, cargo: &mut Command) { cargo.arg("--features").arg(builder.rustc_features()) .arg("--manifest-path") .arg(builder.src.join("src/rustc/Cargo.toml")); rustc_cargo_env(builder, cargo); } -pub fn rustc_cargo_env(builder: &Builder, cargo: &mut Command) { +pub fn rustc_cargo_env(builder: &Builder<'_>, cargo: &mut Command) { // Set some configuration variables picked up by build scripts and // the compiler alike cargo.env("CFG_RELEASE", builder.rust_release()) @@ -553,8 +581,8 @@ pub fn rustc_cargo_env(builder: &Builder, cargo: &mut Command) { if let Some(ref s) = builder.config.rustc_default_linker { cargo.env("CFG_DEFAULT_LINKER", s); } - if builder.config.rustc_parallel_queries { - cargo.env("RUSTC_PARALLEL_QUERIES", "1"); + if builder.config.rustc_parallel { + cargo.env("RUSTC_PARALLEL_COMPILER", "1"); } if builder.config.rust_verify_llvm_ir { cargo.env("RUSTC_VERIFY_LLVM_IR", "1"); @@ -571,12 +599,12 @@ struct RustcLink { impl Step for RustcLink { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } /// Same as `std_link`, only for librustc - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target_compiler = self.target_compiler; let target = self.target; @@ -586,8 +614,12 @@ impl Step for RustcLink { &compiler.host, target_compiler.host, target)); - add_to_sysroot(builder, &builder.sysroot_libdir(target_compiler, target), - &librustc_stamp(builder, compiler, target)); + add_to_sysroot( + builder, + &builder.sysroot_libdir(target_compiler, target), + &builder.sysroot_libdir(target_compiler, compiler.host), + &librustc_stamp(builder, compiler, target) + ); builder.cargo(target_compiler, Mode::ToolRustc, target, "clean"); } } @@ -604,11 +636,11 @@ impl Step for CodegenBackend { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.all_krates("rustc_codegen_llvm") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let backend = run.builder.config.rust_codegen_backends.get(0); let backend = backend.cloned().unwrap_or_else(|| { INTERNER.intern_str("llvm") @@ -620,7 +652,7 @@ impl Step for CodegenBackend { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; let backend = self.backend; @@ -646,47 +678,18 @@ impl Step for CodegenBackend { let out_dir = builder.cargo_out(compiler, Mode::Codegen, target); - let mut cargo = builder.cargo(compiler, Mode::Codegen, target, "rustc"); + let mut cargo = builder.cargo(compiler, Mode::Codegen, target, "build"); cargo.arg("--manifest-path") .arg(builder.src.join("src/librustc_codegen_llvm/Cargo.toml")); rustc_cargo_env(builder, &mut cargo); let features = build_codegen_backend(&builder, &mut cargo, &compiler, target, backend); - let mut cargo_tails_args = vec![]; - - if builder.config.llvm_thin_lto { - cargo_tails_args.push("--".to_string()); - - let num_jobs = 
builder.jobs(); - - if !target.contains("msvc") { - // Here we assume that the linker is clang. If it's not, there'll - // be linker errors. - cargo_tails_args.push("-Clink-arg=-fuse-ld=lld".to_string()); - cargo_tails_args.push("-Clink-arg=-flto=thin".to_string()); - - if builder.config.llvm_optimize { - cargo_tails_args.push("-Clink-arg=-O2".to_string()); - } - - // Let's make LLD respect the `-j` option. - let num_jobs_arg = format!("-Clink-arg=-Wl,--thinlto-jobs={}", num_jobs); - cargo_tails_args.push(num_jobs_arg); - } else { - // Here we assume that the linker is lld-link.exe. lld-link.exe - // does not need the extra arguments except for num_jobs - let num_jobs_arg = format!("-Clink-arg=/opt:lldltojobs={}", num_jobs); - cargo_tails_args.push(num_jobs_arg); - } - } - let tmp_stamp = out_dir.join(".tmp.stamp"); let _folder = builder.fold_output(|| format!("stage{}-rustc_codegen_llvm", compiler.stage)); let files = run_cargo(builder, cargo.arg("--features").arg(features), - cargo_tails_args, &tmp_stamp, false); if builder.config.dry_run { @@ -712,7 +715,7 @@ impl Step for CodegenBackend { } } -pub fn build_codegen_backend(builder: &Builder, +pub fn build_codegen_backend(builder: &Builder<'_>, cargo: &mut Command, compiler: &Compiler, target: Interned, @@ -740,6 +743,7 @@ pub fn build_codegen_backend(builder: &Builder, if builder.is_rust_llvm(target) && backend != "emscripten" { cargo.env("LLVM_RUSTLLVM", "1"); } + cargo.env("LLVM_CONFIG", &llvm_config); if backend != "emscripten" { let target_config = builder.config.target_config.get(&target); @@ -759,9 +763,14 @@ pub fn build_codegen_backend(builder: &Builder, "libstdc++.a"); cargo.env("LLVM_STATIC_STDCPP", file); } - if builder.config.llvm_link_shared { + if builder.config.llvm_link_shared || + (builder.config.llvm_thin_lto && backend != "emscripten") + { cargo.env("LLVM_LINK_SHARED", "1"); } + if builder.config.llvm_use_libcxx { + cargo.env("LLVM_USE_LIBCXX", "1"); + } } _ => panic!("unknown backend: {}", backend), } @@ -775,7 +784,7 @@ pub fn build_codegen_backend(builder: &Builder, /// This will take the codegen artifacts produced by `compiler` and link them /// into an appropriate location for `target_compiler` to be a functional /// compiler. -fn copy_codegen_backends_to_sysroot(builder: &Builder, +fn copy_codegen_backends_to_sysroot(builder: &Builder<'_>, compiler: Compiler, target_compiler: Compiler) { let target = target_compiler.host; @@ -813,7 +822,7 @@ fn copy_codegen_backends_to_sysroot(builder: &Builder, } } -fn copy_lld_to_sysroot(builder: &Builder, +fn copy_lld_to_sysroot(builder: &Builder<'_>, target_compiler: Compiler, lld_install_root: &Path) { let target = target_compiler.host; @@ -833,25 +842,37 @@ fn copy_lld_to_sysroot(builder: &Builder, /// Cargo's output path for the standard library in a given stage, compiled /// by a particular compiler for the specified target. -pub fn libstd_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn libstd_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Std, target).join(".libstd.stamp") } /// Cargo's output path for libtest in a given stage, compiled by a particular /// compiler for the specified target. 
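The backend build above now also requests a dynamically linked LLVM when ThinLTO is enabled, except for the emscripten backend. That condition pulled out on its own, as a small illustrative predicate:

    // Whether the librustc_codegen_llvm build should set LLVM_LINK_SHARED=1.
    fn llvm_link_shared(link_shared: bool, thin_lto: bool, backend: &str) -> bool {
        link_shared || (thin_lto && backend != "emscripten")
    }

    fn main() {
        assert!(llvm_link_shared(false, true, "llvm"));
        assert!(!llvm_link_shared(false, true, "emscripten"));
        assert!(llvm_link_shared(true, false, "emscripten"));
    }
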
-pub fn libtest_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn libtest_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Test, target).join(".libtest.stamp") } /// Cargo's output path for librustc in a given stage, compiled by a particular /// compiler for the specified target. -pub fn librustc_stamp(builder: &Builder, compiler: Compiler, target: Interned) -> PathBuf { +pub fn librustc_stamp( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, +) -> PathBuf { builder.cargo_out(compiler, Mode::Rustc, target).join(".librustc.stamp") } /// Cargo's output path for librustc_codegen_llvm in a given stage, compiled by a particular /// compiler for the specified target and backend. -fn codegen_backend_stamp(builder: &Builder, +fn codegen_backend_stamp(builder: &Builder<'_>, compiler: Compiler, target: Interned, backend: Interned) -> PathBuf { @@ -859,10 +880,12 @@ fn codegen_backend_stamp(builder: &Builder, .join(format!(".librustc_codegen_llvm-{}.stamp", backend)) } -pub fn compiler_file(builder: &Builder, - compiler: &Path, - target: Interned, - file: &str) -> PathBuf { +pub fn compiler_file( + builder: &Builder<'_>, + compiler: &Path, + target: Interned, + file: &str, +) -> PathBuf { let mut cmd = Command::new(compiler); cmd.args(builder.cflags(target, GitRepo::Rustc)); cmd.arg(format!("-print-file-name={}", file)); @@ -878,7 +901,7 @@ pub struct Sysroot { impl Step for Sysroot { type Output = Interned; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -888,7 +911,7 @@ impl Step for Sysroot { /// That is, the sysroot for the stage0 compiler is not what the compiler /// thinks it is by default, but it's the same as the default for stages /// 1-3. - fn run(self, builder: &Builder) -> Interned { + fn run(self, builder: &Builder<'_>) -> Interned { let compiler = self.compiler; let sysroot = if compiler.stage == 0 { builder.out.join(&compiler.host).join("stage0-sysroot") @@ -913,7 +936,7 @@ pub struct Assemble { impl Step for Assemble { type Output = Compiler; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -922,7 +945,7 @@ impl Step for Assemble { /// This will assemble a compiler in `build/$host/stage$stage`. The compiler /// must have been previously produced by the `stage - 1` builder.build /// compiler. 
- fn run(self, builder: &Builder) -> Compiler { + fn run(self, builder: &Builder<'_>) -> Compiler { let target_compiler = self.target_compiler; if target_compiler.stage == 0 { @@ -982,13 +1005,13 @@ impl Step for Assemble { // Link in all dylibs to the libdir let sysroot = builder.sysroot(target_compiler); - let sysroot_libdir = sysroot.join(libdir(&*host)); - t!(fs::create_dir_all(&sysroot_libdir)); + let rustc_libdir = builder.rustc_libdir(target_compiler); + t!(fs::create_dir_all(&rustc_libdir)); let src_libdir = builder.sysroot_libdir(build_compiler, host); for f in builder.read_dir(&src_libdir) { let filename = f.file_name().into_string().unwrap(); if is_dylib(&filename) { - builder.copy(&f.path(), &sysroot_libdir.join(&filename)); + builder.copy(&f.path(), &rustc_libdir.join(&filename)); } } @@ -999,6 +1022,8 @@ impl Step for Assemble { copy_lld_to_sysroot(builder, target_compiler, &lld_install); } + dist::maybe_install_llvm_dylib(builder, target_compiler.host, &sysroot); + // Link the compiler binary itself into place let out_dir = builder.cargo_out(build_compiler, Mode::Rustc, host); let rustc = out_dir.join(exe("rustc_binary", &*host)); @@ -1016,16 +1041,25 @@ impl Step for Assemble { /// /// For a particular stage this will link the file listed in `stamp` into the /// `sysroot_dst` provided. -pub fn add_to_sysroot(builder: &Builder, sysroot_dst: &Path, stamp: &Path) { +pub fn add_to_sysroot( + builder: &Builder<'_>, + sysroot_dst: &Path, + sysroot_host_dst: &Path, + stamp: &Path +) { t!(fs::create_dir_all(&sysroot_dst)); - for path in builder.read_stamp_file(stamp) { - builder.copy(&path, &sysroot_dst.join(path.file_name().unwrap())); + t!(fs::create_dir_all(&sysroot_host_dst)); + for (path, host) in builder.read_stamp_file(stamp) { + if host { + builder.copy(&path, &sysroot_host_dst.join(path.file_name().unwrap())); + } else { + builder.copy(&path, &sysroot_dst.join(path.file_name().unwrap())); + } } } -pub fn run_cargo(builder: &Builder, +pub fn run_cargo(builder: &Builder<'_>, cargo: &mut Command, - tail_args: Vec, stamp: &Path, is_check: bool) -> Vec @@ -1048,9 +1082,15 @@ pub fn run_cargo(builder: &Builder, // files we need to probe for later. let mut deps = Vec::new(); let mut toplevel = Vec::new(); - let ok = stream_cargo(builder, cargo, tail_args, &mut |msg| { - let filenames = match msg { - CargoMessage::CompilerArtifact { filenames, .. } => filenames, + let ok = stream_cargo(builder, cargo, &mut |msg| { + let (filenames, crate_types) = match msg { + CargoMessage::CompilerArtifact { + filenames, + target: CargoTarget { + crate_types, + }, + .. + } => (filenames, crate_types), _ => return, }; for filename in filenames { @@ -1065,15 +1105,19 @@ pub fn run_cargo(builder: &Builder, let filename = Path::new(&*filename); // If this was an output file in the "host dir" we don't actually - // worry about it, it's not relevant for us. + // worry about it, it's not relevant for us if filename.starts_with(&host_root_dir) { + // Unless it's a proc macro used in the compiler + if crate_types.iter().any(|t| t == "proc-macro") { + deps.push((filename.to_path_buf(), true)); + } continue; } // If this was output in the `deps` dir then this is a precise file // name (hash included) so we start tracking it. 
if filename.starts_with(&target_deps_dir) { - deps.push(filename.to_path_buf()); + deps.push((filename.to_path_buf(), false)); continue; } @@ -1126,10 +1170,10 @@ pub fn run_cargo(builder: &Builder, let candidate = format!("{}.lib", path_to_add); let candidate = PathBuf::from(candidate); if candidate.exists() { - deps.push(candidate); + deps.push((candidate, false)); } } - deps.push(path_to_add.into()); + deps.push((path_to_add.into(), false)); } // Now we want to update the contents of the stamp file, if necessary. First @@ -1142,12 +1186,13 @@ pub fn run_cargo(builder: &Builder, let mut new_contents = Vec::new(); let mut max = None; let mut max_path = None; - for dep in deps.iter() { + for (dep, proc_macro) in deps.iter() { let mtime = mtime(dep); if Some(mtime) > max { max = Some(mtime); max_path = Some(dep.clone()); } + new_contents.extend(if *proc_macro { b"h" } else { b"t" }); new_contents.extend(dep.to_str().unwrap().as_bytes()); new_contents.extend(b"\0"); } @@ -1159,7 +1204,7 @@ pub fn run_cargo(builder: &Builder, if contents_equal && max <= stamp_mtime { builder.verbose(&format!("not updating {:?}; contents equal and {:?} <= {:?}", stamp, max, stamp_mtime)); - return deps + return deps.into_iter().map(|(d, _)| d).collect() } if max > stamp_mtime { builder.verbose(&format!("updating {:?} as {:?} changed", stamp, max_path)); @@ -1167,14 +1212,13 @@ pub fn run_cargo(builder: &Builder, builder.verbose(&format!("updating {:?} as deps changed", stamp)); } t!(fs::write(&stamp, &new_contents)); - deps + deps.into_iter().map(|(d, _)| d).collect() } pub fn stream_cargo( - builder: &Builder, + builder: &Builder<'_>, cargo: &mut Command, - tail_args: Vec, - cb: &mut dyn FnMut(CargoMessage), + cb: &mut dyn FnMut(CargoMessage<'_>), ) -> bool { if builder.config.dry_run { return true; @@ -1184,10 +1228,6 @@ pub fn stream_cargo( cargo.arg("--message-format").arg("json") .stdout(Stdio::piped()); - for arg in tail_args { - cargo.arg(arg); - } - builder.verbose(&format!("running: {:?}", cargo)); let mut child = match cargo.spawn() { Ok(child) => child, @@ -1200,7 +1240,7 @@ pub fn stream_cargo( let stdout = BufReader::new(child.stdout.take().unwrap()); for line in stdout.lines() { let line = t!(line); - match serde_json::from_str::(&line) { + match serde_json::from_str::>(&line) { Ok(msg) => cb(msg), // If this was informational, just print it out and continue Err(_) => println!("{}", line) @@ -1218,6 +1258,11 @@ pub fn stream_cargo( status.success() } +#[derive(Deserialize)] +pub struct CargoTarget<'a> { + crate_types: Vec>, +} + #[derive(Deserialize)] #[serde(tag = "reason", rename_all = "kebab-case")] pub enum CargoMessage<'a> { @@ -1225,6 +1270,7 @@ pub enum CargoMessage<'a> { package_id: Cow<'a, str>, features: Vec>, filenames: Vec>, + target: CargoTarget<'a>, }, BuildScriptExecuted { package_id: Cow<'a, str>, diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 2ae9da9c085d4..59e12cc52616a 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Serialized configuration of a build. //! //! 
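// A minimal sketch, not from this patch: run_cargo above now writes each tracked
// artifact into the stamp file as a one-byte tag ("h" for host deps such as proc
// macros, "t" for target deps) followed by the path and a NUL terminator. The real
// reader is Builder::read_stamp_file (not in this hunk); a standalone parser for
// the same format looks like this.
use std::path::PathBuf;

fn parse_stamp(contents: &[u8]) -> Vec<(PathBuf, bool)> {
    contents
        .split(|byte| *byte == 0)                 // entries are NUL-separated
        .filter(|entry| !entry.is_empty())
        .map(|entry| {
            let host = entry[0] == b'h';          // b'h' => host artifact, b't' => target artifact
            let path = String::from_utf8_lossy(&entry[1..]).into_owned();
            (PathBuf::from(path), host)
        })
        .collect()
}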
This module implements parsing `config.toml` configuration files to tweak @@ -58,6 +48,7 @@ pub struct Config { pub exclude: Vec, pub rustc_error_format: Option, pub test_compare_mode: bool, + pub llvm_libunwind: bool, pub run_host_only: bool, @@ -74,7 +65,6 @@ pub struct Config { pub backtrace_on_ice: bool, // llvm codegen options - pub llvm_enabled: bool, pub llvm_assertions: bool, pub llvm_optimize: bool, pub llvm_thin_lto: bool, @@ -87,11 +77,18 @@ pub struct Config { pub llvm_experimental_targets: String, pub llvm_link_jobs: Option, pub llvm_version_suffix: Option, + pub llvm_use_linker: Option, + pub llvm_allow_old_toolchain: Option, pub lld_enabled: bool, pub lldb_enabled: bool, pub llvm_tools_enabled: bool, + pub llvm_cflags: Option, + pub llvm_cxxflags: Option, + pub llvm_ldflags: Option, + pub llvm_use_libcxx: bool, + // rust codegen options pub rust_optimize: bool, pub rust_codegen_units: Option, @@ -102,7 +99,7 @@ pub struct Config { pub rust_debuginfo_only_std: bool, pub rust_debuginfo_tools: bool, pub rust_rpath: bool, - pub rustc_parallel_queries: bool, + pub rustc_parallel: bool, pub rustc_default_linker: Option, pub rust_optimize_tests: bool, pub rust_debuginfo_tests: bool, @@ -173,6 +170,7 @@ pub struct Target { pub ndk: Option, pub crt_static: Option, pub musl_root: Option, + pub wasi_root: Option, pub qemu_rootfs: Option, pub no_std: bool, } @@ -247,7 +245,6 @@ struct Install { #[derive(Deserialize, Default)] #[serde(deny_unknown_fields, rename_all = "kebab-case")] struct Llvm { - enabled: Option, ccache: Option, ninja: Option, assertions: Option, @@ -262,6 +259,12 @@ struct Llvm { link_shared: Option, version_suffix: Option, clang_cl: Option, + cflags: Option, + cxxflags: Option, + ldflags: Option, + use_libcxx: Option, + use_linker: Option, + allow_old_toolchain: Option, } #[derive(Deserialize, Default, Clone)] @@ -299,7 +302,7 @@ struct Rust { debuginfo_lines: Option, debuginfo_only_std: Option, debuginfo_tools: Option, - experimental_parallel_queries: Option, + parallel_compiler: Option, backtrace: Option, default_linker: Option, channel: Option, @@ -327,6 +330,7 @@ struct Rust { remap_debuginfo: Option, jemalloc: Option, test_compare_mode: Option, + llvm_libunwind: Option, } /// TOML representation of how each build target is configured. 
@@ -343,6 +347,7 @@ struct TomlTarget { android_ndk: Option, crt_static: Option, musl_root: Option, + wasi_root: Option, qemu_rootfs: Option, } @@ -357,7 +362,6 @@ impl Config { pub fn default_opts() -> Config { let mut config = Config::default(); - config.llvm_enabled = true; config.llvm_optimize = true; config.llvm_version_check = true; config.backtrace = true; @@ -509,7 +513,6 @@ impl Config { Some(StringOrBool::Bool(false)) | None => {} } set(&mut config.ninja, llvm.ninja); - set(&mut config.llvm_enabled, llvm.enabled); llvm_assertions = llvm.assertions; set(&mut config.llvm_optimize, llvm.optimize); set(&mut config.llvm_thin_lto, llvm.thin_lto); @@ -519,10 +522,17 @@ impl Config { set(&mut config.llvm_link_shared, llvm.link_shared); config.llvm_targets = llvm.targets.clone(); config.llvm_experimental_targets = llvm.experimental_targets.clone() - .unwrap_or_else(|| "WebAssembly;RISCV".to_string()); + .unwrap_or_else(|| "AVR;WebAssembly;RISCV".to_string()); config.llvm_link_jobs = llvm.link_jobs; config.llvm_version_suffix = llvm.version_suffix.clone(); config.llvm_clang_cl = llvm.clang_cl.clone(); + + config.llvm_cflags = llvm.cflags.clone(); + config.llvm_cxxflags = llvm.cxxflags.clone(); + config.llvm_ldflags = llvm.ldflags.clone(); + set(&mut config.llvm_use_libcxx, llvm.use_libcxx); + config.llvm_use_linker = llvm.use_linker.clone(); + config.llvm_allow_old_toolchain = llvm.allow_old_toolchain.clone(); } if let Some(ref rust) = toml.rust { @@ -540,6 +550,7 @@ impl Config { set(&mut config.rust_rpath, rust.rpath); set(&mut config.jemalloc, rust.jemalloc); set(&mut config.test_compare_mode, rust.test_compare_mode); + set(&mut config.llvm_libunwind, rust.llvm_libunwind); set(&mut config.backtrace, rust.backtrace); set(&mut config.channel, rust.channel.clone()); set(&mut config.rust_dist_src, rust.dist_src); @@ -553,7 +564,7 @@ impl Config { set(&mut config.lld_enabled, rust.lld); set(&mut config.lldb_enabled, rust.lldb); set(&mut config.llvm_tools_enabled, rust.llvm_tools); - config.rustc_parallel_queries = rust.experimental_parallel_queries.unwrap_or(false); + config.rustc_parallel = rust.parallel_compiler.unwrap_or(false); config.rustc_default_linker = rust.default_linker.clone(); config.musl_root = rust.musl_root.clone().map(PathBuf::from); config.save_toolstates = rust.save_toolstates.clone().map(PathBuf::from); @@ -599,6 +610,7 @@ impl Config { target.linker = cfg.linker.clone().map(PathBuf::from); target.crt_static = cfg.crt_static.clone(); target.musl_root = cfg.musl_root.clone().map(PathBuf::from); + target.wasi_root = cfg.wasi_root.clone().map(PathBuf::from); target.qemu_rootfs = cfg.qemu_rootfs.clone().map(PathBuf::from); config.target_config.insert(INTERNER.intern_string(triple.clone()), target); @@ -661,6 +673,11 @@ impl Config { pub fn very_verbose(&self) -> bool { self.verbose > 1 } + + pub fn llvm_enabled(&self) -> bool { + self.rust_codegen_backends.contains(&INTERNER.intern_str("llvm")) + || self.rust_codegen_backends.contains(&INTERNER.intern_str("emscripten")) + } } fn set(field: &mut T, val: Option) { diff --git a/src/bootstrap/configure.py b/src/bootstrap/configure.py index 5467c9f9d5bf9..ade8afee7c109 100755 --- a/src/bootstrap/configure.py +++ b/src/bootstrap/configure.py @@ -1,13 +1,4 @@ #!/usr/bin/env python -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
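// A minimal sketch, not from this patch: with llvm.enabled removed from
// config.toml, Config::llvm_enabled above derives the answer from the configured
// codegen backends. Interned strings aside, the decision reduces to:
fn llvm_enabled(codegen_backends: &[&str]) -> bool {
    codegen_backends
        .iter()
        .any(|backend| *backend == "llvm" || *backend == "emscripten")
}
// e.g. llvm_enabled(&["llvm"]) is true; llvm_enabled(&[]) is false.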
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # ignore-tidy-linelength @@ -44,7 +35,7 @@ def v(*args): o("docs", "build.docs", "build standard library documentation") o("compiler-docs", "build.compiler-docs", "build compiler documentation") o("optimize-tests", "rust.optimize-tests", "build tests with optimizations") -o("experimental-parallel-queries", "rust.experimental-parallel-queries", "build rustc with experimental parallelization") +o("parallel-compiler", "rust.parallel-compiler", "build a multi-threaded rustc") o("test-miri", "rust.test-miri", "run miri's test suite") o("debuginfo-tests", "rust.debuginfo-tests", "build tests with debugger metadata") o("verbose-tests", "rust.verbose-tests", "enable verbose output when running tests") @@ -71,6 +62,13 @@ def v(*args): o("lld", "rust.lld", "build lld") o("lldb", "rust.lldb", "build lldb") o("missing-tools", "dist.missing-tools", "allow failures when building tools") +o("use-libcxx", "llvm.use_libcxx", "build LLVM with libc++") + +o("cflags", "llvm.cflags", "build LLVM with these extra compiler flags") +o("cxxflags", "llvm.cxxflags", "build LLVM with these extra compiler flags") +o("ldflags", "llvm.ldflags", "build LLVM with these extra linker flags") + +o("llvm-libunwind", "rust.llvm_libunwind", "use LLVM libunwind") # Optimization and debugging options. These may be overridden by the release # channel, etc. @@ -107,6 +105,8 @@ def v(*args): "arm-linux-androideabi NDK standalone path") v("armv7-linux-androideabi-ndk", "target.armv7-linux-androideabi.android-ndk", "armv7-linux-androideabi NDK standalone path") +v("thumbv7neon-linux-androideabi-ndk", "target.thumbv7neon-linux-androideabi.android-ndk", + "thumbv7neon-linux-androideabi NDK standalone path") v("aarch64-linux-android-ndk", "target.aarch64-linux-android.android-ndk", "aarch64-linux-android NDK standalone path") v("x86_64-linux-android-ndk", "target.x86_64-linux-android.android-ndk", diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index 927f9bf8ddbca..61a7705bd6cc6 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of the various distribution aspects of the compiler. //! //! 
This module is responsible for creating tarballs of the standard library, @@ -28,20 +18,22 @@ use build_helper::output; use crate::{Compiler, Mode, LLVM_TOOLS}; use crate::channel; -use crate::util::{libdir, is_dylib, exe}; +use crate::util::{is_dylib, exe}; use crate::builder::{Builder, RunConfig, ShouldRun, Step}; use crate::compile; use crate::tool::{self, Tool}; use crate::cache::{INTERNER, Interned}; -use time; +use time::{self, Timespec}; -pub fn pkgname(builder: &Builder, component: &str) -> String { +pub fn pkgname(builder: &Builder<'_>, component: &str) -> String { if component == "cargo" { format!("{}-{}", component, builder.cargo_package_vers()) } else if component == "rls" { format!("{}-{}", component, builder.rls_package_vers()) } else if component == "clippy" { format!("{}-{}", component, builder.clippy_package_vers()) + } else if component == "miri" { + format!("{}-{}", component, builder.miri_package_vers()) } else if component == "rustfmt" { format!("{}-{}", component, builder.rustfmt_package_vers()) } else if component == "llvm-tools" { @@ -54,15 +46,15 @@ pub fn pkgname(builder: &Builder, component: &str) -> String { } } -fn distdir(builder: &Builder) -> PathBuf { +fn distdir(builder: &Builder<'_>) -> PathBuf { builder.out.join("dist") } -pub fn tmpdir(builder: &Builder) -> PathBuf { +pub fn tmpdir(builder: &Builder<'_>) -> PathBuf { builder.out.join("tmp/dist") } -fn rust_installer(builder: &Builder) -> Command { +fn rust_installer(builder: &Builder<'_>) -> Command { builder.tool_cmd(Tool::RustInstaller) } @@ -84,11 +76,11 @@ impl Step for Docs { type Output = PathBuf; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/doc") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Docs { stage: run.builder.top_stage, host: run.target, @@ -96,7 +88,7 @@ impl Step for Docs { } /// Builds the `rust-docs` installer component. - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let host = self.host; let name = pkgname(builder, "rust-docs"); @@ -146,11 +138,11 @@ impl Step for RustcDocs { type Output = PathBuf; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/librustc") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(RustcDocs { stage: run.builder.top_stage, host: run.target, @@ -158,7 +150,7 @@ impl Step for RustcDocs { } /// Builds the `rustc-docs` installer component. - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let host = self.host; let name = pkgname(builder, "rustc-docs"); @@ -218,7 +210,7 @@ fn find_files(files: &[&str], path: &[PathBuf]) -> Vec { } fn make_win_dist( - rust_root: &Path, plat_root: &Path, target_triple: Interned, builder: &Builder + rust_root: &Path, plat_root: &Path, target_triple: Interned, builder: &Builder<'_> ) { //Ask gcc where it keeps its stuff let mut cmd = Command::new(builder.cc(target_triple)); @@ -234,7 +226,7 @@ fn make_win_dist( let trim_chars: &[_] = &[' ', '=']; let value = line[(idx + 1)..] 
- .trim_left_matches(trim_chars) + .trim_start_matches(trim_chars) .split(';') .map(PathBuf::from); @@ -342,19 +334,19 @@ impl Step for Mingw { type Output = Option; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Mingw { host: run.target }); } - /// Build the `rust-mingw` installer component. + /// Builds the `rust-mingw` installer component. /// /// This contains all the bits and pieces to run the MinGW Windows targets /// without any extra installed software (e.g., we bundle gcc, libraries, etc). - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let host = self.host; if !host.contains("pc-windows-gnu") { @@ -400,18 +392,18 @@ impl Step for Rustc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/librustc") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustc { compiler: run.builder.compiler(run.builder.top_stage, run.target), }); } /// Creates the `rustc` installer component. - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let host = self.compiler.host; @@ -478,24 +470,26 @@ impl Step for Rustc { return distdir(builder).join(format!("{}-{}.tar.gz", name, host)); - fn prepare_image(builder: &Builder, compiler: Compiler, image: &Path) { + fn prepare_image(builder: &Builder<'_>, compiler: Compiler, image: &Path) { let host = compiler.host; let src = builder.sysroot(compiler); - let libdir = libdir(&host); + let libdir = builder.rustc_libdir(compiler); // Copy rustc/rustdoc binaries t!(fs::create_dir_all(image.join("bin"))); builder.cp_r(&src.join("bin"), &image.join("bin")); - builder.install(&builder.rustdoc(compiler.host), &image.join("bin"), 0o755); + builder.install(&builder.rustdoc(compiler), &image.join("bin"), 0o755); + + let libdir_relative = builder.libdir_relative(compiler); // Copy runtime DLLs needed by the compiler - if libdir != "bin" { - for entry in builder.read_dir(&src.join(libdir)) { + if libdir_relative.to_str() != Some("bin") { + for entry in builder.read_dir(&libdir) { let name = entry.file_name(); if let Some(s) = name.to_str() { if is_dylib(s) { - builder.install(&entry.path(), &image.join(libdir), 0o644); + builder.install(&entry.path(), &image.join(&libdir_relative), 0o644); } } } @@ -536,7 +530,19 @@ impl Step for Rustc { t!(fs::create_dir_all(image.join("share/man/man1"))); let man_src = builder.src.join("src/doc/man"); let man_dst = image.join("share/man/man1"); - let month_year = t!(time::strftime("%B %Y", &time::now())); + + // Reproducible builds: If SOURCE_DATE_EPOCH is set, use that as the time. 
+ let time = env::var("SOURCE_DATE_EPOCH") + .map(|timestamp| { + let epoch = timestamp.parse().map_err(|err| { + format!("could not parse SOURCE_DATE_EPOCH: {}", err) + }).unwrap(); + + time::at(Timespec::new(epoch, 0)) + }) + .unwrap_or_else(|_| time::now()); + + let month_year = t!(time::strftime("%B %Y", &time)); // don't use our `bootstrap::util::{copy, cp_r}`, because those try // to hardlink, and we don't want to edit the source templates for file_entry in builder.read_dir(&man_src) { @@ -576,11 +582,11 @@ pub struct DebuggerScripts { impl Step for DebuggerScripts { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/lldb_batchmode.py") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(DebuggerScripts { sysroot: run.builder.sysroot(run.builder.compiler(run.builder.top_stage, run.host)), host: run.target, @@ -588,7 +594,7 @@ impl Step for DebuggerScripts { } /// Copies debugger scripts for `target` into the `sysroot` specified. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let host = self.host; let sysroot = self.sysroot; let dst = sysroot.join("lib/rustlib/etc"); @@ -610,6 +616,8 @@ impl Step for DebuggerScripts { // gdb debugger scripts builder.install(&builder.src.join("src/etc/rust-gdb"), &sysroot.join("bin"), 0o755); + builder.install(&builder.src.join("src/etc/rust-gdbgui"), &sysroot.join("bin"), + 0o755); cp_debugger_script("gdb_load_rust_pretty_printers.py"); cp_debugger_script("gdb_rust_pretty_printing.py"); @@ -633,18 +641,18 @@ impl Step for Std { type Output = PathBuf; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/libstd") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Std { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; @@ -681,10 +689,18 @@ impl Step for Std { let mut src = builder.sysroot_libdir(compiler, target).to_path_buf(); src.pop(); // Remove the trailing /lib folder from the sysroot_libdir builder.cp_filtered(&src, &dst, &|path| { - let name = path.file_name().and_then(|s| s.to_str()); - name != Some(builder.config.rust_codegen_backends_dir.as_str()) && - name != Some("bin") - + if let Some(name) = path.file_name().and_then(|s| s.to_str()) { + if name == builder.config.rust_codegen_backends_dir.as_str() { + return false + } + if name == "bin" { + return false + } + if name.contains("LLVM") { + return false + } + } + true }); let mut cmd = rust_installer(builder); @@ -714,12 +730,12 @@ impl Step for Analysis { type Output = PathBuf; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("analysis").default_condition(builder.config.extended) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Analysis { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, @@ -727,7 +743,7 @@ impl Step for Analysis { } /// Creates a tarball of save-analysis metadata, if available. 
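// A minimal sketch, not from this patch: the SOURCE_DATE_EPOCH handling above,
// isolated into a helper. It assumes the same `time` 0.1 API rustbuild already
// uses (Timespec, time::at, time::strftime); unlike the patch, which panics on an
// unparsable value, this variant just falls back to the current time.
use std::env;
use time::{self, Timespec};

fn man_page_date() -> String {
    let tm = env::var("SOURCE_DATE_EPOCH")
        .ok()
        .and_then(|raw| raw.parse::<i64>().ok())        // e.g. SOURCE_DATE_EPOCH=1552608000
        .map(|epoch| time::at(Timespec::new(epoch, 0))) // fixed, reproducible timestamp
        .unwrap_or_else(time::now);                     // otherwise: time of the build
    time::strftime("%B %Y", &tm).expect("valid strftime format")
}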
- fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let compiler = self.compiler; let target = self.target; assert!(builder.config.extended); @@ -777,7 +793,7 @@ impl Step for Analysis { } } -fn copy_src_dirs(builder: &Builder, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) { +fn copy_src_dirs(builder: &Builder<'_>, src_dirs: &[&str], exclude_dirs: &[&str], dst_dir: &Path) { fn filter_fn(exclude_dirs: &[&str], dir: &str, path: &Path) -> bool { let spath = match path.to_str() { Some(path) => path, @@ -786,7 +802,24 @@ fn copy_src_dirs(builder: &Builder, src_dirs: &[&str], exclude_dirs: &[&str], ds if spath.ends_with("~") || spath.ends_with(".pyc") { return false } - if (spath.contains("llvm/test") || spath.contains("llvm\\test")) && + + const LLVM_PROJECTS: &[&str] = &[ + "llvm-project/clang", "llvm-project\\clang", + "llvm-project/lld", "llvm-project\\lld", + "llvm-project/lldb", "llvm-project\\lldb", + "llvm-project/llvm", "llvm-project\\llvm", + ]; + if spath.contains("llvm-project") && !spath.ends_with("llvm-project") + && !LLVM_PROJECTS.iter().any(|path| spath.contains(path)) + { + return false; + } + + const LLVM_TEST: &[&str] = &[ + "llvm-project/llvm/test", "llvm-project\\llvm\\test", + "llvm-emscripten/test", "llvm-emscripten\\test", + ]; + if LLVM_TEST.iter().any(|path| spath.contains(path)) && (spath.ends_with(".ll") || spath.ends_with(".td") || spath.ends_with(".s")) { @@ -830,16 +863,16 @@ impl Step for Src { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Src); } /// Creates the `rust-src` installer component - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { builder.info("Dist src"); let name = pkgname(builder, "rust-src"); @@ -858,7 +891,6 @@ impl Step for Src { let std_src_dirs = [ "src/build_helper", "src/liballoc", - "src/libbacktrace", "src/libcore", "src/libpanic_abort", "src/libpanic_unwind", @@ -874,6 +906,8 @@ impl Step for Src { "src/stdsimd", "src/libproc_macro", "src/tools/rustc-std-workspace-core", + "src/librustc", + "src/libsyntax", ]; copy_src_dirs(builder, &std_src_dirs[..], &[], &dst_src); @@ -911,17 +945,17 @@ impl Step for PlainSourceTarball { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src").default_condition(builder.config.rust_dist_src) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(PlainSourceTarball); } /// Creates the plain source tarball - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { builder.info("Create plain source tarball"); // Make sure that the root folder of tarball has the correct name @@ -1039,18 +1073,18 @@ impl Step for Cargo { type Output = PathBuf; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("cargo") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Cargo { stage: run.builder.top_stage, target: run.target, }); } - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let stage = self.stage; let target = 
self.target; @@ -1125,18 +1159,18 @@ impl Step for Rls { type Output = Option; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("rls") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rls { stage: run.builder.top_stage, target: run.target, }); } - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let stage = self.stage; let target = self.target; assert!(builder.config.extended); @@ -1204,18 +1238,18 @@ impl Step for Clippy { type Output = Option; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("clippy") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Clippy { stage: run.builder.top_stage, target: run.target, }); } - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let stage = self.stage; let target = self.target; assert!(builder.config.extended); @@ -1278,6 +1312,90 @@ impl Step for Clippy { } } +#[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] +pub struct Miri { + pub stage: u32, + pub target: Interned, +} + +impl Step for Miri { + type Output = Option; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("miri") + } + + fn make_run(run: RunConfig<'_>) { + run.builder.ensure(Miri { + stage: run.builder.top_stage, + target: run.target, + }); + } + + fn run(self, builder: &Builder<'_>) -> Option { + let stage = self.stage; + let target = self.target; + assert!(builder.config.extended); + + builder.info(&format!("Dist miri stage{} ({})", stage, target)); + let src = builder.src.join("src/tools/miri"); + let release_num = builder.release_num("miri"); + let name = pkgname(builder, "miri"); + let version = builder.miri_info.version(builder, &release_num); + + let tmp = tmpdir(builder); + let image = tmp.join("miri-image"); + drop(fs::remove_dir_all(&image)); + builder.create_dir(&image); + + // Prepare the image directory + // We expect miri to build, because we've exited this step above if tool + // state for miri isn't testing. 
+ let miri = builder.ensure(tool::Miri { + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() + }).or_else(|| { missing_tool("miri", builder.build.config.missing_tools); None })?; + let cargomiri = builder.ensure(tool::CargoMiri { + compiler: builder.compiler(stage, builder.config.build), + target, extra_features: Vec::new() + }).or_else(|| { missing_tool("cargo miri", builder.build.config.missing_tools); None })?; + + builder.install(&miri, &image.join("bin"), 0o755); + builder.install(&cargomiri, &image.join("bin"), 0o755); + let doc = image.join("share/doc/miri"); + builder.install(&src.join("README.md"), &doc, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); + builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); + + // Prepare the overlay + let overlay = tmp.join("miri-overlay"); + drop(fs::remove_dir_all(&overlay)); + t!(fs::create_dir_all(&overlay)); + builder.install(&src.join("README.md"), &overlay, 0o644); + builder.install(&src.join("LICENSE-APACHE"), &doc, 0o644); + builder.install(&src.join("LICENSE-MIT"), &doc, 0o644); + builder.create(&overlay.join("version"), &version); + + // Generate the installer tarball + let mut cmd = rust_installer(builder); + cmd.arg("generate") + .arg("--product-name=Rust") + .arg("--rel-manifest-dir=rustlib") + .arg("--success-message=miri-ready-to-serve.") + .arg("--image-dir").arg(&image) + .arg("--work-dir").arg(&tmpdir(builder)) + .arg("--output-dir").arg(&distdir(builder)) + .arg("--non-installed-overlay").arg(&overlay) + .arg(format!("--package-name={}-{}", name, target)) + .arg("--legacy-manifest-dirs=rustlib,cargo") + .arg("--component-name=miri-preview"); + + builder.run(&mut cmd); + Some(distdir(builder).join(format!("{}-{}.tar.gz", name, target))) + } +} + #[derive(Debug, PartialOrd, Ord, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustfmt { pub stage: u32, @@ -1288,18 +1406,18 @@ impl Step for Rustfmt { type Output = Option; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("rustfmt") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustfmt { stage: run.builder.top_stage, target: run.target, }); } - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let stage = self.stage; let target = self.target; @@ -1371,12 +1489,12 @@ impl Step for Extended { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("extended").default_condition(builder.config.extended) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Extended { stage: run.builder.top_stage, host: run.builder.config.build, @@ -1385,7 +1503,7 @@ impl Step for Extended { } /// Creates a combined installer for the specified target in the provided stage. 
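// A minimal sketch, not from this patch: the optional-tool dist steps above (Miri
// here, Rls/Clippy/Rustfmt elsewhere) rely on `?` applied to an Option inside a
// run() that returns Option<PathBuf>: when the tool was not built, missing_tool is
// consulted and the step can bail out with None instead of producing a tarball.
// The control flow, with the builder machinery stripped out:
fn dist_optional_tool(built_tool: Option<&'static str>) -> Option<String> {
    let tool = built_tool.or_else(|| {
        eprintln!("tool was not built; skipping its installer"); // stand-in for missing_tool()
        None
    })?;                                   // early-returns None when nothing was built
    Some(format!("{}-preview.tar.gz", tool))
}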
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; @@ -1399,6 +1517,7 @@ impl Step for Extended { let rls_installer = builder.ensure(Rls { stage, target }); let llvm_tools_installer = builder.ensure(LlvmTools { stage, target }); let clippy_installer = builder.ensure(Clippy { stage, target }); + let miri_installer = builder.ensure(Miri { stage, target }); let lldb_installer = builder.ensure(Lldb { target }); let mingw_installer = builder.ensure(Mingw { host: target }); let analysis_installer = builder.ensure(Analysis { @@ -1437,6 +1556,7 @@ impl Step for Extended { tarballs.push(cargo_installer); tarballs.extend(rls_installer.clone()); tarballs.extend(clippy_installer.clone()); + tarballs.extend(miri_installer.clone()); tarballs.extend(rustfmt_installer.clone()); tarballs.extend(llvm_tools_installer); tarballs.extend(lldb_installer); @@ -1509,6 +1629,9 @@ impl Step for Extended { if clippy_installer.is_none() { contents = filter(&contents, "clippy"); } + if miri_installer.is_none() { + contents = filter(&contents, "miri"); + } if rustfmt_installer.is_none() { contents = filter(&contents, "rustfmt"); } @@ -1549,6 +1672,9 @@ impl Step for Extended { if clippy_installer.is_some() { prepare("clippy"); } + if miri_installer.is_some() { + prepare("miri"); + } // create an 'uninstall' package builder.install(&etc.join("pkg/postinstall"), &pkg.join("uninstall"), 0o755); @@ -1579,6 +1705,8 @@ impl Step for Extended { "rls-preview".to_string() } else if name == "clippy" { "clippy-preview".to_string() + } else if name == "miri" { + "miri-preview".to_string() } else { name.to_string() }; @@ -1598,6 +1726,9 @@ impl Step for Extended { if clippy_installer.is_some() { prepare("clippy"); } + if miri_installer.is_some() { + prepare("miri"); + } if target.contains("windows-gnu") { prepare("rust-mingw"); } @@ -1690,6 +1821,18 @@ impl Step for Extended { .arg("-out").arg(exe.join("ClippyGroup.wxs")) .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); } + if miri_installer.is_some() { + builder.run(Command::new(&heat) + .current_dir(&exe) + .arg("dir") + .arg("miri") + .args(&heat_flags) + .arg("-cg").arg("MiriGroup") + .arg("-dr").arg("Miri") + .arg("-var").arg("var.MiriDir") + .arg("-out").arg(exe.join("MiriGroup.wxs")) + .arg("-t").arg(etc.join("msi/remove-duplicates.xsl"))); + } builder.run(Command::new(&heat) .current_dir(&exe) .arg("dir") @@ -1735,6 +1878,9 @@ impl Step for Extended { if clippy_installer.is_some() { cmd.arg("-dClippyDir=clippy"); } + if miri_installer.is_some() { + cmd.arg("-dMiriDir=miri"); + } if target.contains("windows-gnu") { cmd.arg("-dGccDir=rust-mingw"); } @@ -1753,6 +1899,9 @@ impl Step for Extended { if clippy_installer.is_some() { candle("ClippyGroup.wxs".as_ref()); } + if miri_installer.is_some() { + candle("MiriGroup.wxs".as_ref()); + } candle("AnalysisGroup.wxs".as_ref()); if target.contains("windows-gnu") { @@ -1785,6 +1934,9 @@ impl Step for Extended { if clippy_installer.is_some() { cmd.arg("ClippyGroup.wixobj"); } + if miri_installer.is_some() { + cmd.arg("MiriGroup.wixobj"); + } if target.contains("windows-gnu") { cmd.arg("GccGroup.wixobj"); @@ -1801,7 +1953,7 @@ impl Step for Extended { } } -fn add_env(builder: &Builder, cmd: &mut Command, target: Interned) { +fn add_env(builder: &Builder<'_>, cmd: &mut Command, target: Interned) { let mut parts = channel::CFG_RELEASE_NUM.split('.'); cmd.env("CFG_RELEASE_INFO", builder.rust_version()) .env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM) @@ 
-1837,15 +1989,15 @@ impl Step for HashSign { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("hash-and-sign") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(HashSign); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let mut cmd = builder.tool_cmd(Tool::BuildManifest); if builder.config.dry_run { return; @@ -1867,13 +2019,14 @@ impl Step for HashSign { cmd.arg(distdir(builder)); cmd.arg(today.trim()); cmd.arg(builder.rust_package_vers()); + cmd.arg(addr); cmd.arg(builder.package_vers(&builder.release_num("cargo"))); cmd.arg(builder.package_vers(&builder.release_num("rls"))); cmd.arg(builder.package_vers(&builder.release_num("clippy"))); + cmd.arg(builder.package_vers(&builder.release_num("miri"))); cmd.arg(builder.package_vers(&builder.release_num("rustfmt"))); cmd.arg(builder.llvm_tools_package_vers()); cmd.arg(builder.lldb_package_vers()); - cmd.arg(addr); builder.create_dir(&distdir(builder)); @@ -1888,13 +2041,13 @@ impl Step for HashSign { // LLVM tools are linked dynamically. // Note: This function does no yet support Windows but we also don't support // linking LLVM tools dynamically on Windows yet. -fn maybe_install_llvm_dylib(builder: &Builder, - target: Interned, - image: &Path) { +pub fn maybe_install_llvm_dylib(builder: &Builder<'_>, + target: Interned, + sysroot: &Path) { let src_libdir = builder .llvm_out(target) .join("lib"); - let dst_libdir = image.join("lib/rustlib").join(&*target).join("lib"); + let dst_libdir = sysroot.join("lib/rustlib").join(&*target).join("lib"); t!(fs::create_dir_all(&dst_libdir)); if target.contains("apple-darwin") { @@ -1930,18 +2083,18 @@ impl Step for LlvmTools { type Output = Option; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("llvm-tools") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(LlvmTools { stage: run.builder.top_stage, target: run.target, }); } - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let stage = self.stage; let target = self.target; assert!(builder.config.extended); @@ -1956,7 +2109,7 @@ impl Step for LlvmTools { } builder.info(&format!("Dist LlvmTools stage{} ({})", stage, target)); - let src = builder.src.join("src/llvm"); + let src = builder.src.join("src/llvm-project/llvm"); let name = pkgname(builder, "llvm-tools"); let tmp = tmpdir(builder); @@ -2014,17 +2167,17 @@ impl Step for Lldb { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/lldb") + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/llvm-project/lldb").path("src/tools/lldb") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Lldb { target: run.target, }); } - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let target = self.target; if builder.config.dry_run { @@ -2040,7 +2193,7 @@ impl Step for Lldb { } builder.info(&format!("Dist Lldb ({})", target)); - let src = builder.src.join("src/tools/lldb"); + let src = builder.src.join("src/llvm-project/lldb"); let name = pkgname(builder, "lldb"); let tmp = tmpdir(builder); diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index 217328adfbf66..330f66c1df0df 100644 --- 
a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Documentation generation for rustbuilder. //! //! This module implements generation for all bits and pieces of documentation @@ -33,7 +23,7 @@ use crate::cache::{INTERNER, Interned}; use crate::config::Config; macro_rules! book { - ($($name:ident, $path:expr, $book_name:expr;)+) => { + ($($name:ident, $path:expr, $book_name:expr, $book_ver:expr;)+) => { $( #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct $name { @@ -44,21 +34,22 @@ macro_rules! book { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path($path).default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure($name { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(Rustbook { target: self.target, name: INTERNER.intern_str($book_name), + version: $book_ver, }) } } @@ -66,19 +57,29 @@ macro_rules! book { } } +// NOTE: When adding a book here, make sure to ALSO build the book by +// adding a build step in `src/bootstrap/builder.rs`! book!( - Nomicon, "src/doc/nomicon", "nomicon"; - Reference, "src/doc/reference", "reference"; - EditionGuide, "src/doc/edition-guide", "edition-guide"; - RustdocBook, "src/doc/rustdoc", "rustdoc"; - RustcBook, "src/doc/rustc", "rustc"; - RustByExample, "src/doc/rust-by-example", "rust-by-example"; + EditionGuide, "src/doc/edition-guide", "edition-guide", RustbookVersion::MdBook2; + EmbeddedBook, "src/doc/embedded-book", "embedded-book", RustbookVersion::MdBook2; + Nomicon, "src/doc/nomicon", "nomicon", RustbookVersion::MdBook1; + Reference, "src/doc/reference", "reference", RustbookVersion::MdBook1; + RustByExample, "src/doc/rust-by-example", "rust-by-example", RustbookVersion::MdBook1; + RustcBook, "src/doc/rustc", "rustc", RustbookVersion::MdBook1; + RustdocBook, "src/doc/rustdoc", "rustdoc", RustbookVersion::MdBook1; ); +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +enum RustbookVersion { + MdBook1, + MdBook2, +} + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] struct Rustbook { target: Interned, name: Interned, + version: RustbookVersion, } impl Step for Rustbook { @@ -86,7 +87,7 @@ impl Step for Rustbook { // rustbook is never directly called, and only serves as a shim for the nomicon and the // reference. - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -94,12 +95,13 @@ impl Step for Rustbook { /// /// This will not actually generate any documentation if the documentation has /// already been generated. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let src = builder.src.join("src/doc"); builder.ensure(RustbookSrc { target: self.target, name: self.name, src: INTERNER.intern_path(src), + version: self.version, }); } } @@ -113,18 +115,18 @@ impl Step for UnstableBook { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/doc/unstable-book").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(UnstableBook { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(UnstableBookGen { target: self.target, }); @@ -132,6 +134,7 @@ impl Step for UnstableBook { target: self.target, name: INTERNER.intern_str("unstable-book"), src: builder.md_doc_out(self.target), + version: RustbookVersion::MdBook1, }) } } @@ -146,19 +149,19 @@ impl Step for CargoBook { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/tools/cargo/src/doc/book").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(CargoBook { target: run.target, name: INTERNER.intern_str("cargo"), }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let name = self.name; let src = builder.src.join("src/tools/cargo/src/doc"); @@ -185,12 +188,13 @@ struct RustbookSrc { target: Interned, name: Interned, src: Interned, + version: RustbookVersion, } impl Step for RustbookSrc { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -198,7 +202,7 @@ impl Step for RustbookSrc { /// /// This will not actually generate any documentation if the documentation has /// already been generated. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let name = self.name; let src = self.src; @@ -215,11 +219,19 @@ impl Step for RustbookSrc { } builder.info(&format!("Rustbook ({}) - {}", target, name)); let _ = fs::remove_dir_all(&out); + + let vers = match self.version { + RustbookVersion::MdBook1 => "1", + RustbookVersion::MdBook2 => "2", + }; + builder.run(rustbook_cmd .arg("build") .arg(&src) .arg("-d") - .arg(out)); + .arg(out) + .arg("-m") + .arg(vers)); } } @@ -234,12 +246,12 @@ impl Step for TheBook { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/doc/book").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(TheBook { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, @@ -247,7 +259,7 @@ impl Step for TheBook { }); } - /// Build the book and associated stuff. + /// Builds the book and associated stuff. 
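// A minimal sketch, not from this patch: the RustbookVersion value the book! macro
// now threads into RustbookSrc only chooses the `-m` argument passed to the
// rustbook tool, as in the match above. Reduced to a standalone helper:
#[derive(Debug, Copy, Clone)]
enum RustbookVersion { MdBook1, MdBook2 }

fn mdbook_major_version(version: RustbookVersion) -> &'static str {
    match version {
        RustbookVersion::MdBook1 => "1", // books still built with mdbook 1.x
        RustbookVersion::MdBook2 => "2", // edition-guide, embedded-book, the book
    }
}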
/// /// We need to build: /// @@ -256,7 +268,7 @@ impl Step for TheBook { /// * Version info and CSS /// * Index page /// * Redirect pages - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; let name = self.name; @@ -265,6 +277,7 @@ impl Step for TheBook { builder.ensure(Rustbook { target, name: INTERNER.intern_string(name.to_string()), + version: RustbookVersion::MdBook2, }); // building older edition redirects @@ -273,18 +286,21 @@ impl Step for TheBook { builder.ensure(Rustbook { target, name: INTERNER.intern_string(source_name), + version: RustbookVersion::MdBook2, }); let source_name = format!("{}/second-edition", name); builder.ensure(Rustbook { target, name: INTERNER.intern_string(source_name), + version: RustbookVersion::MdBook2, }); let source_name = format!("{}/2018-edition", name); builder.ensure(Rustbook { target, name: INTERNER.intern_string(source_name), + version: RustbookVersion::MdBook2, }); // build the version info page and CSS @@ -305,29 +321,31 @@ impl Step for TheBook { } } -fn invoke_rustdoc(builder: &Builder, compiler: Compiler, target: Interned, markdown: &str) { +fn invoke_rustdoc( + builder: &Builder<'_>, + compiler: Compiler, + target: Interned, + markdown: &str, +) { let out = builder.doc_out(target); let path = builder.src.join("src/doc").join(markdown); - let favicon = builder.src.join("src/doc/favicon.inc"); + let header = builder.src.join("src/doc/redirect.inc"); let footer = builder.src.join("src/doc/footer.inc"); let version_info = out.join("version_info.html"); - let mut cmd = builder.rustdoc_cmd(compiler.host); + let mut cmd = builder.rustdoc_cmd(compiler); let out = out.join("book"); cmd.arg("--html-after-content").arg(&footer) .arg("--html-before-content").arg(&version_info) - .arg("--html-in-header").arg(&favicon) + .arg("--html-in-header").arg(&header) .arg("--markdown-no-toc") - .arg("--markdown-playground-url") - .arg("https://play.rust-lang.org/") - .arg("-o").arg(&out) - .arg(&path) - .arg("--markdown-css") - .arg("../rust.css"); + .arg("--markdown-playground-url").arg("https://play.rust-lang.org/") + .arg("-o").arg(&out).arg(&path) + .arg("--markdown-css").arg("../rust.css"); builder.run(&mut cmd); } @@ -342,12 +360,12 @@ impl Step for Standalone { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/doc").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Standalone { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, @@ -362,7 +380,7 @@ impl Step for Standalone { /// `STAMP` along with providing the various header/footer HTML we've customized. /// /// In the end, this is just a glorified wrapper around rustdoc! 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; let compiler = self.compiler; builder.info(&format!("Documenting standalone ({})", target)); @@ -394,7 +412,7 @@ impl Step for Standalone { } let html = out.join(filename).with_extension("html"); - let rustdoc = builder.rustdoc(compiler.host); + let rustdoc = builder.rustdoc(compiler); if up_to_date(&path, &html) && up_to_date(&footer, &html) && up_to_date(&favicon, &html) && @@ -404,14 +422,13 @@ impl Step for Standalone { continue } - let mut cmd = builder.rustdoc_cmd(compiler.host); + let mut cmd = builder.rustdoc_cmd(compiler); cmd.arg("--html-after-content").arg(&footer) .arg("--html-before-content").arg(&version_info) .arg("--html-in-header").arg(&favicon) .arg("--markdown-no-toc") .arg("--index-page").arg(&builder.src.join("src/doc/index.md")) - .arg("--markdown-playground-url") - .arg("https://play.rust-lang.org/") + .arg("--markdown-playground-url").arg("https://play.rust-lang.org/") .arg("-o").arg(&out) .arg(&path); @@ -436,12 +453,12 @@ impl Step for Std { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.all_krates("std").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Std { stage: run.builder.top_stage, target: run.target @@ -452,7 +469,7 @@ impl Step for Std { /// /// This will generate all documentation for the standard library and its /// dependencies. This is largely just a wrapper around `cargo doc`. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; builder.info(&format!("Documenting stage{} std ({})", stage, target)); @@ -501,6 +518,8 @@ impl Step for Std { cargo.arg("--") .arg("--markdown-css").arg("rust.css") .arg("--markdown-no-toc") + .arg("--generate-redirect-pages") + .arg("--resource-suffix").arg(crate::channel::CFG_RELEASE_NUM) .arg("--index-page").arg(&builder.src.join("src/doc/index.md")); builder.run(&mut cargo); @@ -522,12 +541,12 @@ impl Step for Test { type Output = (); const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.krate("test").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Test { stage: run.builder.top_stage, target: run.target, @@ -538,7 +557,7 @@ impl Step for Test { /// /// This will generate all documentation for libtest and its dependencies. This /// is largely just a wrapper around `cargo doc`. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; builder.info(&format!("Documenting stage{} test ({})", stage, target)); @@ -565,7 +584,10 @@ impl Step for Test { let mut cargo = builder.cargo(compiler, Mode::Test, target, "doc"); compile::test_cargo(builder, &compiler, target, &mut cargo); - cargo.arg("--no-deps").arg("-p").arg("test"); + cargo.arg("--no-deps") + .arg("-p").arg("test") + .env("RUSTDOC_RESOURCE_SUFFIX", crate::channel::CFG_RELEASE_NUM) + .env("RUSTDOC_GENERATE_REDIRECT_PAGES", "1"); builder.run(&mut cargo); builder.cp_r(&my_out, &out); @@ -583,19 +605,19 @@ impl Step for WhitelistedRustc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.krate("rustc-main").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(WhitelistedRustc { stage: run.builder.top_stage, target: run.target, }); } - /// Generate whitelisted compiler crate documentation. + /// Generates whitelisted compiler crate documentation. /// /// This will generate all documentation for crates that are whitelisted /// to be included in the standard documentation. This documentation is @@ -604,7 +626,7 @@ impl Step for WhitelistedRustc { /// documentation. We don't build other compiler documentation /// here as we want to be able to keep it separate from the standard /// documentation. This is largely just a wrapper around `cargo doc`. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; builder.info(&format!("Documenting stage{} whitelisted compiler ({})", stage, target)); @@ -634,9 +656,10 @@ impl Step for WhitelistedRustc { // We don't want to build docs for internal compiler dependencies in this // step (there is another step for that). Therefore, we whitelist the crates // for which docs must be built. - cargo.arg("--no-deps"); for krate in &["proc_macro"] { - cargo.arg("-p").arg(krate); + cargo.arg("-p").arg(krate) + .env("RUSTDOC_RESOURCE_SUFFIX", crate::channel::CFG_RELEASE_NUM) + .env("RUSTDOC_GENERATE_REDIRECT_PAGES", "1"); } builder.run(&mut cargo); @@ -655,25 +678,25 @@ impl Step for Rustc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.krate("rustc-main").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustc { stage: run.builder.top_stage, target: run.target, }); } - /// Generate compiler documentation. + /// Generates compiler documentation. /// /// This will generate all documentation for compiler and dependencies. /// Compiler documentation is distributed separately, so we make sure /// we do not merge it with the other documentation from std, test and /// proc_macros. This is largely just a wrapper around `cargo doc`. 
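// A minimal sketch, not from this patch: RUSTDOC_RESOURCE_SUFFIX and
// RUSTDOC_GENERATE_REDIRECT_PAGES, used by the libtest and whitelisted-rustc doc
// steps above, are ordinary environment variables placed on the `cargo doc`
// command (the assumption being that the rustdoc shim turns them into the
// matching rustdoc flags, like the --generate-redirect-pages passed for std).
use std::process::Command;

fn cargo_doc_with_redirects(release_num: &str) -> Command {
    let mut cargo = Command::new("cargo");
    cargo.arg("doc")
         .arg("--no-deps")
         .env("RUSTDOC_RESOURCE_SUFFIX", release_num)     // e.g. channel::CFG_RELEASE_NUM
         .env("RUSTDOC_GENERATE_REDIRECT_PAGES", "1");
    cargo
}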
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; builder.info(&format!("Documenting stage{} compiler ({})", stage, target)); @@ -731,7 +754,7 @@ impl Step for Rustc { } fn find_compiler_crates( - builder: &Builder, + builder: &Builder<'_>, name: &Interned, crates: &mut HashSet> ) { @@ -757,24 +780,24 @@ impl Step for Rustdoc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.krate("rustdoc-tool") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustdoc { stage: run.builder.top_stage, target: run.target, }); } - /// Generate compiler documentation. + /// Generates compiler documentation. /// /// This will generate all documentation for compiler and dependencies. /// Compiler documentation is distributed separately, so we make sure /// we do not merge it with the other documentation from std, test and /// proc_macros. This is largely just a wrapper around `cargo doc`. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let target = self.target; builder.info(&format!("Documenting stage{} rustdoc ({})", stage, target)); @@ -800,7 +823,7 @@ impl Step for Rustdoc { builder.ensure(Rustc { stage, target }); // Build rustdoc. - builder.ensure(tool::Rustdoc { host: compiler.host }); + builder.ensure(tool::Rustdoc { compiler: compiler }); // Symlink compiler docs to the output directory of rustdoc documentation. let out_dir = builder.stage_out(compiler, Mode::ToolRustc) @@ -840,12 +863,12 @@ impl Step for ErrorIndex { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/tools/error_index_generator").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(ErrorIndex { target: run.target, }); @@ -853,15 +876,20 @@ impl Step for ErrorIndex { /// Generates the HTML rendered error-index by running the /// `error_index_generator` tool. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; builder.info(&format!("Documenting error index ({})", target)); let out = builder.doc_out(target); t!(fs::create_dir_all(&out)); - let mut index = builder.tool_cmd(Tool::ErrorIndex); + let compiler = builder.compiler(2, builder.config.build); + let mut index = tool::ErrorIndex::command( + builder, + compiler, + ); index.arg("html"); index.arg(out.join("error-index.html")); + index.arg(crate::channel::CFG_RELEASE_NUM); // FIXME: shouldn't have to pass this env var index.env("CFG_BUILD", &builder.config.build) @@ -881,18 +909,18 @@ impl Step for UnstableBookGen { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/tools/unstable-book-gen").default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(UnstableBookGen { target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let target = self.target; builder.ensure(compile::Std { diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index 14e2f69432dac..a1f89d6c86f1d 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Command-line interface of the rustbuild build system. //! //! This module implements the command-line parsing of the build system which @@ -66,6 +56,7 @@ pub enum Subcommand { rustc_args: Vec, fail_fast: bool, doc_tests: DocTests, + rustfix_coverage: bool, }, Bench { paths: Vec, @@ -198,6 +189,12 @@ To learn more about a subcommand, run `./x.py -h`" "mode describing what file the actual ui output will be compared to", "COMPARE MODE", ); + opts.optflag( + "", + "rustfix-coverage", + "enable this to generate a Rustfix coverage file, which is saved in \ + `//rustfix_missing_coverage.txt`", + ); } "bench" => { opts.optmulti("", "test-args", "extra arguments", "ARGS"); @@ -373,6 +370,7 @@ Arguments: test_args: matches.opt_strs("test-args"), rustc_args: matches.opt_strs("rustc-args"), fail_fast: !matches.opt_present("no-fail-fast"), + rustfix_coverage: matches.opt_present("rustfix-coverage"), doc_tests: if matches.opt_present("doc") { DocTests::Only } else if matches.opt_present("no-doc") { @@ -477,6 +475,13 @@ impl Subcommand { } } + pub fn rustfix_coverage(&self) -> bool { + match *self { + Subcommand::Test { rustfix_coverage, .. } => rustfix_coverage, + _ => false, + } + } + pub fn compare_mode(&self) -> Option<&str> { match *self { Subcommand::Test { diff --git a/src/bootstrap/install.rs b/src/bootstrap/install.rs index aebcfb4519543..2d040d60e5fd7 100644 --- a/src/bootstrap/install.rs +++ b/src/bootstrap/install.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
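// [editor's note: minimal sketch of the getopts flow behind the new --rustfix-coverage
// flag in flags.rs above; not part of this patch. The flag is a plain boolean:
// optflag() declares it, opt_present() reads it, and rustbuild stores the result on
// Subcommand::Test.]
use getopts::Options;

fn parse_rustfix_coverage(args: &[String]) -> bool {
    let mut opts = Options::new();
    opts.optflag("", "rustfix-coverage", "enable this to generate a Rustfix coverage file");
    // rustbuild reports parse errors to the user; this sketch just defaults to false.
    opts.parse(args)
        .map(|matches| matches.opt_present("rustfix-coverage"))
        .unwrap_or(false)
}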
- //! Implementation of the install aspects of the compiler. //! //! This module is responsible for installing the standard library, @@ -24,42 +14,45 @@ use crate::builder::{Builder, RunConfig, ShouldRun, Step}; use crate::cache::Interned; use crate::config::Config; -pub fn install_docs(builder: &Builder, stage: u32, host: Interned) { +pub fn install_docs(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "docs", "rust-docs", stage, Some(host)); } -pub fn install_std(builder: &Builder, stage: u32, target: Interned) { +pub fn install_std(builder: &Builder<'_>, stage: u32, target: Interned) { install_sh(builder, "std", "rust-std", stage, Some(target)); } -pub fn install_cargo(builder: &Builder, stage: u32, host: Interned) { +pub fn install_cargo(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "cargo", "cargo", stage, Some(host)); } -pub fn install_rls(builder: &Builder, stage: u32, host: Interned) { +pub fn install_rls(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "rls", "rls", stage, Some(host)); } -pub fn install_clippy(builder: &Builder, stage: u32, host: Interned) { +pub fn install_clippy(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "clippy", "clippy", stage, Some(host)); } +pub fn install_miri(builder: &Builder<'_>, stage: u32, host: Interned) { + install_sh(builder, "miri", "miri", stage, Some(host)); +} -pub fn install_rustfmt(builder: &Builder, stage: u32, host: Interned) { +pub fn install_rustfmt(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "rustfmt", "rustfmt", stage, Some(host)); } -pub fn install_analysis(builder: &Builder, stage: u32, host: Interned) { +pub fn install_analysis(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "analysis", "rust-analysis", stage, Some(host)); } -pub fn install_src(builder: &Builder, stage: u32) { +pub fn install_src(builder: &Builder<'_>, stage: u32) { install_sh(builder, "src", "rust-src", stage, None); } -pub fn install_rustc(builder: &Builder, stage: u32, host: Interned) { +pub fn install_rustc(builder: &Builder<'_>, stage: u32, host: Interned) { install_sh(builder, "rustc", "rustc", stage, Some(host)); } fn install_sh( - builder: &Builder, + builder: &Builder<'_>, package: &str, name: &str, stage: u32, @@ -162,7 +155,7 @@ macro_rules! install { } #[allow(dead_code)] - fn should_install(builder: &Builder) -> bool { + fn should_install(builder: &Builder<'_>) -> bool { builder.config.tools.as_ref().map_or(false, |t| t.contains($path)) } } @@ -173,12 +166,12 @@ macro_rules! install { const ONLY_HOSTS: bool = $only_hosts; $(const $c: bool = true;)* - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let $_config = &run.builder.config; run.path($path).default_condition($default_cond) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure($name { stage: run.builder.top_stage, target: run.target, @@ -186,7 +179,7 @@ macro_rules! 
install { }); } - fn run($sel, $builder: &Builder) { + fn run($sel, $builder: &Builder<'_>) { $run_item } })+ @@ -227,6 +220,14 @@ install!((self, builder, _config), builder.info(&format!("skipping Install clippy stage{} ({})", self.stage, self.target)); } }; + Miri, "miri", Self::should_build(_config), only_hosts: true, { + if builder.ensure(dist::Miri { stage: self.stage, target: self.target }).is_some() || + Self::should_install(builder) { + install_miri(builder, self.stage, self.target); + } else { + builder.info(&format!("skipping Install miri stage{} ({})", self.stage, self.target)); + } + }; Rustfmt, "rustfmt", Self::should_build(_config), only_hosts: true, { if builder.ensure(dist::Rustfmt { stage: self.stage, target: self.target }).is_some() || Self::should_install(builder) { @@ -261,20 +262,20 @@ impl Step for Src { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let config = &run.builder.config; let cond = config.extended && config.tools.as_ref().map_or(true, |t| t.contains("src")); run.path("src").default_condition(cond) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Src { stage: run.builder.top_stage, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(dist::Src); install_src(builder, self.stage); } diff --git a/src/bootstrap/job.rs b/src/bootstrap/job.rs index a9da2c491da53..df492e0fdfd1c 100644 --- a/src/bootstrap/job.rs +++ b/src/bootstrap/job.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Job management on Windows for bootstrapping //! //! Most of the time when you're running a build system (e.g., make) you expect diff --git a/src/bootstrap/lib.rs b/src/bootstrap/lib.rs index c5b8f19eee6fb..bcd28e9cf5e70 100644 --- a/src/bootstrap/lib.rs +++ b/src/bootstrap/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of rustbuild, the Rust build system. //! //! This module, and its descendants, are the implementation of the Rust build @@ -79,7 +69,7 @@ //! ## Copying stage0 {std,test,rustc} //! //! This copies the build output from Cargo into -//! `build/$HOST/stage0-sysroot/lib/rustlib/$ARCH/lib`. FIXME: This step's +//! `build/$HOST/stage0-sysroot/lib/rustlib/$ARCH/lib`. FIXME: this step's //! documentation should be expanded -- the information already here may be //! incorrect. //! @@ -113,7 +103,7 @@ //! More documentation can be found in each respective module below, and you can //! also check out the `src/bootstrap/README.md` file for more information. 
-#![deny(bare_trait_objects)] +#![deny(rust_2018_idioms)] #![deny(warnings)] #![feature(core_intrinsics)] #![feature(drain_filter)] @@ -124,28 +114,16 @@ extern crate build_helper; extern crate serde_derive; #[macro_use] extern crate lazy_static; -extern crate serde_json; -extern crate cmake; -extern crate filetime; -extern crate cc; -extern crate getopts; -extern crate num_cpus; -extern crate toml; -extern crate time; -extern crate petgraph; #[cfg(test)] #[macro_use] extern crate pretty_assertions; -#[cfg(unix)] -extern crate libc; - use std::cell::{RefCell, Cell}; use std::collections::{HashSet, HashMap}; use std::env; use std::fs::{self, OpenOptions, File}; -use std::io::{self, Seek, SeekFrom, Write, Read}; +use std::io::{Seek, SeekFrom, Write, Read}; use std::path::{PathBuf, Path}; use std::process::{self, Command}; use std::slice; @@ -186,8 +164,6 @@ mod job; #[cfg(all(unix, not(target_os = "haiku")))] mod job { - use libc; - pub unsafe fn setup(build: &mut crate::Build) { if build.config.low_priority { libc::setpriority(libc::PRIO_PGRP as _, 0, 10); @@ -214,6 +190,7 @@ const LLVM_TOOLS: &[&str] = &[ "llvm-readobj", // used to get information from ELFs/objects that the other tools don't provide "llvm-size", // used to prints the size of the linker sections of a program "llvm-strip", // used to discard symbols from binary files to reduce their size + "llvm-ar" // used for creating and modifying archive files ]; /// A structure representing a Rust compiler. @@ -263,7 +240,10 @@ pub struct Build { cargo_info: channel::GitInfo, rls_info: channel::GitInfo, clippy_info: channel::GitInfo, + miri_info: channel::GitInfo, rustfmt_info: channel::GitInfo, + in_tree_llvm_info: channel::GitInfo, + emscripten_llvm_info: channel::GitInfo, local_rebuild: bool, fail_fast: bool, doc_tests: DocTests, @@ -380,11 +360,18 @@ impl Build { } None => false, }; - let rust_info = channel::GitInfo::new(&config, &src); - let cargo_info = channel::GitInfo::new(&config, &src.join("src/tools/cargo")); - let rls_info = channel::GitInfo::new(&config, &src.join("src/tools/rls")); - let clippy_info = channel::GitInfo::new(&config, &src.join("src/tools/clippy")); - let rustfmt_info = channel::GitInfo::new(&config, &src.join("src/tools/rustfmt")); + + let ignore_git = config.ignore_git; + let rust_info = channel::GitInfo::new(ignore_git, &src); + let cargo_info = channel::GitInfo::new(ignore_git, &src.join("src/tools/cargo")); + let rls_info = channel::GitInfo::new(ignore_git, &src.join("src/tools/rls")); + let clippy_info = channel::GitInfo::new(ignore_git, &src.join("src/tools/clippy")); + let miri_info = channel::GitInfo::new(ignore_git, &src.join("src/tools/miri")); + let rustfmt_info = channel::GitInfo::new(ignore_git, &src.join("src/tools/rustfmt")); + + // we always try to use git for LLVM builds + let in_tree_llvm_info = channel::GitInfo::new(false, &src.join("src/llvm-project")); + let emscripten_llvm_info = channel::GitInfo::new(false, &src.join("src/llvm-emscripten")); let mut build = Build { initial_rustc: config.initial_rustc.clone(), @@ -406,7 +393,10 @@ impl Build { cargo_info, rls_info, clippy_info, + miri_info, rustfmt_info, + in_tree_llvm_info, + emscripten_llvm_info, cc: HashMap::new(), cxx: HashMap::new(), ar: HashMap::new(), @@ -430,7 +420,7 @@ impl Build { Command::new(&build.initial_rustc).arg("--version").arg("--verbose")); let local_release = local_version_verbose .lines().filter(|x| x.starts_with("release:")) - .next().unwrap().trim_left_matches("release:").trim(); + 
.next().unwrap().trim_start_matches("release:").trim(); let my_version = channel::CFG_RELEASE_NUM; if local_release.split('.').take(2).eq(my_version.split('.').take(2)) { build.verbose(&format!("auto-detected local-rebuild {}", local_release)); @@ -511,11 +501,14 @@ impl Build { cleared } - /// Get the space-separated set of activated features for the standard + /// Gets the space-separated set of activated features for the standard /// library. fn std_features(&self) -> String { let mut features = "panic-unwind".to_string(); + if self.config.llvm_libunwind { + features.push_str(" llvm-libunwind"); + } if self.config.backtrace { features.push_str(" backtrace"); } @@ -528,7 +521,7 @@ impl Build { features } - /// Get the space-separated set of activated features for the compiler. + /// Gets the space-separated set of activated features for the compiler. fn rustc_features(&self) -> String { let mut features = String::new(); if self.config.jemalloc { @@ -616,7 +609,7 @@ impl Build { self.out.join(&*target).join("crate-docs") } - /// Returns true if no custom `llvm-config` is set for the specified target. + /// Returns `true` if no custom `llvm-config` is set for the specified target. /// /// If no custom `llvm-config` was specified then Rust's llvm will be used. fn is_rust_llvm(&self, target: Interned) -> bool { @@ -741,6 +734,17 @@ impl Build { } } + pub fn is_verbose_than(&self, level: usize) -> bool { + self.verbosity > level + } + + /// Prints a message if this build is configured in more verbose mode than `level`. + fn verbose_than(&self, level: usize, msg: &str) { + if self.is_verbose_than(level) { + println!("{}", msg); + } + } + fn info(&self, msg: &str) { if self.config.dry_run { return; } println!("{}", msg); @@ -838,6 +842,7 @@ impl Build { !target.contains("msvc") && !target.contains("emscripten") && !target.contains("wasm32") && + !target.contains("nvptx") && !target.contains("fuchsia") { Some(self.cc(target)) } else { @@ -863,13 +868,20 @@ impl Build { .map(|p| &**p) } - /// Returns true if this is a no-std `target`, if defined + /// Returns the sysroot for the wasi target, if defined + fn wasi_root(&self, target: Interned) -> Option<&Path> { + self.config.target_config.get(&target) + .and_then(|t| t.wasi_root.as_ref()) + .map(|p| &**p) + } + + /// Returns `true` if this is a no-std `target`, if defined fn no_std(&self, target: Interned) -> Option { self.config.target_config.get(&target) .map(|t| t.no_std) } - /// Returns whether the target will be tested using the `remote-test-client` + /// Returns `true` if the target will be tested using the `remote-test-client` /// and `remote-test-server` binaries. fn remote_tested(&self, target: Interned) -> bool { self.qemu_rootfs(target).is_some() || target.contains("android") || @@ -1026,6 +1038,11 @@ impl Build { self.package_vers(&self.release_num("clippy")) } + /// Returns the value of `package_vers` above for miri + fn miri_package_vers(&self) -> String { + self.package_vers(&self.release_num("miri")) + } + /// Returns the value of `package_vers` above for rustfmt fn rustfmt_package_vers(&self) -> String { self.package_vers(&self.release_num("rustfmt")) @@ -1060,7 +1077,7 @@ impl Build { self.rust_info.version(self, channel::CFG_RELEASE_NUM) } - /// Return the full commit hash + /// Returns the full commit hash. 
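// [editor's note: standalone sketch of the "release:" parsing in the hunk above, which
// Build::new uses to detect a local rebuild; not part of this patch]
fn parse_release_line(verbose_version: &str) -> Option<&str> {
    verbose_version
        .lines()
        .find(|line| line.starts_with("release:"))
        .map(|line| line.trim_start_matches("release:").trim())
}
// e.g. parse_release_line("rustc 1.35.0-dev\nrelease: 1.35.0-dev") == Some("1.35.0-dev")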
fn rust_sha(&self) -> Option<&str> { self.rust_info.sha() } @@ -1080,7 +1097,7 @@ impl Build { panic!("failed to find version in {}'s Cargo.toml", package) } - /// Returns whether unstable features should be enabled for the compiler + /// Returns `true` if unstable features should be enabled for the compiler /// we're building. fn unstable_features(&self) -> bool { match &self.config.channel[..] { @@ -1144,7 +1161,7 @@ impl Build { ret } - fn read_stamp_file(&self, stamp: &Path) -> Vec { + fn read_stamp_file(&self, stamp: &Path) -> Vec<(PathBuf, bool)> { if self.config.dry_run { return Vec::new(); } @@ -1157,8 +1174,9 @@ impl Build { if part.is_empty() { continue } - let path = PathBuf::from(t!(str::from_utf8(part))); - paths.push(path); + let host = part[0] as char == 'h'; + let path = PathBuf::from(t!(str::from_utf8(&part[1..]))); + paths.push((path, host)); } paths } @@ -1166,6 +1184,7 @@ impl Build { /// Copies a file from `src` to `dst` pub fn copy(&self, src: &Path, dst: &Path) { if self.config.dry_run { return; } + self.verbose_than(1, &format!("Copy {:?} to {:?}", src, dst)); let _ = fs::remove_file(&dst); let metadata = t!(src.symlink_metadata()); if metadata.file_type().is_symlink() { @@ -1259,15 +1278,22 @@ impl Build { fn install(&self, src: &Path, dstdir: &Path, perms: u32) { if self.config.dry_run { return; } let dst = dstdir.join(src.file_name().unwrap()); + self.verbose_than(1, &format!("Install {:?} to {:?}", src, dst)); t!(fs::create_dir_all(dstdir)); drop(fs::remove_file(&dst)); { if !src.exists() { panic!("Error: File \"{}\" not found!", src.display()); } - let mut s = t!(fs::File::open(&src)); - let mut d = t!(fs::File::create(&dst)); - io::copy(&mut s, &mut d).expect("failed to copy"); + let metadata = t!(src.symlink_metadata()); + if let Err(e) = fs::copy(&src, &dst) { + panic!("failed to copy `{}` to `{}`: {}", src.display(), + dst.display(), e) + } + t!(fs::set_permissions(&dst, metadata.permissions())); + let atime = FileTime::from_last_access_time(&metadata); + let mtime = FileTime::from_last_modification_time(&metadata); + t!(filetime::set_file_times(&dst, atime, mtime)); } chmod(&dst, perms); } @@ -1322,7 +1348,7 @@ impl<'a> Compiler { self } - /// Returns whether this is a snapshot compiler for `build`'s configuration + /// Returns `true` if this is a snapshot compiler for `build`'s configuration pub fn is_snapshot(&self, build: &Build) -> bool { self.stage == 0 && self.host == build.build } diff --git a/src/bootstrap/metadata.rs b/src/bootstrap/metadata.rs index bb503e8b8d3d1..7fa377f310b4f 100644 --- a/src/bootstrap/metadata.rs +++ b/src/bootstrap/metadata.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::HashMap; use std::process::Command; use std::path::PathBuf; diff --git a/src/bootstrap/mk/Makefile.in b/src/bootstrap/mk/Makefile.in index 862fbbf1f286b..07be27c2f5a02 100644 --- a/src/bootstrap/mk/Makefile.in +++ b/src/bootstrap/mk/Makefile.in @@ -1,13 +1,3 @@ -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
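// [editor's note: sketch of the stamp-file decoding introduced in read_stamp_file above,
// where each entry now carries a leading 'h' byte marking a host artifact before the
// path; the writer side is not shown in this hunk, so the prefix meaning is inferred.]
use std::path::PathBuf;

fn decode_stamp_entry(entry: &[u8]) -> Option<(PathBuf, bool)> {
    if entry.is_empty() {
        return None;
    }
    let host = entry[0] as char == 'h';
    let path = PathBuf::from(String::from_utf8_lossy(&entry[1..]).into_owned());
    Some((path, host))
}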
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - ifdef VERBOSE Q := BOOTSTRAP_ARGS := -v @@ -58,12 +48,10 @@ check: $(Q)$(BOOTSTRAP) test $(BOOTSTRAP_ARGS) check-aux: $(Q)$(BOOTSTRAP) test \ - src/test/pretty \ src/test/run-pass/pretty \ src/test/run-fail/pretty \ src/test/run-pass-valgrind/pretty \ src/test/run-pass-fulldeps/pretty \ - src/test/run-fail-fulldeps/pretty \ $(AUX_ARGS) \ $(BOOTSTRAP_ARGS) check-bootstrap: diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index d9f51f6fd3d07..fde40b0d1b407 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Compilation of native dependencies like LLVM. //! //! Native projects like LLVM unfortunately aren't suited just yet for @@ -28,6 +18,7 @@ use build_helper::output; use cmake; use cc; +use crate::channel; use crate::util::{self, exe}; use build_helper::up_to_date; use crate::builder::{Builder, RunConfig, ShouldRun, Step}; @@ -45,11 +36,14 @@ impl Step for Llvm { const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/llvm").path("src/llvm-emscripten") + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/llvm-project") + .path("src/llvm-project/llvm") + .path("src/llvm") + .path("src/llvm-emscripten") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let emscripten = run.path.ends_with("llvm-emscripten"); run.builder.ensure(Llvm { target: run.target, @@ -58,7 +52,7 @@ impl Step for Llvm { } /// Compile LLVM for `target`. - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { let target = self.target; let emscripten = self.emscripten; @@ -73,30 +67,40 @@ impl Step for Llvm { } } - let rebuild_trigger = builder.src.join("src/rustllvm/llvm-rebuild-trigger"); - let rebuild_trigger_contents = t!(fs::read_to_string(&rebuild_trigger)); - - let (out_dir, llvm_config_ret_dir) = if emscripten { + let (llvm_info, root, out_dir, llvm_config_ret_dir) = if emscripten { + let info = &builder.emscripten_llvm_info; let dir = builder.emscripten_llvm_out(target); let config_dir = dir.join("bin"); - (dir, config_dir) + (info, "src/llvm-emscripten", dir, config_dir) } else { + let info = &builder.in_tree_llvm_info; let mut dir = builder.llvm_out(builder.config.build); if !builder.config.build.contains("msvc") || builder.config.ninja { dir.push("build"); } - (builder.llvm_out(target), dir.join("bin")) + (info, "src/llvm-project/llvm", builder.llvm_out(target), dir.join("bin")) }; - let done_stamp = out_dir.join("llvm-finished-building"); + + if !llvm_info.is_git() { + println!( + "git could not determine the LLVM submodule commit hash. 
\ + Assuming that an LLVM build is necessary.", + ); + } + let build_llvm_config = llvm_config_ret_dir .join(exe("llvm-config", &*builder.config.build)); - if done_stamp.exists() { - let done_contents = t!(fs::read_to_string(&done_stamp)); + let done_stamp = out_dir.join("llvm-finished-building"); + + if let Some(llvm_commit) = llvm_info.sha() { + if done_stamp.exists() { + let done_contents = t!(fs::read(&done_stamp)); - // If LLVM was already built previously and contents of the rebuild-trigger file - // didn't change from the previous build, then no action is required. - if done_contents == rebuild_trigger_contents { - return build_llvm_config + // If LLVM was already built previously and the submodule's commit didn't change + // from the previous build, then no action is required. + if done_contents == llvm_commit.as_bytes() { + return build_llvm_config + } } } @@ -107,7 +111,6 @@ impl Step for Llvm { t!(fs::create_dir_all(&out_dir)); // http://llvm.org/docs/CMake.html - let root = if self.emscripten { "src/llvm-emscripten" } else { "src/llvm" }; let mut cfg = cmake::Config::new(builder.src.join(root)); let profile = match (builder.config.llvm_optimize, builder.config.llvm_release_debuginfo) { @@ -199,10 +202,10 @@ impl Step for Llvm { } if want_lldb { - cfg.define("LLVM_EXTERNAL_CLANG_SOURCE_DIR", builder.src.join("src/tools/clang")); - cfg.define("LLVM_EXTERNAL_LLDB_SOURCE_DIR", builder.src.join("src/tools/lldb")); + cfg.define("LLVM_ENABLE_PROJECTS", "clang;lldb"); // For the time being, disable code signing. cfg.define("LLDB_CODESIGN_IDENTITY", ""); + cfg.define("LLDB_NO_DEBUGSERVER", "ON"); } else { // LLDB requires libxml2; but otherwise we want it to be disabled. // See https://github.com/rust-lang/rust/pull/50104 @@ -238,14 +241,36 @@ impl Step for Llvm { } if let Some(ref suffix) = builder.config.llvm_version_suffix { - cfg.define("LLVM_VERSION_SUFFIX", suffix); + // Allow version-suffix="" to not define a version suffix at all. 
+ if !suffix.is_empty() { + cfg.define("LLVM_VERSION_SUFFIX", suffix); + } + } else { + let mut default_suffix = format!( + "-rust-{}-{}", + channel::CFG_RELEASE_NUM, + builder.config.channel, + ); + if let Some(sha) = llvm_info.sha_short() { + default_suffix.push_str("-"); + default_suffix.push_str(sha); + } + cfg.define("LLVM_VERSION_SUFFIX", default_suffix); + } + + if let Some(ref linker) = builder.config.llvm_use_linker { + cfg.define("LLVM_USE_LINKER", linker); + } + + if let Some(true) = builder.config.llvm_allow_old_toolchain { + cfg.define("LLVM_TEMPORARILY_ALLOW_OLD_TOOLCHAIN", "YES"); } if let Some(ref python) = builder.config.python { cfg.define("PYTHON_EXECUTABLE", python); } - configure_cmake(builder, target, &mut cfg, false); + configure_cmake(builder, target, &mut cfg); // FIXME: we don't actually need to build all LLVM tools and all LLVM // libraries here, e.g., we just want a few components and a few @@ -258,13 +283,15 @@ impl Step for Llvm { cfg.build(); - t!(fs::write(&done_stamp, &rebuild_trigger_contents)); + if let Some(llvm_commit) = llvm_info.sha() { + t!(fs::write(&done_stamp, llvm_commit)); + } build_llvm_config } } -fn check_llvm_version(builder: &Builder, llvm_config: &Path) { +fn check_llvm_version(builder: &Builder<'_>, llvm_config: &Path) { if !builder.config.llvm_version_check { return } @@ -285,10 +312,9 @@ fn check_llvm_version(builder: &Builder, llvm_config: &Path) { panic!("\n\nbad LLVM version: {}, need >=6.0\n\n", version) } -fn configure_cmake(builder: &Builder, +fn configure_cmake(builder: &Builder<'_>, target: Interned, - cfg: &mut cmake::Config, - building_dist_binaries: bool) { + cfg: &mut cmake::Config) { if builder.config.ninja { cfg.generator("Ninja"); } @@ -357,26 +383,32 @@ fn configure_cmake(builder: &Builder, if builder.config.llvm_clang_cl.is_some() && target.contains("i686") { cfg.env("SCCACHE_EXTRA_ARGS", "-m32"); } - - // If ccache is configured we inform the build a little differently how - // to invoke ccache while also invoking our compilers. - } else if let Some(ref ccache) = builder.config.ccache { - cfg.define("CMAKE_C_COMPILER", ccache) - .define("CMAKE_C_COMPILER_ARG1", sanitize_cc(cc)) - .define("CMAKE_CXX_COMPILER", ccache) - .define("CMAKE_CXX_COMPILER_ARG1", sanitize_cc(cxx)); } else { + // If ccache is configured we inform the build a little differently how + // to invoke ccache while also invoking our compilers. 
+ if let Some(ref ccache) = builder.config.ccache { + cfg.define("CMAKE_C_COMPILER_LAUNCHER", ccache) + .define("CMAKE_CXX_COMPILER_LAUNCHER", ccache); + } cfg.define("CMAKE_C_COMPILER", sanitize_cc(cc)) .define("CMAKE_CXX_COMPILER", sanitize_cc(cxx)); } cfg.build_arg("-j").build_arg(builder.jobs().to_string()); - cfg.define("CMAKE_C_FLAGS", builder.cflags(target, GitRepo::Llvm).join(" ")); + let mut cflags = builder.cflags(target, GitRepo::Llvm).join(" "); + if let Some(ref s) = builder.config.llvm_cxxflags { + cflags.push_str(&format!(" {}", s)); + } + cfg.define("CMAKE_C_FLAGS", cflags); let mut cxxflags = builder.cflags(target, GitRepo::Llvm).join(" "); - if building_dist_binaries { - if builder.config.llvm_static_stdcpp && !target.contains("windows") { - cxxflags.push_str(" -static-libstdc++"); - } + if builder.config.llvm_static_stdcpp && + !target.contains("windows") && + !target.contains("netbsd") + { + cxxflags.push_str(" -static-libstdc++"); + } + if let Some(ref s) = builder.config.llvm_cxxflags { + cxxflags.push_str(&format!(" {}", s)); } cfg.define("CMAKE_CXX_FLAGS", cxxflags); if let Some(ar) = builder.ar(target) { @@ -395,6 +427,12 @@ fn configure_cmake(builder: &Builder, } } + if let Some(ref s) = builder.config.llvm_ldflags { + cfg.define("CMAKE_SHARED_LINKER_FLAGS", s); + cfg.define("CMAKE_MODULE_LINKER_FLAGS", s); + cfg.define("CMAKE_EXE_LINKER_FLAGS", s); + } + if env::var_os("SCCACHE_ERROR_LOG").is_some() { cfg.env("RUST_LOG", "sccache=warn"); } @@ -409,16 +447,16 @@ impl Step for Lld { type Output = PathBuf; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/tools/lld") + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/llvm-project/lld").path("src/tools/lld") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Lld { target: run.target }); } /// Compile LLVM for `target`. - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { if builder.config.dry_run { return PathBuf::from("lld-out-dir-test-gen"); } @@ -440,8 +478,8 @@ impl Step for Lld { let _time = util::timeit(&builder); t!(fs::create_dir_all(&out_dir)); - let mut cfg = cmake::Config::new(builder.src.join("src/tools/lld")); - configure_cmake(builder, target, &mut cfg, true); + let mut cfg = cmake::Config::new(builder.src.join("src/llvm-project/lld")); + configure_cmake(builder, target, &mut cfg); // This is an awful, awful hack. Discovered when we migrated to using // clang-cl to compile LLVM/LLD it turns out that LLD, when built out of @@ -481,17 +519,17 @@ pub struct TestHelpers { impl Step for TestHelpers { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/test/auxiliary/rust_test_helpers.c") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(TestHelpers { target: run.target }) } /// Compiles the `rust_test_helpers.c` library which we used in various /// `run-pass` test suites for ABI testing. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { if builder.config.dry_run { return; } diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index f585495b0aa94..fa6857cdc1125 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
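// [editor's note: condensed sketch of the LLVM rebuild check introduced in native.rs
// above; not part of this patch. The old rebuild-trigger file is replaced by recording
// the submodule commit in the llvm-finished-building stamp and rebuilding whenever the
// recorded commit differs or cannot be determined.]
use std::fs;
use std::path::Path;

fn llvm_needs_rebuild(done_stamp: &Path, llvm_commit: Option<&str>) -> bool {
    match llvm_commit {
        // Known submodule commit: rebuild only if the recorded commit differs or is missing.
        Some(commit) => fs::read(done_stamp)
            .map(|previous| previous != commit.as_bytes())
            .unwrap_or(true),
        // No git information: we cannot tell what was built last time, so rebuild.
        None => true,
    }
}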
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Sanity checking performed by rustbuild before actually executing anything. //! //! This module contains the implementation of ensuring that the build @@ -44,15 +34,17 @@ impl Finder { fn maybe_have>(&mut self, cmd: S) -> Option { let cmd: OsString = cmd.as_ref().into(); - let path = self.path.clone(); + let path = &self.path; self.cache.entry(cmd.clone()).or_insert_with(|| { - for path in env::split_paths(&path) { + for path in env::split_paths(path) { let target = path.join(&cmd); - let mut cmd_alt = cmd.clone(); - cmd_alt.push(".exe"); - if target.is_file() || // some/path/git - target.with_extension("exe").exists() || // some/path/git.exe - target.join(&cmd_alt).exists() { // some/path/git/git.exe + let mut cmd_exe = cmd.clone(); + cmd_exe.push(".exe"); + + if target.is_file() // some/path/git + || path.join(&cmd_exe).exists() // some/path/git.exe + || target.join(&cmd_exe).exists() // some/path/git/git.exe + { return Some(target); } } @@ -117,9 +109,9 @@ pub fn check(build: &mut Build) { } build.config.python = build.config.python.take().map(|p| cmd_finder.must_have(p)) - .or_else(|| env::var_os("BOOTSTRAP_PYTHON").map(PathBuf::from)) // set by bootstrap.py .or_else(|| cmd_finder.maybe_have("python2.7")) .or_else(|| cmd_finder.maybe_have("python2")) + .or_else(|| env::var_os("BOOTSTRAP_PYTHON").map(PathBuf::from)) // set by bootstrap.py .or_else(|| Some(cmd_finder.must_have("python"))); build.config.nodejs = build.config.nodejs.take().map(|p| cmd_finder.must_have(p)) @@ -139,6 +131,11 @@ pub fn check(build: &mut Build) { continue; } + // We don't use a C compiler on wasm32 + if target.contains("wasm32") { + continue; + } + if !build.config.dry_run { cmd_finder.must_have(build.cc(*target)); if let Some(ar) = build.ar(*target) { @@ -166,7 +163,7 @@ pub fn check(build: &mut Build) { panic!("the iOS target is only supported on macOS"); } - if target.contains("-none-") { + if target.contains("-none-") || target.contains("nvptx") { if build.no_std(*target).is_none() { let target = build.config.target_config.entry(target.clone()) .or_default(); @@ -175,7 +172,7 @@ pub fn check(build: &mut Build) { } if build.no_std(*target) == Some(false) { - panic!("All the *-none-* targets are no-std targets") + panic!("All the *-none-* and nvptx* targets are no-std targets") } } diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs index 11932d58ceac6..c552f607960b4 100644 --- a/src/bootstrap/test.rs +++ b/src/bootstrap/test.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of the test-related targets of the build system. //! //! This file implements the various regression test suites that we execute on @@ -40,9 +30,9 @@ const ADB_TEST_DIR: &str = "/data/tmp/work"; /// The two modes of the test runner; tests or benchmarks. #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, PartialOrd, Ord)] pub enum TestKind { - /// Run `cargo test` + /// Run `cargo test`. Test, - /// Run `cargo bench` + /// Run `cargo bench`. 
Bench, } @@ -67,7 +57,7 @@ impl TestKind { } impl fmt::Display for TestKind { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match *self { TestKind::Test => "Testing", TestKind::Bench => "Benchmarking", @@ -75,7 +65,7 @@ impl fmt::Display for TestKind { } } -fn try_run(builder: &Builder, cmd: &mut Command) -> bool { +fn try_run(builder: &Builder<'_>, cmd: &mut Command) -> bool { if !builder.fail_fast { if !builder.try_run(cmd) { let mut failures = builder.delayed_failures.borrow_mut(); @@ -88,7 +78,7 @@ fn try_run(builder: &Builder, cmd: &mut Command) -> bool { true } -fn try_run_quiet(builder: &Builder, cmd: &mut Command) -> bool { +fn try_run_quiet(builder: &Builder<'_>, cmd: &mut Command) -> bool { if !builder.fail_fast { if !builder.try_run_quiet(cmd) { let mut failures = builder.delayed_failures.borrow_mut(); @@ -115,7 +105,7 @@ impl Step for Linkcheck { /// /// This tool in `src/tools` will verify the validity of all our links in the /// documentation to ensure we don't have a bunch of dead ones. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let host = self.host; builder.info(&format!("Linkcheck ({})", host)); @@ -131,13 +121,13 @@ impl Step for Linkcheck { ); } - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/tools/linkchecker") .default_condition(builder.config.docs) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Linkcheck { host: run.target }); } } @@ -152,11 +142,11 @@ impl Step for Cargotest { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/cargotest") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Cargotest { stage: run.builder.top_stage, host: run.target, @@ -167,7 +157,7 @@ impl Step for Cargotest { /// /// This tool in `src/tools` will check out a few Rust projects and run `cargo /// test` to ensure that we don't regress the test suites there. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(self.stage, self.host); builder.ensure(compile::Rustc { compiler, @@ -187,7 +177,7 @@ impl Step for Cargotest { cmd.arg(&builder.initial_cargo) .arg(&out_dir) .env("RUSTC", builder.rustc(compiler)) - .env("RUSTDOC", builder.rustdoc(compiler.host)), + .env("RUSTDOC", builder.rustdoc(compiler)), ); } } @@ -202,11 +192,11 @@ impl Step for Cargo { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/cargo") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Cargo { stage: run.builder.top_stage, host: run.target, @@ -214,7 +204,7 @@ impl Step for Cargo { } /// Runs `cargo test` for `cargo` packaged with Rust. 
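// [editor's note: standalone sketch of the PATH probing done by Finder::maybe_have in
// the sanity.rs hunk a little further up; simplified, with no result caching.]
use std::env;
use std::ffi::OsString;
use std::path::PathBuf;

fn find_in_path(cmd: &str) -> Option<PathBuf> {
    let path = env::var_os("PATH")?;
    for dir in env::split_paths(&path) {
        let target = dir.join(cmd);
        let mut cmd_exe = OsString::from(cmd);
        cmd_exe.push(".exe");

        if target.is_file()                   // some/path/git
            || dir.join(&cmd_exe).exists()    // some/path/git.exe
            || target.join(&cmd_exe).exists() // some/path/git/git.exe
        {
            return Some(target);
        }
    }
    None
}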
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = builder.compiler(self.stage, self.host); builder.ensure(tool::Cargo { @@ -257,11 +247,11 @@ impl Step for Rls { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/rls") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rls { stage: run.builder.top_stage, host: run.target, @@ -269,7 +259,7 @@ impl Step for Rls { } /// Runs `cargo test` for the rls. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let host = self.host; let compiler = builder.compiler(stage, host); @@ -293,13 +283,6 @@ impl Step for Rls { SourceType::Submodule, &[]); - // Copy `src/tools/rls/test_data` to a writable drive. - let test_workspace_path = builder.out.join("rls-test-data"); - let test_data_path = test_workspace_path.join("test_data"); - builder.create_dir(&test_data_path); - builder.cp_r(&builder.src.join("src/tools/rls/test_data"), &test_data_path); - cargo.env("RLS_TEST_WORKSPACE_DIR", test_workspace_path); - builder.add_rustc_lib_path(compiler, &mut cargo); cargo.arg("--") .args(builder.config.cmd.test_args()); @@ -320,11 +303,11 @@ impl Step for Rustfmt { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/rustfmt") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustfmt { stage: run.builder.top_stage, host: run.target, @@ -332,7 +315,7 @@ impl Step for Rustfmt { } /// Runs `cargo test` for rustfmt. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let host = self.host; let compiler = builder.compiler(stage, host); @@ -379,12 +362,12 @@ impl Step for Miri { const ONLY_HOSTS: bool = true; const DEFAULT: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let test_miri = run.builder.config.test_miri; run.path("src/tools/miri").default_condition(test_miri) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Miri { stage: run.builder.top_stage, host: run.target, @@ -392,7 +375,7 @@ impl Step for Miri { } /// Runs `cargo test` for miri. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let host = self.host; let compiler = builder.compiler(stage, host); @@ -431,29 +414,26 @@ impl Step for Miri { #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct CompiletestTest { - stage: u32, host: Interned, } impl Step for CompiletestTest { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/compiletest") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(CompiletestTest { - stage: run.builder.top_stage, host: run.target, }); } /// Runs `cargo test` for compiletest. 
- fn run(self, builder: &Builder) { - let stage = self.stage; + fn run(self, builder: &Builder<'_>) { let host = self.host; - let compiler = builder.compiler(stage, host); + let compiler = builder.compiler(0, host); let mut cargo = tool::prepare_tool_cargo(builder, compiler, @@ -479,11 +459,11 @@ impl Step for Clippy { const ONLY_HOSTS: bool = true; const DEFAULT: bool = false; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/clippy") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Clippy { stage: run.builder.top_stage, host: run.target, @@ -491,7 +471,7 @@ impl Step for Clippy { } /// Runs `cargo test` for clippy. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let stage = self.stage; let host = self.host; let compiler = builder.compiler(stage, host); @@ -533,7 +513,7 @@ impl Step for Clippy { } } -fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString { +fn path_for_cargo(builder: &Builder<'_>, compiler: Compiler) -> OsString { // Configure PATH to find the right rustc. NB. we have to use PATH // and not RUSTC because the Cargo test suite has tests that will // fail if rustc is not spelled `rustc`. @@ -552,17 +532,17 @@ impl Step for RustdocTheme { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/rustdoc-themes") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let compiler = run.builder.compiler(run.builder.top_stage, run.host); run.builder.ensure(RustdocTheme { compiler }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let rustdoc = builder.out.join("bootstrap/debug/rustdoc"); let mut cmd = builder.tool_cmd(Tool::RustdocTheme); cmd.arg(rustdoc.to_str().unwrap()) @@ -580,7 +560,7 @@ impl Step for RustdocTheme { builder.sysroot_libdir(self.compiler, self.compiler.host), ) .env("CFG_RELEASE_CHANNEL", &builder.config.channel) - .env("RUSTDOC_REAL", builder.rustdoc(self.compiler.host)) + .env("RUSTDOC_REAL", builder.rustdoc(self.compiler)) .env("RUSTDOC_CRATE_VERSION", builder.rust_version()) .env("RUSTC_BOOTSTRAP", "1"); if let Some(linker) = builder.linker(self.compiler.host) { @@ -591,36 +571,79 @@ impl Step for RustdocTheme { } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct RustdocJS { +pub struct RustdocJSStd { pub host: Interned, pub target: Interned, } -impl Step for RustdocJS { +impl Step for RustdocJSStd { type Output = (); const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { - run.path("src/test/rustdoc-js") + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/test/rustdoc-js-std") } - fn make_run(run: RunConfig) { - run.builder.ensure(RustdocJS { + fn make_run(run: RunConfig<'_>) { + run.builder.ensure(RustdocJSStd { host: run.host, target: run.target, }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { if let Some(ref nodejs) = builder.config.nodejs { let mut command = Command::new(nodejs); - command.args(&["src/tools/rustdoc-js/tester.js", &*self.host]); + command.args(&["src/tools/rustdoc-js-std/tester.js", &*self.host]); builder.ensure(crate::doc::Std { target: self.target, stage: builder.top_stage, }); builder.run(&mut command); + } else { + builder.info( + "No nodejs found, skipping \"src/test/rustdoc-js-std\" tests" + ); + 
} + } +} + +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct RustdocJSNotStd { + pub host: Interned, + pub target: Interned, + pub compiler: Compiler, +} + +impl Step for RustdocJSNotStd { + type Output = (); + const DEFAULT: bool = true; + const ONLY_HOSTS: bool = true; + + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/test/rustdoc-js") + } + + fn make_run(run: RunConfig<'_>) { + let compiler = run.builder.compiler(run.builder.top_stage, run.host); + run.builder.ensure(RustdocJSNotStd { + host: run.host, + target: run.target, + compiler, + }); + } + + fn run(self, builder: &Builder<'_>) { + if builder.config.nodejs.is_some() { + builder.ensure(Compiletest { + compiler: self.compiler, + target: self.target, + mode: "js-doc-test", + suite: "rustdoc-js", + path: None, + compare_mode: None, + }); } else { builder.info( "No nodejs found, skipping \"src/test/rustdoc-js\" tests" @@ -641,11 +664,11 @@ impl Step for RustdocUi { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/test/rustdoc-ui") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let compiler = run.builder.compiler(run.builder.top_stage, run.host); run.builder.ensure(RustdocUi { host: run.host, @@ -654,7 +677,7 @@ impl Step for RustdocUi { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(Compiletest { compiler: self.compiler, target: self.target, @@ -679,7 +702,7 @@ impl Step for Tidy { /// This tool in `src/tools` checks up on various bits and pieces of style and /// otherwise just implements a few lint-like checks that are specific to the /// compiler itself. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let mut cmd = builder.tool_cmd(Tool::Tidy); cmd.arg(builder.src.join("src")); cmd.arg(&builder.initial_cargo); @@ -695,16 +718,16 @@ impl Step for Tidy { try_run(builder, &mut cmd); } - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/tidy") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Tidy); } } -fn testdir(builder: &Builder, host: Interned) -> PathBuf { +fn testdir(builder: &Builder<'_>, host: Interned) -> PathBuf { builder.out.join(host).join("test") } @@ -764,11 +787,11 @@ macro_rules! test_definitions { const DEFAULT: bool = $default; const ONLY_HOSTS: bool = $host; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.suite_path($path) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let compiler = run.builder.compiler(run.builder.top_stage, run.host); run.builder.ensure($name { @@ -777,7 +800,7 @@ macro_rules! 
test_definitions { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(Compiletest { compiler: self.compiler, target: self.target, @@ -865,24 +888,16 @@ host_test!(RunPassFullDeps { suite: "run-pass-fulldeps" }); -host_test!(RunFailFullDeps { - path: "src/test/run-fail-fulldeps", - mode: "run-fail", - suite: "run-fail-fulldeps" -}); - host_test!(Rustdoc { path: "src/test/rustdoc", mode: "rustdoc", suite: "rustdoc" }); -test!(Pretty { +host_test!(Pretty { path: "src/test/pretty", mode: "pretty", - suite: "pretty", - default: false, - host: true + suite: "pretty" }); test!(RunPassPretty { path: "src/test/run-pass/pretty", @@ -905,20 +920,6 @@ test!(RunPassValgrindPretty { default: false, host: true }); -test!(RunPassFullDepsPretty { - path: "src/test/run-pass-fulldeps/pretty", - mode: "pretty", - suite: "run-pass-fulldeps", - default: false, - host: true -}); -test!(RunFailFullDepsPretty { - path: "src/test/run-fail-fulldeps/pretty", - mode: "pretty", - suite: "run-fail-fulldeps", - default: false, - host: true -}); default_test!(RunMake { path: "src/test/run-make", @@ -932,6 +933,12 @@ host_test!(RunMakeFullDeps { suite: "run-make-fulldeps" }); +default_test!(Assembly { + path: "src/test/assembly", + mode: "assembly", + suite: "assembly" +}); + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] struct Compiletest { compiler: Compiler, @@ -945,7 +952,7 @@ struct Compiletest { impl Step for Compiletest { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } @@ -954,7 +961,7 @@ impl Step for Compiletest { /// Compiles all tests with `compiler` for `target` with the specified /// compiletest `mode` and `suite` arguments. For example `mode` can be /// "run-pass" or `suite` can be something like `debuginfo`. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; let mode = self.mode; @@ -987,11 +994,7 @@ impl Step for Compiletest { }); } - if suite.ends_with("fulldeps") || - // FIXME: Does pretty need librustc compiled? Note that there are - // fulldeps test suites with mode = pretty as well. - mode == "pretty" - { + if suite.ends_with("fulldeps") { builder.ensure(compile::Rustc { compiler, target }); } @@ -1013,7 +1016,10 @@ impl Step for Compiletest { // Also provide `rust_test_helpers` for the host. builder.ensure(native::TestHelpers { target: compiler.host }); - builder.ensure(native::TestHelpers { target }); + // wasm32 can't build the test helpers + if !target.contains("wasm32") { + builder.ensure(native::TestHelpers { target }); + } builder.ensure(RemoteCopyLibs { compiler, target }); let mut cmd = builder.tool_cmd(Tool::Compiletest); @@ -1027,15 +1033,16 @@ impl Step for Compiletest { .arg(builder.sysroot_libdir(compiler, target)); cmd.arg("--rustc-path").arg(builder.rustc(compiler)); - let is_rustdoc_ui = suite.ends_with("rustdoc-ui"); + let is_rustdoc = suite.ends_with("rustdoc-ui") || suite.ends_with("rustdoc-js"); // Avoid depending on rustdoc when we don't need it. 
if mode == "rustdoc" || (mode == "run-make" && suite.ends_with("fulldeps")) - || (mode == "ui" && is_rustdoc_ui) + || (mode == "ui" && is_rustdoc) + || mode == "js-doc-test" { cmd.arg("--rustdoc-path") - .arg(builder.rustdoc(compiler.host)); + .arg(builder.rustdoc(compiler)); } cmd.arg("--src-base") @@ -1066,12 +1073,12 @@ impl Step for Compiletest { cmd.arg("--nodejs").arg(nodejs); } - let mut flags = if is_rustdoc_ui { + let mut flags = if is_rustdoc { Vec::new() } else { vec!["-Crpath".to_string()] }; - if !is_rustdoc_ui { + if !is_rustdoc { if builder.config.rust_optimize_tests { flags.push("-O".to_string()); } @@ -1125,9 +1132,7 @@ impl Step for Compiletest { }; let lldb_exe = if builder.config.lldb_enabled && !target.contains("emscripten") { // Test against the lldb that was just built. - builder.llvm_out(target) - .join("bin") - .join("lldb") + builder.llvm_out(target).join("bin").join("lldb") } else { PathBuf::from("lldb") }; @@ -1144,6 +1149,26 @@ impl Step for Compiletest { } } + if let Some(var) = env::var_os("RUSTBUILD_FORCE_CLANG_BASED_TESTS") { + match &var.to_string_lossy().to_lowercase()[..] { + "1" | "yes" | "on" => { + assert!(builder.config.lldb_enabled, + "RUSTBUILD_FORCE_CLANG_BASED_TESTS needs Clang/LLDB to \ + be built."); + let clang_exe = builder.llvm_out(target).join("bin").join("clang"); + cmd.arg("--run-clang-based-tests-with").arg(clang_exe); + } + "0" | "no" | "off" => { + // Nothing to do. + } + other => { + // Let's make sure typos don't get unnoticed + panic!("Unrecognized option '{}' set in \ + RUSTBUILD_FORCE_CLANG_BASED_TESTS", other); + } + } + } + // Get paths from cmd args let paths = match &builder.config.cmd { Subcommand::Test { ref paths, .. } => &paths[..], @@ -1175,7 +1200,7 @@ impl Step for Compiletest { cmd.arg("--quiet"); } - if builder.config.llvm_enabled { + if builder.config.llvm_enabled() { let llvm_config = builder.ensure(native::Llvm { target: builder.config.build, emscripten: false, @@ -1208,12 +1233,6 @@ impl Step for Compiletest { } } } - if suite == "run-make-fulldeps" && !builder.config.llvm_enabled { - builder.info( - "Ignoring run-make test suite as they generally don't work without LLVM" - ); - return; - } if suite != "run-make-fulldeps" { cmd.arg("--cc") @@ -1268,6 +1287,10 @@ impl Step for Compiletest { cmd.arg("--android-cross-path").arg(""); } + if builder.config.cmd.rustfix_coverage() { + cmd.arg("--rustfix-coverage"); + } + builder.ci_env.force_coloring_in_ci(&mut cmd); let _folder = builder.fold_output(|| format!("test_{}", suite)); @@ -1303,16 +1326,16 @@ impl Step for DocTest { type Output = (); const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } - /// Run `rustdoc --test` for all documentation in `src/doc`. + /// Runs `rustdoc --test` for all documentation in `src/doc`. /// /// This will run all tests in our markdown documentation (e.g., the book) /// located in `src/doc`. The `rustdoc` that's run is the one that sits next to /// `compiler`. - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; builder.ensure(compile::Test { @@ -1373,17 +1396,17 @@ macro_rules! 
test_book { const DEFAULT: bool = $default; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path($path) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure($name { compiler: run.builder.compiler(run.builder.top_stage, run.host), }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(DocTest { compiler: self.compiler, path: $path, @@ -1402,8 +1425,10 @@ test_book!( RustdocBook, "src/doc/rustdoc", "rustdoc", default=true; RustcBook, "src/doc/rustc", "rustc", default=true; RustByExample, "src/doc/rust-by-example", "rust-by-example", default=false; + EmbeddedBook, "src/doc/embedded-book", "embedded-book", default=false; TheBook, "src/doc/book", "book", default=false; UnstableBook, "src/doc/unstable-book", "unstable-book", default=true; + EditionGuide, "src/doc/edition-guide", "edition-guide", default=false; ); #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] @@ -1416,23 +1441,23 @@ impl Step for ErrorIndex { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/error_index_generator") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(ErrorIndex { compiler: run.builder.compiler(run.builder.top_stage, run.host), }); } - /// Run the error index generator tool to execute the tests located in the error + /// Runs the error index generator tool to execute the tests located in the error /// index. /// /// The `error_index_generator` tool lives in `src/tools` and is used to /// generate a markdown file from the error indexes of the code base which is /// then passed to `rustdoc --test`. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; builder.ensure(compile::Std { @@ -1444,7 +1469,10 @@ impl Step for ErrorIndex { t!(fs::create_dir_all(&dir)); let output = dir.join("error-index.md"); - let mut tool = builder.tool_cmd(Tool::ErrorIndex); + let mut tool = tool::ErrorIndex::command( + builder, + builder.compiler(compiler.stage, builder.config.build), + ); tool.arg("markdown") .arg(&output) .env("CFG_BUILD", &builder.config.build) @@ -1458,7 +1486,7 @@ impl Step for ErrorIndex { } } -fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool { +fn markdown_test(builder: &Builder<'_>, compiler: Compiler, markdown: &Path) -> bool { match fs::read_to_string(markdown) { Ok(contents) => { if !contents.contains("```") { @@ -1469,7 +1497,7 @@ fn markdown_test(builder: &Builder, compiler: Compiler, markdown: &Path) -> bool } builder.info(&format!("doc tests for: {}", markdown.display())); - let mut cmd = builder.rustdoc_cmd(compiler.host); + let mut cmd = builder.rustdoc_cmd(compiler); builder.add_rust_test_threads(&mut cmd); cmd.arg("--test"); cmd.arg(markdown); @@ -1498,11 +1526,11 @@ impl Step for CrateLibrustc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.krate("rustc-main") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let builder = run.builder; let compiler = builder.compiler(builder.top_stage, run.host); @@ -1520,7 +1548,7 @@ impl Step for CrateLibrustc { } } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(Crate { compiler: self.compiler, target: self.target, @@ -1542,14 +1570,14 @@ pub struct CrateNotDefault { impl Step for CrateNotDefault { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/librustc_asan") .path("src/librustc_lsan") .path("src/librustc_msan") .path("src/librustc_tsan") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let builder = run.builder; let compiler = builder.compiler(builder.top_stage, run.host); @@ -1569,7 +1597,7 @@ impl Step for CrateNotDefault { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { builder.ensure(Crate { compiler: self.compiler, target: self.target, @@ -1593,7 +1621,7 @@ impl Step for Crate { type Output = (); const DEFAULT: bool = true; - fn should_run(mut run: ShouldRun) -> ShouldRun { + fn should_run(mut run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run = run.krate("test"); for krate in run.builder.in_tree_crates("std") { @@ -1604,7 +1632,7 @@ impl Step for Crate { run } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let builder = run.builder; let compiler = builder.compiler(builder.top_stage, run.host); @@ -1632,7 +1660,7 @@ impl Step for Crate { } } - /// Run all unit tests plus documentation tests for a given crate defined + /// Runs all unit tests plus documentation tests for a given crate defined /// by a `Cargo.toml` (single manifest) /// /// This is what runs tests for crates like the standard library, compiler, etc. @@ -1640,7 +1668,7 @@ impl Step for Crate { /// /// Currently this runs all tests for a DAG by passing a bunch of `-p foo` /// arguments, and those arguments are discovered from `cargo metadata`. 
- fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; let mode = self.mode; @@ -1781,11 +1809,11 @@ impl Step for CrateRustdoc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.paths(&["src/librustdoc", "src/tools/rustdoc"]) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { let builder = run.builder; let test_kind = builder.kind.into(); @@ -1796,7 +1824,7 @@ impl Step for CrateRustdoc { }); } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let test_kind = self.test_kind; let compiler = builder.compiler(builder.top_stage, self.host); @@ -1851,7 +1879,7 @@ fn envify(s: &str) -> String { /// the standard library and such to the emulator ahead of time. This step /// represents this and is a dependency of all test suites. /// -/// Most of the time this is a noop. For some steps such as shipping data to +/// Most of the time this is a no-op. For some steps such as shipping data to /// QEMU we have to build our own tools so we've got conditional dependencies /// on those programs as well. Note that the remote test client is built for /// the build target (us) and the server is built for the target. @@ -1864,11 +1892,11 @@ pub struct RemoteCopyLibs { impl Step for RemoteCopyLibs { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } - fn run(self, builder: &Builder) { + fn run(self, builder: &Builder<'_>) { let compiler = self.compiler; let target = self.target; if !builder.remote_tested(target) { @@ -1914,16 +1942,16 @@ pub struct Distcheck; impl Step for Distcheck { type Output = (); - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("distcheck") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Distcheck); } - /// Run "distcheck", a 'make check' from a tarball - fn run(self, builder: &Builder) { + /// Runs "distcheck", a 'make check' from a tarball + fn run(self, builder: &Builder<'_>) { builder.info("Distcheck"); let dir = builder.out.join("tmp").join("distcheck"); let _ = fs::remove_dir_all(&dir); @@ -1983,8 +2011,8 @@ impl Step for Bootstrap { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - /// Test the build system itself - fn run(self, builder: &Builder) { + /// Tests the build system itself. + fn run(self, builder: &Builder<'_>) { let mut cmd = Command::new(&builder.initial_cargo); cmd.arg("test") .current_dir(builder.src.join("src/bootstrap")) @@ -2008,11 +2036,11 @@ impl Step for Bootstrap { try_run(builder, &mut cmd); } - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/bootstrap") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Bootstrap); } } diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs index 4335359e11589..23775a91e4ce0 100644 --- a/src/bootstrap/tool.rs +++ b/src/bootstrap/tool.rs @@ -1,16 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - use std::fs; use std::env; -use std::iter; use std::path::PathBuf; use std::process::{Command, exit}; use std::collections::HashSet; @@ -47,15 +36,15 @@ struct ToolBuild { impl Step for ToolBuild { type Output = Option; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.never() } - /// Build a tool in `src/tools` + /// Builds a tool in `src/tools` /// /// This will build the specified tool with the specified `host` compiler in /// `stage` into the normal cargo output directory. - fn run(self, builder: &Builder) -> Option { + fn run(self, builder: &Builder<'_>) -> Option { let compiler = self.compiler; let target = self.target; let tool = self.tool; @@ -87,12 +76,13 @@ impl Step for ToolBuild { let _folder = builder.fold_output(|| format!("stage{}-{}", compiler.stage, tool)); builder.info(&format!("Building stage{} tool {} ({})", compiler.stage, tool, target)); let mut duplicates = Vec::new(); - let is_expected = compile::stream_cargo(builder, &mut cargo, vec![], &mut |msg| { + let is_expected = compile::stream_cargo(builder, &mut cargo, &mut |msg| { // Only care about big things like the RLS/Cargo for now match tool { | "rls" | "cargo" | "clippy-driver" + | "miri" => {} _ => return, @@ -101,7 +91,8 @@ impl Step for ToolBuild { compile::CargoMessage::CompilerArtifact { package_id, features, - filenames + filenames, + target: _, } => { (package_id, features, filenames) } @@ -150,7 +141,7 @@ impl Step for ToolBuild { }); if is_expected && !duplicates.is_empty() { - println!("duplicate artfacts found when compiling a tool, this \ + println!("duplicate artifacts found when compiling a tool, this \ typically means that something was recompiled because \ a transitive dependency has different features activated \ than in a previous build:\n"); @@ -202,7 +193,7 @@ impl Step for ToolBuild { } pub fn prepare_tool_cargo( - builder: &Builder, + builder: &Builder<'_>, compiler: Compiler, mode: Mode, target: Interned, @@ -228,6 +219,7 @@ pub fn prepare_tool_cargo( if path.ends_with("cargo") || path.ends_with("rls") || path.ends_with("clippy") || + path.ends_with("miri") || path.ends_with("rustfmt") { cargo.env("LIBZ_SYS_STATIC", "1"); @@ -243,7 +235,7 @@ pub fn prepare_tool_cargo( cargo.env("CFG_VERSION", builder.rust_version()); cargo.env("CFG_RELEASE_NUM", channel::CFG_RELEASE_NUM); - let info = GitInfo::new(&builder.config, &dir); + let info = GitInfo::new(builder.config.ignore_git, &dir); if let Some(sha) = info.sha() { cargo.env("CFG_COMMIT_HASH", sha); } @@ -259,9 +251,9 @@ pub fn prepare_tool_cargo( cargo } -macro_rules! tool { +macro_rules! bootstrap_tool { ($( - $name:ident, $path:expr, $tool_name:expr, $mode:expr + $name:ident, $path:expr, $tool_name:expr $(,llvm_tools = $llvm:expr)* $(,is_external_tool = $external:expr)* ; @@ -275,10 +267,7 @@ macro_rules! tool { impl Tool { pub fn get_mode(&self) -> Mode { - let mode = match self { - $(Tool::$name => $mode,)+ - }; - mode + Mode::ToolBootstrap } /// Whether this tool requires LLVM to run @@ -291,27 +280,15 @@ macro_rules! 
tool { impl<'a> Builder<'a> { pub fn tool_exe(&self, tool: Tool) -> PathBuf { - let stage = self.tool_default_stage(tool); match tool { $(Tool::$name => self.ensure($name { - compiler: self.compiler(stage, self.config.build), + compiler: self.compiler(0, self.config.build), target: self.config.build, }), )+ } } - - pub fn tool_default_stage(&self, tool: Tool) -> u32 { - // Compile the error-index in the same stage as rustdoc to avoid - // recompiling rustdoc twice if we can. Otherwise compile - // everything else in stage0 as there's no need to rebootstrap - // everything. - match tool { - Tool::ErrorIndex if self.top_stage >= 2 => self.top_stage, - _ => 0, - } - } } $( @@ -324,23 +301,24 @@ macro_rules! tool { impl Step for $name { type Output = PathBuf; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path($path) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure($name { - compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), + // snapshot compiler + compiler: run.builder.compiler(0, run.builder.config.build), target: run.target, }); } - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { builder.ensure(ToolBuild { compiler: self.compiler, target: self.target, tool: $tool_name, - mode: $mode, + mode: Mode::ToolBootstrap, path: $path, is_optional_tool: false, source_type: if false $(|| $external)* { @@ -356,21 +334,67 @@ macro_rules! tool { } } -tool!( - Rustbook, "src/tools/rustbook", "rustbook", Mode::ToolBootstrap; - ErrorIndex, "src/tools/error_index_generator", "error_index_generator", Mode::ToolRustc; - UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen", Mode::ToolBootstrap; - Tidy, "src/tools/tidy", "tidy", Mode::ToolBootstrap; - Linkchecker, "src/tools/linkchecker", "linkchecker", Mode::ToolBootstrap; - CargoTest, "src/tools/cargotest", "cargotest", Mode::ToolBootstrap; - Compiletest, "src/tools/compiletest", "compiletest", Mode::ToolBootstrap, llvm_tools = true; - BuildManifest, "src/tools/build-manifest", "build-manifest", Mode::ToolBootstrap; - RemoteTestClient, "src/tools/remote-test-client", "remote-test-client", Mode::ToolBootstrap; - RustInstaller, "src/tools/rust-installer", "fabricate", Mode::ToolBootstrap, - is_external_tool = true; - RustdocTheme, "src/tools/rustdoc-themes", "rustdoc-themes", Mode::ToolBootstrap; +bootstrap_tool!( + Rustbook, "src/tools/rustbook", "rustbook"; + UnstableBookGen, "src/tools/unstable-book-gen", "unstable-book-gen"; + Tidy, "src/tools/tidy", "tidy"; + Linkchecker, "src/tools/linkchecker", "linkchecker"; + CargoTest, "src/tools/cargotest", "cargotest"; + Compiletest, "src/tools/compiletest", "compiletest", llvm_tools = true; + BuildManifest, "src/tools/build-manifest", "build-manifest"; + RemoteTestClient, "src/tools/remote-test-client", "remote-test-client"; + RustInstaller, "src/tools/rust-installer", "fabricate", is_external_tool = true; + RustdocTheme, "src/tools/rustdoc-themes", "rustdoc-themes"; ); +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct ErrorIndex { + pub compiler: Compiler, +} + +impl ErrorIndex { + pub fn command(builder: &Builder<'_>, compiler: Compiler) -> Command { + let mut cmd = Command::new(builder.ensure(ErrorIndex { + compiler + })); + add_lib_path( + vec![PathBuf::from(&builder.sysroot_libdir(compiler, compiler.host))], + &mut cmd, + ); + cmd + } +} + +impl Step for ErrorIndex { + type Output = PathBuf; + + fn 
should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { + run.path("src/tools/error_index_generator") + } + + fn make_run(run: RunConfig<'_>) { + // Compile the error-index in the same stage as rustdoc to avoid + // recompiling rustdoc twice if we can. + let stage = if run.builder.top_stage >= 2 { run.builder.top_stage } else { 0 }; + run.builder.ensure(ErrorIndex { + compiler: run.builder.compiler(stage, run.builder.config.build), + }); + } + + fn run(self, builder: &Builder<'_>) -> PathBuf { + builder.ensure(ToolBuild { + compiler: self.compiler, + target: self.compiler.host, + tool: "error_index_generator", + mode: Mode::ToolRustc, + path: "src/tools/error_index_generator", + is_optional_tool: false, + source_type: SourceType::InTree, + extra_features: Vec::new(), + }).expect("expected to build -- essential tool") + } +} + #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct RemoteTestServer { pub compiler: Compiler, @@ -380,18 +404,18 @@ pub struct RemoteTestServer { impl Step for RemoteTestServer { type Output = PathBuf; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/remote-test-server") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(RemoteTestServer { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { builder.ensure(ToolBuild { compiler: self.compiler, target: self.target, @@ -407,7 +431,9 @@ impl Step for RemoteTestServer { #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct Rustdoc { - pub host: Interned, + /// This should only ever be 0 or 2. + /// We sometimes want to reference the "bootstrap" rustdoc, which is why this option is here. + pub compiler: Compiler, } impl Step for Rustdoc { @@ -415,37 +441,37 @@ impl Step for Rustdoc { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { run.path("src/tools/rustdoc") } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Rustdoc { - host: run.host, + compiler: run.builder.compiler(run.builder.top_stage, run.host), }); } - fn run(self, builder: &Builder) -> PathBuf { - let target_compiler = builder.compiler(builder.top_stage, self.host); + fn run(self, builder: &Builder<'_>) -> PathBuf { + let target_compiler = self.compiler; + if target_compiler.stage == 0 { + if !target_compiler.is_snapshot(builder) { + panic!("rustdoc in stage 0 must be snapshot rustdoc"); + } + return builder.initial_rustc.with_file_name(exe("rustdoc", &target_compiler.host)); + } let target = target_compiler.host; - let build_compiler = if target_compiler.stage == 0 { - builder.compiler(0, builder.config.build) - } else if target_compiler.stage >= 2 { - // Past stage 2, we consider the compiler to be ABI-compatible and hence capable of - // building rustdoc itself. - builder.compiler(target_compiler.stage, builder.config.build) - } else { - // Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise - // we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage - // compilers, which isn't what we want. 
- builder.compiler(target_compiler.stage - 1, builder.config.build) - }; - - builder.ensure(compile::Rustc { compiler: build_compiler, target }); - builder.ensure(compile::Rustc { - compiler: build_compiler, - target: builder.config.build, - }); + // Similar to `compile::Assemble`, build with the previous stage's compiler. Otherwise + // we'd have stageN/bin/rustc and stageN/bin/rustdoc be effectively different stage + // compilers, which isn't what we want. Rustdoc should be linked in the same way as the + // rustc compiler it's paired with, so it must be built with the previous stage compiler. + let build_compiler = builder.compiler(target_compiler.stage - 1, builder.config.build); + + // The presence of `target_compiler` ensures that the necessary libraries (codegen backends, + // compiler libraries, ...) are built. Rustdoc does not require the presence of any + // libraries within sysroot_libdir (i.e., rustlib), though doctests may want it (since + // they'll be linked to those libraries). As such, don't explicitly `ensure` any additional + // libraries here. The intuition here is that If we've built a compiler, we should be able + // to build rustdoc. let mut cargo = prepare_tool_cargo( builder, @@ -499,19 +525,19 @@ impl Step for Cargo { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path("src/tools/cargo").default_condition(builder.config.extended) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure(Cargo { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, }); } - fn run(self, builder: &Builder) -> PathBuf { + fn run(self, builder: &Builder<'_>) -> PathBuf { // Cargo depends on procedural macros, which requires a full host // compiler to be available, so we need to depend on that. builder.ensure(compile::Rustc { @@ -551,12 +577,12 @@ macro_rules! tool_extended { const DEFAULT: bool = true; const ONLY_HOSTS: bool = true; - fn should_run(run: ShouldRun) -> ShouldRun { + fn should_run(run: ShouldRun<'_>) -> ShouldRun<'_> { let builder = run.builder; run.path($path).default_condition(builder.config.extended) } - fn make_run(run: RunConfig) { + fn make_run(run: RunConfig<'_>) { run.builder.ensure($name { compiler: run.builder.compiler(run.builder.top_stage, run.builder.config.build), target: run.target, @@ -565,7 +591,7 @@ macro_rules! tool_extended { } #[allow(unused_mut)] - fn run(mut $sel, $builder: &Builder) -> Option { + fn run(mut $sel, $builder: &Builder<'_>) -> Option { $extra_deps $builder.ensure(ToolBuild { compiler: $sel.compiler, @@ -602,6 +628,14 @@ tool_extended!((self, builder), }); }; Miri, miri, "src/tools/miri", "miri", {}; + CargoMiri, miri, "src/tools/miri", "cargo-miri", { + // Miri depends on procedural macros (serde), which requires a full host + // compiler to be available, so we need to depend on that. + builder.ensure(compile::Rustc { + compiler: self.compiler, + target: builder.config.build, + }); + }; Rls, rls, "src/tools/rls", "rls", { let clippy = builder.ensure(Clippy { compiler: self.compiler, @@ -622,11 +656,11 @@ tool_extended!((self, builder), ); impl<'a> Builder<'a> { - /// Get a `Command` which is ready to run `tool` in `stage` built for + /// Gets a `Command` which is ready to run `tool` in `stage` built for /// `host`. 
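// A minimal usage sketch for tool_cmd() as defined just below (assumed call
// site, loosely modelled on bootstrap's Tidy step). Every Tool::* entry is now
// a stage0 bootstrap tool, so the command is always prepared against the
// snapshot compiler's libdir:
//
//     let mut cmd = builder.tool_cmd(Tool::Tidy);
//     cmd.arg(builder.src.join("src"));
//     try_run(builder, &mut cmd);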
pub fn tool_cmd(&self, tool: Tool) -> Command { let mut cmd = Command::new(self.tool_exe(tool)); - let compiler = self.compiler(self.tool_default_stage(tool), self.config.build); + let compiler = self.compiler(0, self.config.build); self.prepare_tool_cmd(compiler, tool, &mut cmd); cmd } @@ -638,7 +672,7 @@ impl<'a> Builder<'a> { fn prepare_tool_cmd(&self, compiler: Compiler, tool: Tool, cmd: &mut Command) { let host = &compiler.host; let mut lib_paths: Vec = vec![ - if compiler.stage == 0 && tool != Tool::ErrorIndex { + if compiler.stage == 0 { self.build.rustc_snapshot_libdir() } else { PathBuf::from(&self.sysroot_libdir(compiler, compiler.host)) @@ -666,19 +700,33 @@ impl<'a> Builder<'a> { // Add the llvm/bin directory to PATH since it contains lots of // useful, platform-independent tools - if tool.uses_llvm_tools() { + if tool.uses_llvm_tools() && !self.config.dry_run { + let mut additional_paths = vec![]; + if let Some(llvm_bin_path) = self.llvm_bin_path() { - if host.contains("windows") { - // On Windows, PATH and the dynamic library path are the same, - // so we just add the LLVM bin path to lib_path - lib_paths.push(llvm_bin_path); - } else { - let old_path = env::var_os("PATH").unwrap_or_default(); - let new_path = env::join_paths(iter::once(llvm_bin_path) - .chain(env::split_paths(&old_path))) - .expect("Could not add LLVM bin path to PATH"); - cmd.env("PATH", new_path); - } + additional_paths.push(llvm_bin_path); + } + + // If LLD is available, add that too. + if self.config.lld_enabled { + let lld_install_root = self.ensure(native::Lld { + target: self.config.build, + }); + + let lld_bin_path = lld_install_root.join("bin"); + additional_paths.push(lld_bin_path); + } + + if host.contains("windows") { + // On Windows, PATH and the dynamic library path are the same, + // so we just add the LLVM bin path to lib_path + lib_paths.extend(additional_paths); + } else { + let old_path = env::var_os("PATH").unwrap_or_default(); + let new_path = env::join_paths(additional_paths.into_iter() + .chain(env::split_paths(&old_path))) + .expect("Could not add LLVM bin path to PATH"); + cmd.env("PATH", new_path); } } @@ -686,7 +734,7 @@ impl<'a> Builder<'a> { } fn llvm_bin_path(&self) -> Option { - if self.config.llvm_enabled && !self.config.dry_run { + if self.config.llvm_enabled() { let llvm_config = self.ensure(native::Llvm { target: self.config.build, emscripten: false, diff --git a/src/bootstrap/toolstate.rs b/src/bootstrap/toolstate.rs index f63c1988906fb..8ff7c09fc2996 100644 --- a/src/bootstrap/toolstate.rs +++ b/src/bootstrap/toolstate.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #[derive(Copy, Clone, Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] /// Whether a tool can be compiled, tested or neither diff --git a/src/bootstrap/util.rs b/src/bootstrap/util.rs index b18e38e471eff..bda1e56e1e73b 100644 --- a/src/bootstrap/util.rs +++ b/src/bootstrap/util.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Various utility functions used throughout rustbuild. //! //! Simple things like testing the various filesystem operations here and there, @@ -43,7 +33,7 @@ pub fn exe(name: &str, target: &str) -> String { } } -/// Returns whether the file name given looks like a dynamic library. +/// Returns `true` if the file name given looks like a dynamic library. pub fn is_dylib(name: &str) -> bool { name.ends_with(".dylib") || name.ends_with(".so") || name.ends_with(".dll") } @@ -80,7 +70,11 @@ pub fn dylib_path_var() -> &'static str { /// Parses the `dylib_path_var()` environment variable, returning a list of /// paths that are members of this lookup path. pub fn dylib_path() -> Vec { - env::split_paths(&env::var_os(dylib_path_var()).unwrap_or_default()).collect() + let var = match env::var_os(dylib_path_var()) { + Some(v) => v, + None => return vec![], + }; + env::split_paths(&var).collect() } /// `push` all components to `buf`. On windows, append `.exe` to the last component. @@ -101,7 +95,7 @@ pub fn push_exe_path(mut buf: PathBuf, components: &[&str]) -> PathBuf { pub struct TimeIt(bool, Instant); /// Returns an RAII structure that prints out how long it took to drop. -pub fn timeit(builder: &Builder) -> TimeIt { +pub fn timeit(builder: &Builder<'_>) -> TimeIt { TimeIt(builder.config.dry_run, Instant::now()) } diff --git a/src/build_helper/Cargo.toml b/src/build_helper/Cargo.toml index 01d704f816bbc..04c7820b45665 100644 --- a/src/build_helper/Cargo.toml +++ b/src/build_helper/Cargo.toml @@ -2,6 +2,7 @@ name = "build_helper" version = "0.1.0" authors = ["The Rust Project Developers"] +edition = "2018" [lib] name = "build_helper" diff --git a/src/build_helper/lib.rs b/src/build_helper/lib.rs index a580a874b3341..bd99dc118e66a 100644 --- a/src/build_helper/lib.rs +++ b/src/build_helper/lib.rs @@ -1,12 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +#![deny(rust_2018_idioms)] use std::fs::File; use std::path::{Path, PathBuf}; @@ -33,6 +25,25 @@ macro_rules! t { }; } +// Because Cargo adds the compiler's dylib path to our library search path, llvm-config may +// break: the dylib path for the compiler, as of this writing, contains a copy of the LLVM +// shared library, which means that when our freshly built llvm-config goes to load it's +// associated LLVM, it actually loads the compiler's LLVM. In particular when building the first +// compiler (i.e., in stage 0) that's a problem, as the compiler's LLVM is likely different from +// the one we want to use. As such, we restore the environment to what bootstrap saw. This isn't +// perfect -- we might actually want to see something from Cargo's added library paths -- but +// for now it works. 
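// A minimal usage sketch for the restore_library_path() helper defined just
// below, assuming a build script of a bootstrap-built crate that shells out to
// llvm-config and lists build_helper under [build-dependencies]:
//
//     // build.rs (hypothetical)
//     fn main() {
//         // Put the dynamic-library search path back to what bootstrap saw, so
//         // the freshly built llvm-config resolves its own LLVM, not rustc's copy.
//         build_helper::restore_library_path();
//         // ... invoke llvm-config / the C compiler from here ...
//     }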
+pub fn restore_library_path() { + println!("cargo:rerun-if-env-changed=REAL_LIBRARY_PATH_VAR"); + println!("cargo:rerun-if-env-changed=REAL_LIBRARY_PATH"); + let key = env::var_os("REAL_LIBRARY_PATH_VAR").expect("REAL_LIBRARY_PATH_VAR"); + if let Some(env) = env::var_os("REAL_LIBRARY_PATH") { + env::set_var(&key, &env); + } else { + env::remove_var(&key); + } +} + pub fn run(cmd: &mut Command) { println!("running: {:?}", cmd); run_silent(cmd); @@ -152,7 +163,7 @@ pub fn mtime(path: &Path) -> SystemTime { .unwrap_or(UNIX_EPOCH) } -/// Returns whether `dst` is up to date given that the file or files in `src` +/// Returns `true` if `dst` is up to date given that the file or files in `src` /// are used to generate it. /// /// Uses last-modified time checks to verify this. @@ -179,12 +190,12 @@ pub struct NativeLibBoilerplate { } impl NativeLibBoilerplate { - /// On OSX we don't want to ship the exact filename that compiler-rt builds. + /// On macOS we don't want to ship the exact filename that compiler-rt builds. /// This conflicts with the system and ours is likely a wildly different /// version, so they can't be substituted. /// /// As a result, we rename it here but we need to also use - /// `install_name_tool` on OSX to rename the commands listed inside of it to + /// `install_name_tool` on macOS to rename the commands listed inside of it to /// ensure it's linked against correctly. pub fn fixup_sanitizer_lib_name(&self, sanitizer_name: &str) { if env::var("TARGET").unwrap() != "x86_64-apple-darwin" { diff --git a/src/ci/docker/README.md b/src/ci/docker/README.md index 8d4dbc399986f..34320ab4411e2 100644 --- a/src/ci/docker/README.md +++ b/src/ci/docker/README.md @@ -131,13 +131,15 @@ $category > $option = $value -- $comment For targets: `arm-unknown-linux-gnueabi` - Path and misc options > Prefix directory = /x-tools/${CT\_TARGET} +- Path and misc options > Patches origin = Bundled, then local +- Path and misc options > Local patch directory = /tmp/patches - Target options > Target Architecture = arm - Target options > Architecture level = armv6 -- (+) - Target options > Floating point = software (no FPU) -- (\*) - Operating System > Target OS = linux - Operating System > Linux kernel version = 3.2.72 -- Precise kernel -- C-library > glibc version = 2.14.1 -- C compiler > gcc version = 4.9.3 +- C-library > glibc version = 2.16.0 +- C compiler > gcc version = 5.2.0 - C compiler > C++ = ENABLE -- to cross compile LLVM ### `arm-linux-gnueabihf.config` @@ -145,6 +147,8 @@ For targets: `arm-unknown-linux-gnueabi` For targets: `arm-unknown-linux-gnueabihf` - Path and misc options > Prefix directory = /x-tools/${CT\_TARGET} +- Path and misc options > Patches origin = Bundled, then local +- Path and misc options > Local patch directory = /tmp/patches - Target options > Target Architecture = arm - Target options > Architecture level = armv6 -- (+) - Target options > Use specific FPU = vfp -- (+) @@ -152,8 +156,8 @@ For targets: `arm-unknown-linux-gnueabihf` - Target options > Default instruction set mode = arm -- (+) - Operating System > Target OS = linux - Operating System > Linux kernel version = 3.2.72 -- Precise kernel -- C-library > glibc version = 2.14.1 -- C compiler > gcc version = 4.9.3 +- C-library > glibc version = 2.16.0 +- C compiler > gcc version = 5.2.0 - C compiler > C++ = ENABLE -- to cross compile LLVM ### `armv7-linux-gnueabihf.config` @@ -161,6 +165,8 @@ For targets: `arm-unknown-linux-gnueabihf` For targets: `armv7-unknown-linux-gnueabihf` - Path and misc options > Prefix 
directory = /x-tools/${CT\_TARGET} +- Path and misc options > Patches origin = Bundled, then local +- Path and misc options > Local patch directory = /tmp/patches - Target options > Target Architecture = arm - Target options > Suffix to the arch-part = v7 - Target options > Architecture level = armv7-a -- (+) @@ -169,8 +175,8 @@ For targets: `armv7-unknown-linux-gnueabihf` - Target options > Default instruction set mode = thumb -- (\*) - Operating System > Target OS = linux - Operating System > Linux kernel version = 3.2.72 -- Precise kernel -- C-library > glibc version = 2.14.1 -- C compiler > gcc version = 4.9.3 +- C-library > glibc version = 2.16.0 +- C compiler > gcc version = 5.2.0 - C compiler > C++ = ENABLE -- to cross compile LLVM (\*) These options have been selected to match the configuration of the arm @@ -204,7 +210,7 @@ For targets: `powerpc-unknown-linux-gnu` - Operating System > Target OS = linux - Operating System > Linux kernel version = 2.6.32.68 -- ~RHEL6 kernel - C-library > glibc version = 2.12.2 -- ~RHEL6 glibc -- C compiler > gcc version = 4.9.3 +- C compiler > gcc version = 5.2.0 - C compiler > C++ = ENABLE -- to cross compile LLVM ### `powerpc64-linux-gnu.config` @@ -221,7 +227,7 @@ For targets: `powerpc64-unknown-linux-gnu` - Operating System > Target OS = linux - Operating System > Linux kernel version = 2.6.32.68 -- ~RHEL6 kernel - C-library > glibc version = 2.12.2 -- ~RHEL6 glibc -- C compiler > gcc version = 4.9.3 +- C compiler > gcc version = 5.2.0 - C compiler > C++ = ENABLE -- to cross compile LLVM (+) These CPU options match the configuration of the toolchains in RHEL6. @@ -232,12 +238,12 @@ For targets: `s390x-unknown-linux-gnu` - Path and misc options > Prefix directory = /x-tools/${CT\_TARGET} - Path and misc options > Patches origin = Bundled, then local -- Path and misc options > Local patch directory = /build/patches +- Path and misc options > Local patch directory = /tmp/patches - Target options > Target Architecture = s390 - Target options > Bitness = 64-bit - Operating System > Target OS = linux - Operating System > Linux kernel version = 2.6.32.68 -- ~RHEL6 kernel - C-library > glibc version = 2.12.2 -- ~RHEL6 glibc -- C compiler > gcc version = 4.9.3 +- C compiler > gcc version = 5.2.0 - C compiler > gcc extra config = --with-arch=z10 -- LLVM's minimum support - C compiler > C++ = ENABLE -- to cross compile LLVM diff --git a/src/ci/docker/arm-android/Dockerfile b/src/ci/docker/arm-android/Dockerfile index e10ccd56a4a54..bbf700ae2339f 100644 --- a/src/ci/docker/arm-android/Dockerfile +++ b/src/ci/docker/arm-android/Dockerfile @@ -23,7 +23,7 @@ RUN dpkg --add-architecture i386 && \ COPY scripts/android-sdk.sh /scripts/ RUN . /scripts/android-sdk.sh && \ - download_and_create_avd 4333796 armeabi-v7a 18 + download_and_create_avd 4333796 armeabi-v7a 18 5264690 ENV PATH=$PATH:/android/sdk/emulator ENV PATH=$PATH:/android/sdk/tools diff --git a/src/ci/docker/armhf-gnu/Dockerfile b/src/ci/docker/armhf-gnu/Dockerfile index 2b7624d53ee05..e4c2097f970a9 100644 --- a/src/ci/docker/armhf-gnu/Dockerfile +++ b/src/ci/docker/armhf-gnu/Dockerfile @@ -71,7 +71,8 @@ COPY scripts/qemu-bare-bones-addentropy.c /tmp/addentropy.c RUN arm-linux-gnueabihf-gcc addentropy.c -o rootfs/addentropy -static # TODO: What is this?! 
-RUN curl -O http://ftp.nl.debian.org/debian/dists/jessie/main/installer-armhf/current/images/device-tree/vexpress-v2p-ca15-tc1.dtb +# Source of the file: https://github.com/vfdev-5/qemu-rpi2-vexpress/raw/master/vexpress-v2p-ca15-tc1.dtb +RUN curl -O https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/vexpress-v2p-ca15-tc1.dtb COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh diff --git a/src/ci/docker/disabled/dist-x86_64-dragonfly/build-toolchain.sh b/src/ci/docker/disabled/dist-x86_64-dragonfly/build-toolchain.sh index 2ebbe0cdee9b8..112d747fe4ed6 100755 --- a/src/ci/docker/disabled/dist-x86_64-dragonfly/build-toolchain.sh +++ b/src/ci/docker/disabled/dist-x86_64-dragonfly/build-toolchain.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh b/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh index a1115e254b5b2..faf30f36a20f9 100755 --- a/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh +++ b/src/ci/docker/disabled/dist-x86_64-haiku/build-toolchain.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh b/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh index a37532e203aa4..e4c9f86d268dc 100755 --- a/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh +++ b/src/ci/docker/disabled/dist-x86_64-haiku/fetch-packages.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. wget http://packages.haiku-os.org/haikuports/master/hpkg/llvm-4.0.1-2-x86_64.hpkg wget http://packages.haiku-os.org/haikuports/master/hpkg/llvm_libs-4.0.1-2-x86_64.hpkg diff --git a/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh b/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh index fb5206bed2258..83f3a6e5f1216 100755 --- a/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh +++ b/src/ci/docker/disabled/dist-x86_64-haiku/llvm-config.sh @@ -1,13 +1,4 @@ #!/bin/sh -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
case $1 in --version) echo 4.0.1;; diff --git a/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile b/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile index f4c25f791bc39..11a3acd68e3e8 100644 --- a/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile +++ b/src/ci/docker/disabled/dist-x86_64-redox/Dockerfile @@ -7,8 +7,8 @@ COPY scripts/crosstool-ng.sh /scripts/ RUN sh /scripts/crosstool-ng.sh WORKDIR /tmp -COPY cross/install-x86_64-redox.sh /tmp/ -RUN ./install-x86_64-redox.sh +COPY dist-various-1/install-x86_64-redox.sh /scripts/ +RUN sh /scripts/install-x86_64-redox.sh COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh diff --git a/src/ci/docker/disabled/wasm32-exp/node.sh b/src/ci/docker/disabled/wasm32-exp/node.sh index 2bfddb0de99b0..aa938971c702f 100755 --- a/src/ci/docker/disabled/wasm32-exp/node.sh +++ b/src/ci/docker/disabled/wasm32-exp/node.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. path="$(dirname $1)" file="$(basename $1)" diff --git a/src/ci/docker/dist-aarch64-linux/build-toolchains.sh b/src/ci/docker/dist-aarch64-linux/build-toolchains.sh index 22b719bb30755..390ba1a1ddf9c 100755 --- a/src/ci/docker/dist-aarch64-linux/build-toolchains.sh +++ b/src/ci/docker/dist-aarch64-linux/build-toolchains.sh @@ -1,14 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/dist-android/Dockerfile b/src/ci/docker/dist-android/Dockerfile index e00c23dac89b0..a54a2d003b649 100644 --- a/src/ci/docker/dist-android/Dockerfile +++ b/src/ci/docker/dist-android/Dockerfile @@ -16,6 +16,7 @@ RUN . 
/scripts/android-ndk.sh && \ # env ENV TARGETS=arm-linux-androideabi ENV TARGETS=$TARGETS,armv7-linux-androideabi +ENV TARGETS=$TARGETS,thumbv7neon-linux-androideabi ENV TARGETS=$TARGETS,i686-linux-android ENV TARGETS=$TARGETS,aarch64-linux-android ENV TARGETS=$TARGETS,x86_64-linux-android @@ -24,6 +25,7 @@ ENV RUST_CONFIGURE_ARGS \ --enable-extended \ --arm-linux-androideabi-ndk=/android/ndk/arm-14 \ --armv7-linux-androideabi-ndk=/android/ndk/arm-14 \ + --thumbv7neon-linux-androideabi-ndk=/android/ndk/arm-14 \ --i686-linux-android-ndk=/android/ndk/x86-14 \ --aarch64-linux-android-ndk=/android/ndk/arm64-21 \ --x86_64-linux-android-ndk=/android/ndk/x86_64-21 \ diff --git a/src/ci/docker/dist-arm-linux/Dockerfile b/src/ci/docker/dist-arm-linux/Dockerfile index 6ddc5c1e04ae3..48851ae232c99 100644 --- a/src/ci/docker/dist-arm-linux/Dockerfile +++ b/src/ci/docker/dist-arm-linux/Dockerfile @@ -16,6 +16,7 @@ RUN sh /scripts/rustbuild-setup.sh USER rustbuild WORKDIR /tmp +COPY dist-arm-linux/patches/ /tmp/patches/ COPY dist-arm-linux/arm-linux-gnueabi.config dist-arm-linux/build-toolchains.sh /tmp/ RUN ./build-toolchains.sh diff --git a/src/ci/docker/dist-arm-linux/arm-linux-gnueabi.config b/src/ci/docker/dist-arm-linux/arm-linux-gnueabi.config index f73ad069550e1..4185112d8be90 100644 --- a/src/ci/docker/dist-arm-linux/arm-linux-gnueabi.config +++ b/src/ci/docker/dist-arm-linux/arm-linux-gnueabi.config @@ -3,6 +3,7 @@ # Crosstool-NG Configuration # CT_CONFIGURE_has_make381=y +CT_CONFIGURE_has_xz=y CT_MODULES=y # @@ -44,14 +45,16 @@ CT_CONNECT_TIMEOUT=10 # CT_FORCE_EXTRACT is not set CT_OVERIDE_CONFIG_GUESS_SUB=y # CT_ONLY_EXTRACT is not set -CT_PATCH_BUNDLED=y +# CT_PATCH_BUNDLED is not set # CT_PATCH_LOCAL is not set -# CT_PATCH_BUNDLED_LOCAL is not set +CT_PATCH_BUNDLED_LOCAL=y # CT_PATCH_LOCAL_BUNDLED is not set # CT_PATCH_BUNDLED_FALLBACK_LOCAL is not set # CT_PATCH_LOCAL_FALLBACK_BUNDLED is not set # CT_PATCH_NONE is not set -CT_PATCH_ORDER="bundled" +CT_PATCH_ORDER="bundled,local" +CT_PATCH_USE_LOCAL=y +CT_LOCAL_PATCH_DIR="/tmp/patches" # # Build behavior @@ -391,8 +394,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y +CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -407,8 +410,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -420,7 +424,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -492,7 +496,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -500,7 +503,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -526,15 +528,13 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y +# CT_CLOOG_V_0_18_4 is not set # CT_CLOOG_V_0_18_1 is not set # CT_CLOOG_V_0_18_0 
is not set -CT_CLOOG_VERSION="0.18.4" -CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-arm-linux/build-toolchains.sh b/src/ci/docker/dist-arm-linux/build-toolchains.sh index c53cca0bb982c..2e790b77a96c2 100755 --- a/src/ci/docker/dist-arm-linux/build-toolchains.sh +++ b/src/ci/docker/dist-arm-linux/build-toolchains.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/dist-arm-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch b/src/ci/docker/dist-arm-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch new file mode 100644 index 0000000000000..871d5225c0f71 --- /dev/null +++ b/src/ci/docker/dist-arm-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch @@ -0,0 +1,48 @@ +commit bdb24c2851fd5f0ad9b82d7ea1db911d334b02d2 +Author: Joseph Myers +Date: Tue May 20 21:27:13 2014 +0000 + + Fix ARM build with GCC trunk. + + sysdeps/unix/sysv/linux/arm/unwind-resume.c and + sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c have static + variables that are written in C code but only read from toplevel asms. + Current GCC trunk now optimizes away such apparently write-only static + variables, so causing a build failure. This patch marks those + variables with __attribute_used__ to avoid that optimization. + + Tested that this fixes the build for ARM. + + * sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c + (libgcc_s_resume): Use __attribute_used__. + * sysdeps/unix/sysv/linux/arm/unwind-resume.c (libgcc_s_resume): + Likewise. 
+ +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +index 29e2c2b00b04..e848bfeffdcb 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +@@ -22,7 +22,8 @@ + #include + + static void *libgcc_s_handle; +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + static _Unwind_Reason_Code (*libgcc_s_forcedunwind) +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +index 285b99b5ed0d..48d00fc83641 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +@@ -20,7 +20,8 @@ + #include + #include + +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + diff --git a/src/ci/docker/dist-armhf-linux/Dockerfile b/src/ci/docker/dist-armhf-linux/Dockerfile index e4d4b2feeec40..d1dd9faaa1035 100644 --- a/src/ci/docker/dist-armhf-linux/Dockerfile +++ b/src/ci/docker/dist-armhf-linux/Dockerfile @@ -16,6 +16,7 @@ RUN sh /scripts/rustbuild-setup.sh USER rustbuild WORKDIR /tmp +COPY dist-armhf-linux/patches/ /tmp/patches/ COPY dist-armhf-linux/arm-linux-gnueabihf.config dist-armhf-linux/build-toolchains.sh /tmp/ RUN ./build-toolchains.sh diff --git a/src/ci/docker/dist-armhf-linux/arm-linux-gnueabihf.config b/src/ci/docker/dist-armhf-linux/arm-linux-gnueabihf.config index 1feeef1555749..bebbcd1670a5e 100644 --- a/src/ci/docker/dist-armhf-linux/arm-linux-gnueabihf.config +++ b/src/ci/docker/dist-armhf-linux/arm-linux-gnueabihf.config @@ -3,6 +3,7 @@ # Crosstool-NG Configuration # CT_CONFIGURE_has_make381=y +CT_CONFIGURE_has_xz=y CT_MODULES=y # @@ -44,14 +45,16 @@ CT_CONNECT_TIMEOUT=10 # CT_FORCE_EXTRACT is not set CT_OVERIDE_CONFIG_GUESS_SUB=y # CT_ONLY_EXTRACT is not set -CT_PATCH_BUNDLED=y +# CT_PATCH_BUNDLED is not set # CT_PATCH_LOCAL is not set -# CT_PATCH_BUNDLED_LOCAL is not set +CT_PATCH_BUNDLED_LOCAL=y # CT_PATCH_LOCAL_BUNDLED is not set # CT_PATCH_BUNDLED_FALLBACK_LOCAL is not set # CT_PATCH_LOCAL_FALLBACK_BUNDLED is not set # CT_PATCH_NONE is not set -CT_PATCH_ORDER="bundled" +CT_PATCH_ORDER="bundled,local" +CT_PATCH_USE_LOCAL=y +CT_LOCAL_PATCH_DIR="/tmp/patches" # # Build behavior @@ -392,8 +395,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y +CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -408,8 +411,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -421,7 +425,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is 
not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -493,7 +497,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -501,7 +504,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -527,15 +529,13 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y +# CT_CLOOG_V_0_18_4 is not set # CT_CLOOG_V_0_18_1 is not set # CT_CLOOG_V_0_18_0 is not set -CT_CLOOG_VERSION="0.18.4" -CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-armhf-linux/build-toolchains.sh b/src/ci/docker/dist-armhf-linux/build-toolchains.sh index 964182a5ad544..a01c2e0eb0a7c 100755 --- a/src/ci/docker/dist-armhf-linux/build-toolchains.sh +++ b/src/ci/docker/dist-armhf-linux/build-toolchains.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/dist-armhf-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch b/src/ci/docker/dist-armhf-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch new file mode 100644 index 0000000000000..871d5225c0f71 --- /dev/null +++ b/src/ci/docker/dist-armhf-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch @@ -0,0 +1,48 @@ +commit bdb24c2851fd5f0ad9b82d7ea1db911d334b02d2 +Author: Joseph Myers +Date: Tue May 20 21:27:13 2014 +0000 + + Fix ARM build with GCC trunk. + + sysdeps/unix/sysv/linux/arm/unwind-resume.c and + sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c have static + variables that are written in C code but only read from toplevel asms. + Current GCC trunk now optimizes away such apparently write-only static + variables, so causing a build failure. This patch marks those + variables with __attribute_used__ to avoid that optimization. + + Tested that this fixes the build for ARM. + + * sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c + (libgcc_s_resume): Use __attribute_used__. + * sysdeps/unix/sysv/linux/arm/unwind-resume.c (libgcc_s_resume): + Likewise. 
+ +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +index 29e2c2b00b04..e848bfeffdcb 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +@@ -22,7 +22,8 @@ + #include + + static void *libgcc_s_handle; +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + static _Unwind_Reason_Code (*libgcc_s_forcedunwind) +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +index 285b99b5ed0d..48d00fc83641 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +@@ -20,7 +20,8 @@ + #include + #include + +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + diff --git a/src/ci/docker/dist-armv7-linux/Dockerfile b/src/ci/docker/dist-armv7-linux/Dockerfile index 99fe7bd7b8f78..170b8134d3edc 100644 --- a/src/ci/docker/dist-armv7-linux/Dockerfile +++ b/src/ci/docker/dist-armv7-linux/Dockerfile @@ -16,6 +16,7 @@ RUN sh /scripts/rustbuild-setup.sh USER rustbuild WORKDIR /tmp +COPY dist-armv7-linux/patches/ /tmp/patches/ COPY dist-armv7-linux/build-toolchains.sh dist-armv7-linux/armv7-linux-gnueabihf.config /tmp/ RUN ./build-toolchains.sh diff --git a/src/ci/docker/dist-armv7-linux/armv7-linux-gnueabihf.config b/src/ci/docker/dist-armv7-linux/armv7-linux-gnueabihf.config index 79d6c77c41152..5cccfd8444d35 100644 --- a/src/ci/docker/dist-armv7-linux/armv7-linux-gnueabihf.config +++ b/src/ci/docker/dist-armv7-linux/armv7-linux-gnueabihf.config @@ -3,6 +3,7 @@ # Crosstool-NG Configuration # CT_CONFIGURE_has_make381=y +CT_CONFIGURE_has_xz=y CT_MODULES=y # @@ -44,14 +45,16 @@ CT_CONNECT_TIMEOUT=10 # CT_FORCE_EXTRACT is not set CT_OVERIDE_CONFIG_GUESS_SUB=y # CT_ONLY_EXTRACT is not set -CT_PATCH_BUNDLED=y +# CT_PATCH_BUNDLED is not set # CT_PATCH_LOCAL is not set -# CT_PATCH_BUNDLED_LOCAL is not set +CT_PATCH_BUNDLED_LOCAL=y # CT_PATCH_LOCAL_BUNDLED is not set # CT_PATCH_BUNDLED_FALLBACK_LOCAL is not set # CT_PATCH_LOCAL_FALLBACK_BUNDLED is not set # CT_PATCH_NONE is not set -CT_PATCH_ORDER="bundled" +CT_PATCH_ORDER="bundled,local" +CT_PATCH_USE_LOCAL=y +CT_LOCAL_PATCH_DIR="/tmp/patches" # # Build behavior @@ -155,12 +158,6 @@ CT_ARCH_EXCLUSIVE_WITH_CPU=y # CT_ARCH_FLOAT_AUTO is not set # CT_ARCH_FLOAT_SOFTFP is not set CT_ARCH_FLOAT="hard" -# CT_ARCH_ALPHA_EV4 is not set -# CT_ARCH_ALPHA_EV45 is not set -# CT_ARCH_ALPHA_EV5 is not set -# CT_ARCH_ALPHA_EV56 is not set -# CT_ARCH_ALPHA_EV6 is not set -# CT_ARCH_ALPHA_EV67 is not set # # arm other options @@ -311,8 +308,6 @@ CT_LIBC="glibc" CT_LIBC_VERSION="2.16.0" CT_LIBC_glibc=y # CT_LIBC_musl is not set -# CT_LIBC_newlib is not set -# CT_LIBC_none is not set # CT_LIBC_uClibc is not set CT_LIBC_avr_libc_AVAILABLE=y CT_LIBC_glibc_AVAILABLE=y @@ -400,8 +395,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y 
+CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -416,8 +411,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -429,7 +425,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -501,7 +497,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -509,7 +504,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -535,15 +529,13 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y +# CT_CLOOG_V_0_18_4 is not set # CT_CLOOG_V_0_18_1 is not set # CT_CLOOG_V_0_18_0 is not set -CT_CLOOG_VERSION="0.18.4" -CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-armv7-linux/build-toolchains.sh b/src/ci/docker/dist-armv7-linux/build-toolchains.sh index 40adfe5d53e0b..28f8ba2437b3d 100755 --- a/src/ci/docker/dist-armv7-linux/build-toolchains.sh +++ b/src/ci/docker/dist-armv7-linux/build-toolchains.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/dist-armv7-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch b/src/ci/docker/dist-armv7-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch new file mode 100644 index 0000000000000..871d5225c0f71 --- /dev/null +++ b/src/ci/docker/dist-armv7-linux/patches/glibc/ports-2.16.0/001-arm-libgcc_s_resume-used.patch @@ -0,0 +1,48 @@ +commit bdb24c2851fd5f0ad9b82d7ea1db911d334b02d2 +Author: Joseph Myers +Date: Tue May 20 21:27:13 2014 +0000 + + Fix ARM build with GCC trunk. + + sysdeps/unix/sysv/linux/arm/unwind-resume.c and + sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c have static + variables that are written in C code but only read from toplevel asms. + Current GCC trunk now optimizes away such apparently write-only static + variables, so causing a build failure. This patch marks those + variables with __attribute_used__ to avoid that optimization. + + Tested that this fixes the build for ARM. + + * sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c + (libgcc_s_resume): Use __attribute_used__. + * sysdeps/unix/sysv/linux/arm/unwind-resume.c (libgcc_s_resume): + Likewise. 
+ +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +index 29e2c2b00b04..e848bfeffdcb 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c +@@ -22,7 +22,8 @@ + #include + + static void *libgcc_s_handle; +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + static _Unwind_Reason_Code (*libgcc_s_forcedunwind) +diff --git a/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c b/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +index 285b99b5ed0d..48d00fc83641 100644 +--- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c ++++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c +@@ -20,7 +20,8 @@ + #include + #include + +-static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); ++static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) ++ __attribute_used__; + static _Unwind_Reason_Code (*libgcc_s_personality) + (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); + diff --git a/src/ci/docker/dist-powerpc-linux/build-powerpc-toolchain.sh b/src/ci/docker/dist-powerpc-linux/build-powerpc-toolchain.sh index 15211acb4459b..d2e39834d6e96 100755 --- a/src/ci/docker/dist-powerpc-linux/build-powerpc-toolchain.sh +++ b/src/ci/docker/dist-powerpc-linux/build-powerpc-toolchain.sh @@ -1,14 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/dist-powerpc-linux/patches/glibc/2.12.2/002-newer-gcc.patch b/src/ci/docker/dist-powerpc-linux/patches/glibc/2.12.2/002-newer-gcc.patch new file mode 100644 index 0000000000000..a96b4882c2d57 --- /dev/null +++ b/src/ci/docker/dist-powerpc-linux/patches/glibc/2.12.2/002-newer-gcc.patch @@ -0,0 +1,26 @@ +diff --git a/configure b/configure +index b6752d147c6b..6089a3403410 100755 +--- a/configure ++++ b/configure +@@ -5079,7 +5079,7 @@ $as_echo_n "checking version of $CC... " >&6; } + ac_prog_version=`$CC -v 2>&1 | sed -n 's/^.*version \([egcygnustpi-]*[0-9.]*\).*$/\1/p'` + case $ac_prog_version in + '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; +- 3.4* | 4.[0-9]* ) ++ 3.4* | [4-9].* ) + ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; + *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; + +diff --git a/configure.in b/configure.in +index 56849dfc489a..09677eb3d0c1 100644 +--- a/configure.in ++++ b/configure.in +@@ -960,7 +960,7 @@ fi + # These programs are version sensitive. 
+ AC_CHECK_TOOL_PREFIX + AC_CHECK_PROG_VER(CC, ${ac_tool_prefix}gcc ${ac_tool_prefix}cc, -v, +- [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | 4.[0-9]* ], ++ [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | [4-9].* ], + critic_missing="$critic_missing gcc") + AC_CHECK_PROG_VER(MAKE, gnumake gmake make, --version, + [GNU Make[^0-9]*\([0-9][0-9.]*\)], diff --git a/src/ci/docker/dist-powerpc-linux/powerpc-linux-gnu.config b/src/ci/docker/dist-powerpc-linux/powerpc-linux-gnu.config index 984a0a0304e47..7df41da2bf76e 100644 --- a/src/ci/docker/dist-powerpc-linux/powerpc-linux-gnu.config +++ b/src/ci/docker/dist-powerpc-linux/powerpc-linux-gnu.config @@ -359,8 +359,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y +CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -375,8 +375,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -388,7 +389,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -460,7 +461,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -468,7 +468,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -494,15 +493,13 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y +# CT_CLOOG_V_0_18_4 is not set # CT_CLOOG_V_0_18_1 is not set # CT_CLOOG_V_0_18_0 is not set -CT_CLOOG_VERSION="0.18.4" -CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-powerpc64-linux/build-powerpc64-toolchain.sh b/src/ci/docker/dist-powerpc64-linux/build-powerpc64-toolchain.sh index ac6460a472993..fc53849a2ada4 100755 --- a/src/ci/docker/dist-powerpc64-linux/build-powerpc64-toolchain.sh +++ b/src/ci/docker/dist-powerpc64-linux/build-powerpc64-toolchain.sh @@ -1,21 +1,31 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex source shared.sh +BINUTILS=2.32 +TARGET=powerpc64-unknown-linux-gnu +PREFIX=/x-tools/$TARGET +SYSROOT=$PREFIX/$TARGET/sysroot + mkdir build cd build cp ../powerpc64-linux-gnu.config .config hide_output ct-ng build cd .. rm -rf build + +chmod -R u+w $PREFIX + +# Next, download and build newer binutils. 
+mkdir binutils-$TARGET +pushd binutils-$TARGET +curl https://ftp.gnu.org/gnu/binutils/binutils-$BINUTILS.tar.bz2 | tar xjf - +mkdir binutils-build +cd binutils-build +hide_output ../binutils-$BINUTILS/configure --target=$TARGET \ + --prefix=$PREFIX --with-sysroot=$SYSROOT +hide_output make -j10 +hide_output make install +popd +rm -rf binutils-$TARGET diff --git a/src/ci/docker/dist-powerpc64-linux/patches/glibc/2.12.2/003-newer-gcc.patch b/src/ci/docker/dist-powerpc64-linux/patches/glibc/2.12.2/003-newer-gcc.patch new file mode 100644 index 0000000000000..a96b4882c2d57 --- /dev/null +++ b/src/ci/docker/dist-powerpc64-linux/patches/glibc/2.12.2/003-newer-gcc.patch @@ -0,0 +1,26 @@ +diff --git a/configure b/configure +index b6752d147c6b..6089a3403410 100755 +--- a/configure ++++ b/configure +@@ -5079,7 +5079,7 @@ $as_echo_n "checking version of $CC... " >&6; } + ac_prog_version=`$CC -v 2>&1 | sed -n 's/^.*version \([egcygnustpi-]*[0-9.]*\).*$/\1/p'` + case $ac_prog_version in + '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; +- 3.4* | 4.[0-9]* ) ++ 3.4* | [4-9].* ) + ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; + *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; + +diff --git a/configure.in b/configure.in +index 56849dfc489a..09677eb3d0c1 100644 +--- a/configure.in ++++ b/configure.in +@@ -960,7 +960,7 @@ fi + # These programs are version sensitive. + AC_CHECK_TOOL_PREFIX + AC_CHECK_PROG_VER(CC, ${ac_tool_prefix}gcc ${ac_tool_prefix}cc, -v, +- [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | 4.[0-9]* ], ++ [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | [4-9].* ], + critic_missing="$critic_missing gcc") + AC_CHECK_PROG_VER(MAKE, gnumake gmake make, --version, + [GNU Make[^0-9]*\([0-9][0-9.]*\)], diff --git a/src/ci/docker/dist-powerpc64-linux/powerpc64-linux-gnu.config b/src/ci/docker/dist-powerpc64-linux/powerpc64-linux-gnu.config index c2d02ee85cf25..4aab4f4fd4445 100644 --- a/src/ci/docker/dist-powerpc64-linux/powerpc64-linux-gnu.config +++ b/src/ci/docker/dist-powerpc64-linux/powerpc64-linux-gnu.config @@ -359,8 +359,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y +CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -375,8 +375,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -388,7 +389,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -460,7 +461,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -468,7 +468,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -494,15 +493,10 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y -# CT_CLOOG_V_0_18_1 is not set -# CT_CLOOG_V_0_18_0 is not set -CT_CLOOG_VERSION="0.18.4" 
-CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-powerpc64-linux/shared.sh b/src/ci/docker/dist-powerpc64-linux/shared.sh index 97e6d2908cf8a..b873569278914 100644 --- a/src/ci/docker/dist-powerpc64-linux/shared.sh +++ b/src/ci/docker/dist-powerpc64-linux/shared.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - hide_output() { set +x on_err=" diff --git a/src/ci/docker/dist-powerpc64le-linux/build-powerpc64le-toolchain.sh b/src/ci/docker/dist-powerpc64le-linux/build-powerpc64le-toolchain.sh index 2f6937afff032..f866a24287f9e 100755 --- a/src/ci/docker/dist-powerpc64le-linux/build-powerpc64le-toolchain.sh +++ b/src/ci/docker/dist-powerpc64le-linux/build-powerpc64le-toolchain.sh @@ -1,19 +1,10 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh -BINUTILS=2.25.1 +BINUTILS=2.32 GCC=5.3.0 TARGET=powerpc64le-linux-gnu SYSROOT=/usr/local/$TARGET/sysroot diff --git a/src/ci/docker/dist-powerpc64le-linux/shared.sh b/src/ci/docker/dist-powerpc64le-linux/shared.sh index 97e6d2908cf8a..b873569278914 100644 --- a/src/ci/docker/dist-powerpc64le-linux/shared.sh +++ b/src/ci/docker/dist-powerpc64le-linux/shared.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - hide_output() { set +x on_err=" diff --git a/src/ci/docker/dist-s390x-linux/build-s390x-toolchain.sh b/src/ci/docker/dist-s390x-linux/build-s390x-toolchain.sh index 306204dd0e1f6..df9529da8a162 100755 --- a/src/ci/docker/dist-s390x-linux/build-s390x-toolchain.sh +++ b/src/ci/docker/dist-s390x-linux/build-s390x-toolchain.sh @@ -1,14 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/dist-s390x-linux/patches/glibc/2.12.2/002-newer-gcc.patch b/src/ci/docker/dist-s390x-linux/patches/glibc/2.12.2/002-newer-gcc.patch new file mode 100644 index 0000000000000..a96b4882c2d57 --- /dev/null +++ b/src/ci/docker/dist-s390x-linux/patches/glibc/2.12.2/002-newer-gcc.patch @@ -0,0 +1,26 @@ +diff --git a/configure b/configure +index b6752d147c6b..6089a3403410 100755 +--- a/configure ++++ b/configure +@@ -5079,7 +5079,7 @@ $as_echo_n "checking version of $CC... 
" >&6; } + ac_prog_version=`$CC -v 2>&1 | sed -n 's/^.*version \([egcygnustpi-]*[0-9.]*\).*$/\1/p'` + case $ac_prog_version in + '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; +- 3.4* | 4.[0-9]* ) ++ 3.4* | [4-9].* ) + ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; + *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; + +diff --git a/configure.in b/configure.in +index 56849dfc489a..09677eb3d0c1 100644 +--- a/configure.in ++++ b/configure.in +@@ -960,7 +960,7 @@ fi + # These programs are version sensitive. + AC_CHECK_TOOL_PREFIX + AC_CHECK_PROG_VER(CC, ${ac_tool_prefix}gcc ${ac_tool_prefix}cc, -v, +- [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | 4.[0-9]* ], ++ [version \([egcygnustpi-]*[0-9.]*\)], [3.4* | [4-9].* ], + critic_missing="$critic_missing gcc") + AC_CHECK_PROG_VER(MAKE, gnumake gmake make, --version, + [GNU Make[^0-9]*\([0-9][0-9.]*\)], diff --git a/src/ci/docker/dist-s390x-linux/s390x-linux-gnu.config b/src/ci/docker/dist-s390x-linux/s390x-linux-gnu.config index fa5e4510987f1..cd1c41b02e312 100644 --- a/src/ci/docker/dist-s390x-linux/s390x-linux-gnu.config +++ b/src/ci/docker/dist-s390x-linux/s390x-linux-gnu.config @@ -339,8 +339,8 @@ CT_CC_CORE_PASS_1_NEEDED=y CT_CC_CORE_PASS_2_NEEDED=y CT_CC_gcc=y # CT_CC_GCC_SHOW_LINARO is not set -# CT_CC_GCC_V_5_2_0 is not set -CT_CC_GCC_V_4_9_3=y +CT_CC_GCC_V_5_2_0=y +# CT_CC_GCC_V_4_9_3 is not set # CT_CC_GCC_V_4_8_5 is not set # CT_CC_GCC_V_4_7_4 is not set # CT_CC_GCC_V_4_6_4 is not set @@ -355,8 +355,9 @@ CT_CC_GCC_4_5_or_later=y CT_CC_GCC_4_6_or_later=y CT_CC_GCC_4_7_or_later=y CT_CC_GCC_4_8_or_later=y -CT_CC_GCC_4_9=y CT_CC_GCC_4_9_or_later=y +CT_CC_GCC_5=y +CT_CC_GCC_5_or_later=y CT_CC_GCC_HAS_GRAPHITE=y CT_CC_GCC_USE_GRAPHITE=y CT_CC_GCC_HAS_LTO=y @@ -368,7 +369,7 @@ CT_CC_GCC_USE_GMP_MPFR=y CT_CC_GCC_USE_MPC=y CT_CC_GCC_HAS_LIBQUADMATH=y CT_CC_GCC_HAS_LIBSANITIZER=y -CT_CC_GCC_VERSION="4.9.3" +CT_CC_GCC_VERSION="5.2.0" # CT_CC_LANG_FORTRAN is not set CT_CC_GCC_ENABLE_CXX_FLAGS="" CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" @@ -440,7 +441,6 @@ CT_GETTEXT_NEEDED=y CT_GMP_NEEDED=y CT_MPFR_NEEDED=y CT_ISL_NEEDED=y -CT_CLOOG_NEEDED=y CT_MPC_NEEDED=y CT_COMPLIBS=y CT_LIBICONV=y @@ -448,7 +448,6 @@ CT_GETTEXT=y CT_GMP=y CT_MPFR=y CT_ISL=y -CT_CLOOG=y CT_MPC=y CT_LIBICONV_V_1_14=y CT_LIBICONV_VERSION="1.14" @@ -474,15 +473,13 @@ CT_MPFR_V_3_1_3=y # CT_MPFR_V_2_4_0 is not set CT_MPFR_VERSION="3.1.3" CT_ISL_V_0_14=y +# CT_ISL_V_0_12_2 is not set CT_ISL_V_0_14_or_later=y CT_ISL_V_0_12_or_later=y CT_ISL_VERSION="0.14" -CT_CLOOG_V_0_18_4=y +# CT_CLOOG_V_0_18_4 is not set # CT_CLOOG_V_0_18_1 is not set # CT_CLOOG_V_0_18_0 is not set -CT_CLOOG_VERSION="0.18.4" -CT_CLOOG_0_18_4_or_later=y -CT_CLOOG_0_18_or_later=y CT_MPC_V_1_0_3=y # CT_MPC_V_1_0_2 is not set # CT_MPC_V_1_0_1 is not set diff --git a/src/ci/docker/dist-various-1/Dockerfile b/src/ci/docker/dist-various-1/Dockerfile index 4f8a3c0240e1a..a722a4183912e 100644 --- a/src/ci/docker/dist-various-1/Dockerfile +++ b/src/ci/docker/dist-various-1/Dockerfile @@ -21,12 +21,18 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ patch \ libssl-dev \ pkg-config \ - gcc-arm-none-eabi \ libnewlib-arm-none-eabi \ - qemu-system-arm + qemu-system-arm \ +# software-properties-common for the add-apt-repository command + software-properties-common WORKDIR /build +# Use the team-gcc-arm-embedded PPA for a newer version of Arm GCC +RUN add-apt-repository ppa:team-gcc-arm-embedded/ppa && \ + apt-get update && \ + apt-get install -y --no-install-recommends 
gcc-arm-embedded + COPY dist-various-1/build-rumprun.sh /build RUN ./build-rumprun.sh @@ -103,19 +109,28 @@ ENV TARGETS=$TARGETS,thumbv6m-none-eabi ENV TARGETS=$TARGETS,thumbv7m-none-eabi ENV TARGETS=$TARGETS,thumbv7em-none-eabi ENV TARGETS=$TARGETS,thumbv7em-none-eabihf +ENV TARGETS=$TARGETS,thumbv8m.base-none-eabi +ENV TARGETS=$TARGETS,thumbv8m.main-none-eabi +ENV TARGETS=$TARGETS,thumbv8m.main-none-eabihf ENV TARGETS=$TARGETS,riscv32imc-unknown-none-elf ENV TARGETS=$TARGETS,riscv32imac-unknown-none-elf +ENV TARGETS=$TARGETS,riscv64imac-unknown-none-elf +ENV TARGETS=$TARGETS,riscv64gc-unknown-none-elf ENV TARGETS=$TARGETS,armebv7r-none-eabi ENV TARGETS=$TARGETS,armebv7r-none-eabihf ENV TARGETS=$TARGETS,armv7r-none-eabi ENV TARGETS=$TARGETS,armv7r-none-eabihf +ENV TARGETS=$TARGETS,thumbv7neon-unknown-linux-gnueabihf ENV CC_mipsel_unknown_linux_musl=mipsel-openwrt-linux-gcc \ CC_mips_unknown_linux_musl=mips-openwrt-linux-gcc \ CC_sparc64_unknown_linux_gnu=sparc64-linux-gnu-gcc \ CC_x86_64_unknown_redox=x86_64-unknown-redox-gcc \ - CC_armebv7r_none_eabi=arm-none-eabi-gcc - + CC_armebv7r_none_eabi=arm-none-eabi-gcc \ + CC_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ + AR_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-ar \ + CXX_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-g++ + ENV RUST_CONFIGURE_ARGS \ --musl-root-armv5te=/musl-armv5te \ --musl-root-arm=/musl-arm \ diff --git a/src/ci/docker/dist-various-1/build-rumprun.sh b/src/ci/docker/dist-various-1/build-rumprun.sh index ad38cf872ad07..9c7aaef4f436f 100755 --- a/src/ci/docker/dist-various-1/build-rumprun.sh +++ b/src/ci/docker/dist-various-1/build-rumprun.sh @@ -1,14 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/dist-various-1/install-mips-musl.sh b/src/ci/docker/dist-various-1/install-mips-musl.sh index eeb4aacbbb74c..8d05a046959d8 100755 --- a/src/ci/docker/dist-various-1/install-mips-musl.sh +++ b/src/ci/docker/dist-various-1/install-mips-musl.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex mkdir /usr/local/mips-linux-musl diff --git a/src/ci/docker/dist-various-1/install-mipsel-musl.sh b/src/ci/docker/dist-various-1/install-mipsel-musl.sh index 74b6a10e77a67..2c414744bf47b 100755 --- a/src/ci/docker/dist-various-1/install-mipsel-musl.sh +++ b/src/ci/docker/dist-various-1/install-mipsel-musl.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- set -ex mkdir /usr/local/mipsel-linux-musl diff --git a/src/ci/docker/dist-various-1/install-x86_64-redox.sh b/src/ci/docker/dist-various-1/install-x86_64-redox.sh index 9bfb57f5741c5..339042bb6729a 100755 --- a/src/ci/docker/dist-various-1/install-x86_64-redox.sh +++ b/src/ci/docker/dist-various-1/install-x86_64-redox.sh @@ -1,23 +1,7 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # ignore-tidy-linelength set -ex -apt-get update -apt-get install -y --no-install-recommends software-properties-common apt-transport-https - -apt-key adv --batch --yes --keyserver keyserver.ubuntu.com --recv-keys AA12E97F0881517F -add-apt-repository -y 'deb https://static.redox-os.org/toolchain/apt /' - -apt-get update -apt-get install -y x86-64-unknown-redox-gcc +curl https://static.redox-os.org/toolchain/x86_64-unknown-redox/relibc-install.tar.gz | \ +tar --extract --gzip --directory /usr/local diff --git a/src/ci/docker/dist-various-2/Dockerfile b/src/ci/docker/dist-various-2/Dockerfile index 944c2a51b8d1f..c0f3326524d5c 100644 --- a/src/ci/docker/dist-various-2/Dockerfile +++ b/src/ci/docker/dist-various-2/Dockerfile @@ -29,6 +29,13 @@ RUN /tmp/build-fuchsia-toolchain.sh COPY dist-various-2/build-solaris-toolchain.sh /tmp/ RUN /tmp/build-solaris-toolchain.sh x86_64 amd64 solaris-i386 RUN /tmp/build-solaris-toolchain.sh sparcv9 sparcv9 solaris-sparc +COPY dist-various-2/build-x86_64-fortanix-unknown-sgx-toolchain.sh /tmp/ +# We pass the commit id of the port of LLVM's libunwind to the build script. +# Any update to the commit id here, should cause the container image to be re-built from this point on. +RUN /tmp/build-x86_64-fortanix-unknown-sgx-toolchain.sh "53b586346f2c7870e20b170decdc30729d97c42b" + +COPY dist-various-2/build-wasi-toolchain.sh /tmp/ +RUN /tmp/build-wasi-toolchain.sh COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh @@ -62,9 +69,15 @@ ENV TARGETS=x86_64-fuchsia ENV TARGETS=$TARGETS,aarch64-fuchsia ENV TARGETS=$TARGETS,sparcv9-sun-solaris ENV TARGETS=$TARGETS,wasm32-unknown-unknown +ENV TARGETS=$TARGETS,wasm32-unknown-wasi ENV TARGETS=$TARGETS,x86_64-sun-solaris ENV TARGETS=$TARGETS,x86_64-unknown-linux-gnux32 ENV TARGETS=$TARGETS,x86_64-unknown-cloudabi +ENV TARGETS=$TARGETS,x86_64-fortanix-unknown-sgx +ENV TARGETS=$TARGETS,nvptx64-nvidia-cuda + +ENV X86_FORTANIX_SGX_LIBS="/x86_64-fortanix-unknown-sgx/lib/" -ENV RUST_CONFIGURE_ARGS --enable-extended --enable-lld --disable-docs +ENV RUST_CONFIGURE_ARGS --enable-extended --enable-lld --disable-docs \ + --set target.wasm32-unknown-wasi.wasi-root=/wasm32-unknown-wasi ENV SCRIPT python2.7 ../x.py dist --target $TARGETS diff --git a/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh b/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh index 8c04d849e8d0a..3354a796c357e 100755 --- a/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh +++ b/src/ci/docker/dist-various-2/build-cloudabi-toolchain.sh @@ -1,13 +1,4 @@ #!/bin/bash -# Copyright 2018 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. 
This file may not be copied, modified, or distributed -# except according to those terms. set -eux @@ -41,9 +32,8 @@ ln -s ../lib/llvm-5.0/bin/lld /usr/bin/${target}-ld ln -s ../../${target} /usr/lib/llvm-5.0/${target} # Install the C++ runtime libraries from CloudABI Ports. -echo deb https://nuxi.nl/distfiles/cloudabi-ports/debian/ cloudabi cloudabi > \ - /etc/apt/sources.list.d/cloudabi.list -curl 'https://pgp.mit.edu/pks/lookup?op=get&search=0x0DA51B8531344B15' | \ - apt-key add - +apt-key adv --batch --yes --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 0DA51B8531344B15 +add-apt-repository -y 'deb https://nuxi.nl/distfiles/cloudabi-ports/debian/ cloudabi cloudabi' + apt-get update -apt-get install -y $(echo ${target} | sed -e s/_/-/g)-cxx-runtime +apt-get install -y "${target//_/-}-cxx-runtime" diff --git a/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh b/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh index ec19f7c4f45d9..ef486075ff9d8 100755 --- a/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh +++ b/src/ci/docker/dist-various-2/build-fuchsia-toolchain.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # ignore-tidy-linelength diff --git a/src/ci/docker/dist-various-2/build-solaris-toolchain.sh b/src/ci/docker/dist-various-2/build-solaris-toolchain.sh index c04c8b7194c71..4b3f284450eaa 100755 --- a/src/ci/docker/dist-various-2/build-solaris-toolchain.sh +++ b/src/ci/docker/dist-various-2/build-solaris-toolchain.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-various-2/build-wasi-toolchain.sh b/src/ci/docker/dist-various-2/build-wasi-toolchain.sh new file mode 100755 index 0000000000000..965286e5bcf64 --- /dev/null +++ b/src/ci/docker/dist-various-2/build-wasi-toolchain.sh @@ -0,0 +1,20 @@ +#!/bin/sh +# +# ignore-tidy-linelength + +set -ex + +# Originally from https://releases.llvm.org/8.0.0/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz +curl https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/clang%2Bllvm-8.0.0-x86_64-linux-gnu-ubuntu-14.04.tar.xz | \ + tar xJf - +export PATH=`pwd`/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-14.04/bin:$PATH + +git clone https://github.com/CraneStation/wasi-sysroot + +cd wasi-sysroot +git reset --hard e5f14be38362f1ab83302895a6e74b2ffd0e2302 +make -j$(nproc) INSTALL_DIR=/wasm32-unknown-wasi install + +cd .. 
+rm -rf reference-sysroot-wasi +rm -rf clang+llvm* diff --git a/src/ci/docker/dist-various-2/build-x86_64-fortanix-unknown-sgx-toolchain.sh b/src/ci/docker/dist-various-2/build-x86_64-fortanix-unknown-sgx-toolchain.sh new file mode 100755 index 0000000000000..725ec341b9497 --- /dev/null +++ b/src/ci/docker/dist-various-2/build-x86_64-fortanix-unknown-sgx-toolchain.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -eu +source shared.sh + +if [ -z "$1" ]; then + echo "Usage: ${0} " + exit -1 +fi + +target="x86_64-fortanix-unknown-sgx" +url="https://github.com/fortanix/llvm-project/archive/${1}.tar.gz" +repo_name="llvm-project" + +install_prereq() { + apt-get update + apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + git +} + +build_unwind() { + set -x + dir_name="${target}_temp" + rm -rf ${dir_name} + mkdir -p ${dir_name} + pushd ${dir_name} + + # Clone Fortanix's fork of llvm-project which has a port of libunwind + fetch_github_commit_archive "$repo_name" "$url" + cd "${repo_name}/libunwind" + + # Build libunwind + mkdir -p build + cd build + cmake -DCMAKE_BUILD_TYPE="RELEASE" -DRUST_SGX=1 -G "Unix Makefiles" \ + -DLLVM_ENABLE_WARNINGS=1 -DLIBUNWIND_ENABLE_WERROR=1 -DLIBUNWIND_ENABLE_PEDANTIC=0 \ + -DLLVM_PATH=../../llvm/ ../ + make unwind_static + install -D "lib/libunwind.a" "/${target}/lib/libunwind.a" + + popd + rm -rf ${dir_name} + + { set +x; } 2>/dev/null +} + +set -x +hide_output install_prereq +build_unwind diff --git a/src/ci/docker/dist-various-2/shared.sh b/src/ci/docker/dist-various-2/shared.sh index e26c6eb664578..7abace65b9c03 100644 --- a/src/ci/docker/dist-various-2/shared.sh +++ b/src/ci/docker/dist-various-2/shared.sh @@ -1,15 +1,5 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - hide_output() { - set +x + { set +x; } 2>/dev/null on_err=" echo ERROR: An error was encountered with the build. cat /tmp/build.log @@ -23,3 +13,34 @@ exit 1 kill $PING_LOOP_PID set -x } + +# Copied from ../../shared.sh +function retry { + echo "Attempting with retry:" "$@" + local n=1 + local max=5 + while true; do + "$@" && break || { + if [[ $n -lt $max ]]; then + sleep $n # don't retry immediately + ((n++)) + echo "Command failed. Attempt $n/$max:" + else + echo "The command has failed after $n attempts." + return 1 + fi + } + done +} + +# Copied from ../../init_repo.sh +function fetch_github_commit_archive { + local module=$1 + local cached="download-${module//\//-}.tar.gz" + retry sh -c "rm -f $cached && \ + curl -f -sSL -o $cached $2" + mkdir $module + touch "$module/.git" + tar -C $module --strip-components=1 -xf $cached + rm $cached +} diff --git a/src/ci/docker/dist-x86_64-linux/build-binutils.sh b/src/ci/docker/dist-x86_64-linux/build-binutils.sh index f4bdbd80d0edb..ed0b5c85f7ca9 100755 --- a/src/ci/docker/dist-x86_64-linux/build-binutils.sh +++ b/src/ci/docker/dist-x86_64-linux/build-binutils.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. 
This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/dist-x86_64-linux/build-clang.sh b/src/ci/docker/dist-x86_64-linux/build-clang.sh index 2762f0bf7ec74..ac681b7168622 100755 --- a/src/ci/docker/dist-x86_64-linux/build-clang.sh +++ b/src/ci/docker/dist-x86_64-linux/build-clang.sh @@ -1,43 +1,19 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh -LLVM=7.0.0 - -mkdir clang -cd clang - -curl https://releases.llvm.org/$LLVM/llvm-$LLVM.src.tar.xz | \ - xz -d | \ - tar xf - - -cd llvm-$LLVM.src - -mkdir -p tools/clang - -curl https://releases.llvm.org/$LLVM/cfe-$LLVM.src.tar.xz | \ - xz -d | \ - tar xf - -C tools/clang --strip-components=1 +LLVM=llvmorg-8.0.0-rc2 -mkdir -p tools/lld +mkdir llvm-project +cd llvm-project -curl https://releases.llvm.org/$LLVM/lld-$LLVM.src.tar.xz | \ - xz -d | \ - tar xf - -C tools/lld --strip-components=1 +curl -L https://github.com/llvm/llvm-project/archive/$LLVM.tar.gz | \ + tar xzf - --strip-components=1 -mkdir ../clang-build -cd ../clang-build +mkdir clang-build +cd clang-build # For whatever reason the default set of include paths for clang is different # than that of gcc. As a result we need to manually include our sysroot's @@ -51,20 +27,21 @@ cd ../clang-build # # [1]: https://sourceware.org/ml/crossgcc/2008-11/msg00028.html INC="/rustroot/include" -INC="$INC:/rustroot/lib/gcc/x86_64-unknown-linux-gnu/4.8.5/include-fixed" +INC="$INC:/rustroot/lib/gcc/x86_64-unknown-linux-gnu/5.5.0/include-fixed" INC="$INC:/usr/include" hide_output \ - cmake ../llvm-$LLVM.src \ + cmake ../llvm \ -DCMAKE_C_COMPILER=/rustroot/bin/gcc \ -DCMAKE_CXX_COMPILER=/rustroot/bin/g++ \ -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_INSTALL_PREFIX=/rustroot \ -DLLVM_TARGETS_TO_BUILD=X86 \ + -DLLVM_ENABLE_PROJECTS="clang;lld" \ -DC_INCLUDE_DIRS="$INC" hide_output make -j10 hide_output make install cd ../.. -rm -rf clang +rm -rf llvm-project diff --git a/src/ci/docker/dist-x86_64-linux/build-cmake.sh b/src/ci/docker/dist-x86_64-linux/build-cmake.sh index 9a3763d421ad2..84522a7b87a4b 100755 --- a/src/ci/docker/dist-x86_64-linux/build-cmake.sh +++ b/src/ci/docker/dist-x86_64-linux/build-cmake.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/build-curl.sh b/src/ci/docker/dist-x86_64-linux/build-curl.sh index edf3175b81c43..fb8b63d7920b1 100755 --- a/src/ci/docker/dist-x86_64-linux/build-curl.sh +++ b/src/ci/docker/dist-x86_64-linux/build-curl.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. 
This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/build-gcc.sh b/src/ci/docker/dist-x86_64-linux/build-gcc.sh index 62ea2506f4ef8..7f6e94d326ddf 100755 --- a/src/ci/docker/dist-x86_64-linux/build-gcc.sh +++ b/src/ci/docker/dist-x86_64-linux/build-gcc.sh @@ -1,21 +1,11 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex source shared.sh -GCC=4.8.5 +GCC=5.5.0 -curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.bz2 | tar xjf - +curl https://ftp.gnu.org/gnu/gcc/gcc-$GCC/gcc-$GCC.tar.xz | xzcat | tar xf - cd gcc-$GCC # FIXME(#49246): Remove the `sed` below. diff --git a/src/ci/docker/dist-x86_64-linux/build-git.sh b/src/ci/docker/dist-x86_64-linux/build-git.sh index aa31f50ba0343..38fea2a8094b9 100755 --- a/src/ci/docker/dist-x86_64-linux/build-git.sh +++ b/src/ci/docker/dist-x86_64-linux/build-git.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/build-headers.sh b/src/ci/docker/dist-x86_64-linux/build-headers.sh index 2f15114d6f980..b623e53583b5e 100755 --- a/src/ci/docker/dist-x86_64-linux/build-headers.sh +++ b/src/ci/docker/dist-x86_64-linux/build-headers.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/build-openssl.sh b/src/ci/docker/dist-x86_64-linux/build-openssl.sh index e7226ace020bd..7e391e21d13eb 100755 --- a/src/ci/docker/dist-x86_64-linux/build-openssl.sh +++ b/src/ci/docker/dist-x86_64-linux/build-openssl.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/build-perl.sh b/src/ci/docker/dist-x86_64-linux/build-perl.sh index 4715fb553488f..a678d353d52f5 100755 --- a/src/ci/docker/dist-x86_64-linux/build-perl.sh +++ b/src/ci/docker/dist-x86_64-linux/build-perl.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2018 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh @@ -20,7 +11,8 @@ cd perl-5.28.0 # Gotta do some hackery to tell python about our custom OpenSSL build, but other # than that fairly normal. CC=gcc \ -CFLAGS='-I /rustroot/include' LDFLAGS='-L /rustroot/lib -L /rustroot/lib64' \ +CFLAGS='-I /rustroot/include -fgnu89-inline' \ +LDFLAGS='-L /rustroot/lib -L /rustroot/lib64' \ hide_output ./configure.gnu hide_output make -j10 hide_output make install diff --git a/src/ci/docker/dist-x86_64-linux/build-python.sh b/src/ci/docker/dist-x86_64-linux/build-python.sh index c6b8cdde4b9af..c172b9781120d 100755 --- a/src/ci/docker/dist-x86_64-linux/build-python.sh +++ b/src/ci/docker/dist-x86_64-linux/build-python.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex source shared.sh diff --git a/src/ci/docker/dist-x86_64-linux/shared.sh b/src/ci/docker/dist-x86_64-linux/shared.sh index 97e6d2908cf8a..b873569278914 100644 --- a/src/ci/docker/dist-x86_64-linux/shared.sh +++ b/src/ci/docker/dist-x86_64-linux/shared.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - hide_output() { set +x on_err=" diff --git a/src/ci/docker/dist-x86_64-musl/Dockerfile b/src/ci/docker/dist-x86_64-musl/Dockerfile index 06f8a2fbba836..21a9023a4586a 100644 --- a/src/ci/docker/dist-x86_64-musl/Dockerfile +++ b/src/ci/docker/dist-x86_64-musl/Dockerfile @@ -4,6 +4,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ g++ \ make \ file \ + wget \ curl \ ca-certificates \ python2.7 \ @@ -18,19 +19,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ WORKDIR /build/ -COPY scripts/musl.sh /build/ +COPY scripts/musl-toolchain.sh /build/ # We need to mitigate rust-lang/rust#34978 when compiling musl itself as well -RUN CC=gcc \ - CFLAGS="-Wa,-mrelax-relocations=no" \ - CXX=g++ \ - CXXFLAGS="-Wa,-mrelax-relocations=no" \ - bash musl.sh x86_64 && rm -rf /build +RUN CFLAGS="-Wa,-mrelax-relocations=no -Wa,--compress-debug-sections=none -Wl,--compress-debug-sections=none" \ + CXXFLAGS="-Wa,-mrelax-relocations=no -Wa,--compress-debug-sections=none -Wl,--compress-debug-sections=none" \ + bash musl-toolchain.sh x86_64 && rm -rf build COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV RUST_CONFIGURE_ARGS \ - --musl-root-x86_64=/musl-x86_64 \ + --musl-root-x86_64=/usr/local/x86_64-linux-musl \ --enable-extended \ --disable-docs @@ -39,8 +38,16 @@ ENV RUST_CONFIGURE_ARGS \ # way to produce "super compatible" binaries. 
# # See: https://github.com/rust-lang/rust/issues/34978 -ENV CFLAGS_x86_64_unknown_linux_musl=-Wa,-mrelax-relocations=no +# And: https://github.com/rust-lang/rust/issues/59411 +ENV CFLAGS_x86_64_unknown_linux_musl="-Wa,-mrelax-relocations=no -Wa,--compress-debug-sections=none \ + -Wl,--compress-debug-sections=none" -ENV SCRIPT \ - python2.7 ../x.py test --target x86_64-unknown-linux-musl && \ - python2.7 ../x.py dist --target x86_64-unknown-linux-musl +ENV HOSTS=x86_64-unknown-linux-musl \ + CC_x86_64_unknown_linux_musl=x86_64-linux-musl-gcc \ + CXX_x86_64_unknown_linux_musl=x86_64-linux-musl-g++ + +# Musl defaults to static libs but we need them to be dynamic for host toolchain. +# The toolchain will produce static libs by default. +ENV RUSTFLAGS="-C target-feature=-crt-static" + +ENV SCRIPT python2.7 ../x.py dist --host $HOSTS --target $HOSTS diff --git a/src/ci/docker/dist-x86_64-netbsd/Dockerfile b/src/ci/docker/dist-x86_64-netbsd/Dockerfile index a17a7ebc03dd1..44b1aaa24b19d 100644 --- a/src/ci/docker/dist-x86_64-netbsd/Dockerfile +++ b/src/ci/docker/dist-x86_64-netbsd/Dockerfile @@ -3,23 +3,8 @@ FROM ubuntu:16.04 COPY scripts/cross-apt-packages.sh /scripts/ RUN sh /scripts/cross-apt-packages.sh -# Ubuntu 16.04 (this container) ships with make 4, but something in the -# toolchains we build below chokes on that, so go back to make 3 -COPY scripts/make3.sh /scripts/ -RUN sh /scripts/make3.sh - -COPY scripts/crosstool-ng.sh /scripts/ -RUN sh /scripts/crosstool-ng.sh - -COPY scripts/rustbuild-setup.sh /scripts/ -RUN sh /scripts/rustbuild-setup.sh -USER rustbuild -WORKDIR /tmp - COPY dist-x86_64-netbsd/build-netbsd-toolchain.sh /tmp/ -RUN ./build-netbsd-toolchain.sh - -USER root +RUN /tmp/build-netbsd-toolchain.sh COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh diff --git a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh index e730dd86087fb..b5377c64b1f54 100755 --- a/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh +++ b/src/ci/docker/dist-x86_64-netbsd/build-netbsd-toolchain.sh @@ -1,14 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- # ignore-tidy-linelength set -ex @@ -38,15 +28,15 @@ mkdir -p /x-tools/x86_64-unknown-netbsd/sysroot URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror # Originally from ftp://ftp.netbsd.org/pub/NetBSD/NetBSD-$BSD/source/sets/*.tgz -curl $URL/2017-03-17-netbsd-src.tgz | tar xzf - -curl $URL/2017-03-17-netbsd-gnusrc.tgz | tar xzf - -curl $URL/2017-03-17-netbsd-sharesrc.tgz | tar xzf - -curl $URL/2017-03-17-netbsd-syssrc.tgz | tar xzf - +curl $URL/2018-03-01-netbsd-src.tgz | tar xzf - +curl $URL/2018-03-01-netbsd-gnusrc.tgz | tar xzf - +curl $URL/2018-03-01-netbsd-sharesrc.tgz | tar xzf - +curl $URL/2018-03-01-netbsd-syssrc.tgz | tar xzf - # Originally from ftp://ftp.netbsd.org/pub/NetBSD/NetBSD-$BSD/amd64/binary/sets/*.tgz -curl $URL/2017-03-17-netbsd-base.tgz | \ +curl $URL/2018-03-01-netbsd-base.tgz | \ tar xzf - -C /x-tools/x86_64-unknown-netbsd/sysroot ./usr/include ./usr/lib ./lib -curl $URL/2017-03-17-netbsd-comp.tgz | \ +curl $URL/2018-03-01-netbsd-comp.tgz | \ tar xzf - -C /x-tools/x86_64-unknown-netbsd/sysroot ./usr/include ./usr/lib cd usr/src diff --git a/src/ci/docker/i686-gnu/Dockerfile b/src/ci/docker/i686-gnu/Dockerfile index daa24e0e8186d..17441ddb4546b 100644 --- a/src/ci/docker/i686-gnu/Dockerfile +++ b/src/ci/docker/i686-gnu/Dockerfile @@ -18,4 +18,10 @@ COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh ENV RUST_CONFIGURE_ARGS --build=i686-unknown-linux-gnu -ENV SCRIPT python2.7 ../x.py test +# Exclude some tests that are unlikely to be platform specific, to speed up +# this slow job. +ENV SCRIPT python2.7 ../x.py test \ + --exclude src/bootstrap \ + --exclude src/test/rustdoc-js \ + --exclude src/tools/error_index_generator \ + --exclude src/tools/linkchecker diff --git a/src/ci/docker/mingw-check/Dockerfile b/src/ci/docker/mingw-check/Dockerfile index 10aedf6a60e10..24e2dea4ca773 100644 --- a/src/ci/docker/mingw-check/Dockerfile +++ b/src/ci/docker/mingw-check/Dockerfile @@ -21,4 +21,5 @@ RUN sh /scripts/sccache.sh ENV RUN_CHECK_WITH_PARALLEL_QUERIES 1 ENV SCRIPT python2.7 ../x.py check --target=i686-pc-windows-gnu --host=i686-pc-windows-gnu && \ - python2.7 ../x.py build --stage 0 src/tools/build-manifest + python2.7 ../x.py build --stage 0 src/tools/build-manifest && \ + python2.7 ../x.py test --stage 0 src/tools/compiletest diff --git a/src/ci/docker/run.sh b/src/ci/docker/run.sh index 3a7714d32fa17..98a765e1cec95 100755 --- a/src/ci/docker/run.sh +++ b/src/ci/docker/run.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
set -e @@ -21,6 +12,9 @@ ci_dir="`dirname $docker_dir`" src_dir="`dirname $ci_dir`" root_dir="`dirname $src_dir`" +objdir=$root_dir/obj +dist=$objdir/build/dist + source "$ci_dir/shared.sh" travis_fold start build_docker @@ -28,17 +22,30 @@ travis_time_start if [ -f "$docker_dir/$image/Dockerfile" ]; then if [ "$CI" != "" ]; then - cksum=$(find $docker_dir/$image $docker_dir/scripts -type f | \ - sort | \ - xargs cat | \ - sha512sum | \ + hash_key=/tmp/.docker-hash-key.txt + rm -f "${hash_key}" + echo $image >> $hash_key + + cat "$docker_dir/$image/Dockerfile" >> $hash_key + # Look for all source files involves in the COPY command + copied_files=/tmp/.docker-copied-files.txt + rm -f "$copied_files" + for i in $(sed -n -e 's/^COPY \(.*\) .*$/\1/p' "$docker_dir/$image/Dockerfile"); do + # List the file names + find "$docker_dir/$i" -type f >> $copied_files + done + # Sort the file names and cat the content into the hash key + sort $copied_files | xargs cat >> $hash_key + + docker --version >> $hash_key + cksum=$(sha512sum $hash_key | \ awk '{print $1}') s3url="s3://$SCCACHE_BUCKET/docker/$cksum" url="https://s3-us-west-1.amazonaws.com/$SCCACHE_BUCKET/docker/$cksum" echo "Attempting to download $s3url" rm -f /tmp/rustci_docker_cache set +e - retry curl -f -L -C - -o /tmp/rustci_docker_cache "$url" + retry curl -y 30 -Y 10 --connect-timeout 30 -f -L -C - -o /tmp/rustci_docker_cache "$url" loaded_images=$(docker load -i /tmp/rustci_docker_cache | sed 's/.* sha/sha/') set -e echo "Downloaded containers:\n$loaded_images" @@ -73,6 +80,11 @@ if [ -f "$docker_dir/$image/Dockerfile" ]; then else echo "Looks like docker image is the same as before, not uploading" fi + # Record the container image for reuse, e.g. by rustup.rs builds + info="$dist/image-$image.txt" + mkdir -p "$dist" + echo "$url" >"$info" + echo "$digest" >>"$info" fi elif [ -f "$docker_dir/disabled/$image/Dockerfile" ]; then if [ -n "$TRAVIS_OS_NAME" ]; then @@ -95,8 +107,6 @@ fi travis_fold end build_docker travis_time_finish -objdir=$root_dir/obj - mkdir -p $HOME/.cargo mkdir -p $objdir/tmp mkdir -p $objdir/cores diff --git a/src/ci/docker/scripts/android-base-apt-get.sh b/src/ci/docker/scripts/android-base-apt-get.sh index 7ae3bf39a3832..738410c58fcfa 100644 --- a/src/ci/docker/scripts/android-base-apt-get.sh +++ b/src/ci/docker/scripts/android-base-apt-get.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex apt-get update diff --git a/src/ci/docker/scripts/android-ndk.sh b/src/ci/docker/scripts/android-ndk.sh index ec030496d393a..0db30e420e33a 100644 --- a/src/ci/docker/scripts/android-ndk.sh +++ b/src/ci/docker/scripts/android-ndk.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- set -ex URL=https://dl.google.com/android/repository diff --git a/src/ci/docker/scripts/android-sdk.sh b/src/ci/docker/scripts/android-sdk.sh index 99c5776c2e849..e78e3795c046a 100644 --- a/src/ci/docker/scripts/android-sdk.sh +++ b/src/ci/docker/scripts/android-sdk.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex export ANDROID_HOME=/android/sdk @@ -30,11 +20,19 @@ download_sysimage() { # The output from sdkmanager is so noisy that it will occupy all of the 4 MB # log extremely quickly. Thus we must silence all output. yes | sdkmanager --licenses > /dev/null - sdkmanager platform-tools emulator \ + yes | sdkmanager platform-tools \ "platforms;android-$api" \ "system-images;android-$api;default;$abi" > /dev/null } +download_emulator() { + # Download a pinned version of the emulator since upgrades can cause issues + curl -fo emulator.zip "https://dl.google.com/android/repository/emulator-linux-$1.zip" + rm -rf "${ANDROID_HOME}/emulator" + unzip -q emulator.zip -d "${ANDROID_HOME}" + rm -f emulator.zip +} + create_avd() { abi=$1 api=$2 @@ -50,11 +48,12 @@ download_and_create_avd() { download_sdk $1 download_sysimage $2 $3 create_avd $2 $3 + download_emulator $4 } # Usage: # -# setup_android_sdk 4333796 armeabi-v7a 18 +# download_and_create_avd 4333796 armeabi-v7a 18 5264690 # # 4333796 => # SDK tool version. @@ -63,3 +62,6 @@ download_and_create_avd() { # System image ABI # 18 => # Android API Level (18 = Android 4.3 = Jelly Bean MR2) +# 5264690 => +# Android Emulator version. +# Copy from the "build_id" in the `/android/sdk/emulator/emulator -version` output diff --git a/src/ci/docker/scripts/android-start-emulator.sh b/src/ci/docker/scripts/android-start-emulator.sh index cd3369d5eaddc..09f0d13759c79 100755 --- a/src/ci/docker/scripts/android-start-emulator.sh +++ b/src/ci/docker/scripts/android-start-emulator.sh @@ -1,13 +1,4 @@ #!/bin/sh -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -ex diff --git a/src/ci/docker/scripts/cross-apt-packages.sh b/src/ci/docker/scripts/cross-apt-packages.sh index f6c9cc960c56d..51945fd72adc7 100644 --- a/src/ci/docker/scripts/cross-apt-packages.sh +++ b/src/ci/docker/scripts/cross-apt-packages.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - apt-get update && apt-get install -y --no-install-recommends \ automake \ bison \ diff --git a/src/ci/docker/scripts/crosstool-ng.sh b/src/ci/docker/scripts/crosstool-ng.sh index 4cd25ffa277a4..2773e687ebe6f 100644 --- a/src/ci/docker/scripts/crosstool-ng.sh +++ b/src/ci/docker/scripts/crosstool-ng.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex url="https://github.com/crosstool-ng/crosstool-ng/archive/crosstool-ng-1.22.0.tar.gz" diff --git a/src/ci/docker/scripts/emscripten-wasm.sh b/src/ci/docker/scripts/emscripten-wasm.sh index 18499060a20f5..e4a93d7a10092 100644 --- a/src/ci/docker/scripts/emscripten-wasm.sh +++ b/src/ci/docker/scripts/emscripten-wasm.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/scripts/emscripten.sh b/src/ci/docker/scripts/emscripten.sh index 1d7b33db9ed86..d3b1cded6f589 100644 --- a/src/ci/docker/scripts/emscripten.sh +++ b/src/ci/docker/scripts/emscripten.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/scripts/freebsd-toolchain.sh b/src/ci/docker/scripts/freebsd-toolchain.sh index 15ed318f8ce85..b1ac490a87823 100755 --- a/src/ci/docker/scripts/freebsd-toolchain.sh +++ b/src/ci/docker/scripts/freebsd-toolchain.sh @@ -1,13 +1,5 @@ #!/bin/bash -# Copyright 2016-2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. +# ignore-tidy-linelength set -eux @@ -65,7 +57,9 @@ for lib in c++ c_nonshared compiler_rt execinfo gcc pthread rt ssp_nonshared; do files_to_extract=("${files_to_extract[@]}" "./usr/lib/lib${lib}.*") done -URL=https://download.freebsd.org/ftp/releases/${freebsd_arch}/${freebsd_version}-RELEASE/base.txz +# Originally downloaded from: +# https://download.freebsd.org/ftp/releases/${freebsd_arch}/${freebsd_version}-RELEASE/base.txz +URL=https://s3-us-west-1.amazonaws.com/rust-lang-ci2/rust-ci-mirror/2019-04-04-freebsd-${freebsd_arch}-${freebsd_version}-RELEASE-base.txz curl "$URL" | tar xJf - -C "$sysroot" --wildcards "${files_to_extract[@]}" # Fix up absolute symlinks from the system image. This can be removed diff --git a/src/ci/docker/scripts/make3.sh b/src/ci/docker/scripts/make3.sh index ec6e046c96452..47cb4158229d3 100644 --- a/src/ci/docker/scripts/make3.sh +++ b/src/ci/docker/scripts/make3.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
- set -ex curl -f https://ftp.gnu.org/gnu/make/make-3.81.tar.gz | tar xzf - diff --git a/src/ci/docker/scripts/musl-toolchain.sh b/src/ci/docker/scripts/musl-toolchain.sh new file mode 100644 index 0000000000000..3caf2852ede42 --- /dev/null +++ b/src/ci/docker/scripts/musl-toolchain.sh @@ -0,0 +1,74 @@ +# This script runs `musl-cross-make` to prepare C toolchain (Binutils, GCC, musl itself) +# and builds static libunwind that we distribute for static target. +# +# Versions of the toolchain components are configurable in `musl-cross-make/Makefile` and +# musl unlike GLIBC is forward compatible so upgrading it shouldn't break old distributions. +# Right now we have: Binutils 2.27, GCC 6.3.0, musl 1.1.18 +set -ex + +hide_output() { + set +x + on_err=" +echo ERROR: An error was encountered with the build. +cat /tmp/build.log +exit 1 +" + trap "$on_err" ERR + bash -c "while true; do sleep 30; echo \$(date) - building ...; done" & + PING_LOOP_PID=$! + $@ &> /tmp/build.log + trap - ERR + kill $PING_LOOP_PID + rm /tmp/build.log + set -x +} + +ARCH=$1 +TARGET=$ARCH-linux-musl + +OUTPUT=/usr/local +shift + +# Ancient binutils versions don't understand debug symbols produced by more recent tools. +# Apparently applying `-fPIC` everywhere allows them to link successfully. +export CFLAGS="-fPIC $CFLAGS" + +git clone https://github.com/richfelker/musl-cross-make -b v0.9.7 +cd musl-cross-make + +hide_output make -j$(nproc) TARGET=$TARGET +hide_output make install TARGET=$TARGET OUTPUT=$OUTPUT + +cd - + +# Install musl library to make binaries executable +ln -s $OUTPUT/$TARGET/lib/libc.so /lib/ld-musl-$ARCH.so.1 +echo $OUTPUT/$TARGET/lib >> /etc/ld-musl-$ARCH.path + + +export CC=$TARGET-gcc +export CXX=$TARGET-g++ + +LLVM=70 + +# may have been downloaded in a previous run +if [ ! -d libunwind-release_$LLVM ]; then + curl -L https://github.com/llvm-mirror/llvm/archive/release_$LLVM.tar.gz | tar xzf - + curl -L https://github.com/llvm-mirror/libunwind/archive/release_$LLVM.tar.gz | tar xzf - +fi + +# fixme(mati865): Replace it with https://github.com/rust-lang/rust/pull/59089 +mkdir libunwind-build +cd libunwind-build +cmake ../libunwind-release_$LLVM \ + -DLLVM_PATH=/build/llvm-release_$LLVM \ + -DLIBUNWIND_ENABLE_SHARED=0 \ + -DCMAKE_C_COMPILER=$CC \ + -DCMAKE_CXX_COMPILER=$CXX \ + -DCMAKE_C_FLAGS="$CFLAGS" \ + -DCMAKE_CXX_FLAGS="$CXXFLAGS" + +hide_output make -j$(nproc) +cp lib/libunwind.a $OUTPUT/$TARGET/lib +cd - && rm -rf libunwind-build + diff --git a/src/ci/docker/scripts/musl.sh b/src/ci/docker/scripts/musl.sh index 11d85471b7c08..116c16b2f35cd 100644 --- a/src/ci/docker/scripts/musl.sh +++ b/src/ci/docker/scripts/musl.sh @@ -1,13 +1,3 @@ -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex hide_output() { diff --git a/src/ci/docker/scripts/qemu-bare-bones-addentropy.c b/src/ci/docker/scripts/qemu-bare-bones-addentropy.c index 8975739e3c030..815b5b04f095d 100644 --- a/src/ci/docker/scripts/qemu-bare-bones-addentropy.c +++ b/src/ci/docker/scripts/qemu-bare-bones-addentropy.c @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #include #include #include diff --git a/src/ci/docker/scripts/rustbuild-setup.sh b/src/ci/docker/scripts/rustbuild-setup.sh index 96efccfdff386..94d7e600eacd2 100644 --- a/src/ci/docker/scripts/rustbuild-setup.sh +++ b/src/ci/docker/scripts/rustbuild-setup.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -ex groupadd -r rustbuild && useradd -m -r -g rustbuild rustbuild diff --git a/src/ci/docker/scripts/sccache.sh b/src/ci/docker/scripts/sccache.sh index da52d08318115..e05246201dd0c 100644 --- a/src/ci/docker/scripts/sccache.sh +++ b/src/ci/docker/scripts/sccache.sh @@ -1,13 +1,3 @@ -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # ignore-tidy-linelength set -ex diff --git a/src/ci/docker/test-various/Dockerfile b/src/ci/docker/test-various/Dockerfile new file mode 100644 index 0000000000000..611a24a69bd37 --- /dev/null +++ b/src/ci/docker/test-various/Dockerfile @@ -0,0 +1,64 @@ +FROM ubuntu:18.04 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + g++ \ + make \ + file \ + curl \ + ca-certificates \ + python \ + git \ + cmake \ + sudo \ + gdb \ + xz-utils \ + wget \ + patch + +# FIXME: build the `ptx-linker` instead. +RUN curl -sL https://github.com/denzp/rust-ptx-linker/releases/download/v0.9.0-alpha.2/rust-ptx-linker.linux64.tar.gz | \ + tar -xzvC /usr/bin + +RUN curl -sL https://nodejs.org/dist/v9.2.0/node-v9.2.0-linux-x64.tar.xz | \ + tar -xJ + +WORKDIR /build/ +COPY scripts/musl-toolchain.sh /build/ +RUN bash musl-toolchain.sh x86_64 && rm -rf build +WORKDIR / + +COPY scripts/sccache.sh /scripts/ +RUN sh /scripts/sccache.sh + +ENV RUST_CONFIGURE_ARGS \ + --musl-root-x86_64=/usr/local/x86_64-linux-musl \ + --set build.nodejs=/node-v9.2.0-linux-x64/bin/node \ + --set rust.lld + +# Some run-make tests have assertions about code size, and enabling debug +# assertions in libstd causes the binary to be much bigger than it would +# otherwise normally be. 
We already test libstd with debug assertions in lots of +# other contexts as well +ENV NO_DEBUG_ASSERTIONS=1 + +ENV WASM_TARGETS=wasm32-unknown-unknown +ENV WASM_SCRIPT python2.7 /checkout/x.py test --target $WASM_TARGETS \ + src/test/run-make \ + src/test/ui \ + src/test/run-pass \ + src/test/compile-fail \ + src/test/mir-opt \ + src/test/codegen-units \ + src/libcore + +ENV NVPTX_TARGETS=nvptx64-nvidia-cuda +ENV NVPTX_SCRIPT python2.7 /checkout/x.py test --target $NVPTX_TARGETS \ + src/test/run-make \ + src/test/assembly + +ENV MUSL_TARGETS=x86_64-unknown-linux-musl \ + CC_x86_64_unknown_linux_musl=x86_64-linux-musl-gcc \ + CXX_x86_64_unknown_linux_musl=x86_64-linux-musl-g++ +ENV MUSL_SCRIPT python2.7 /checkout/x.py test --target $MUSL_TARGETS + +ENV SCRIPT $WASM_SCRIPT && $NVPTX_SCRIPT && $MUSL_SCRIPT diff --git a/src/ci/docker/wasm32-unknown/Dockerfile b/src/ci/docker/wasm32-unknown/Dockerfile deleted file mode 100644 index 161f0c0062fa0..0000000000000 --- a/src/ci/docker/wasm32-unknown/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM ubuntu:18.04 - -RUN apt-get update && apt-get install -y --no-install-recommends \ - g++ \ - make \ - file \ - curl \ - ca-certificates \ - python \ - git \ - cmake \ - sudo \ - gdb \ - xz-utils - -RUN curl -sL https://nodejs.org/dist/v9.2.0/node-v9.2.0-linux-x64.tar.xz | \ - tar -xJ - -COPY scripts/sccache.sh /scripts/ -RUN sh /scripts/sccache.sh - -ENV TARGETS=wasm32-unknown-unknown - -ENV RUST_CONFIGURE_ARGS \ - --set build.nodejs=/node-v9.2.0-linux-x64/bin/node \ - --set rust.lld - -# Some run-make tests have assertions about code size, and enabling debug -# assertions in libstd causes the binary to be much bigger than it would -# otherwise normally be. We already test libstd with debug assertions in lots of -# other contexts as well -ENV NO_DEBUG_ASSERTIONS=1 - -ENV SCRIPT python2.7 /checkout/x.py test --target $TARGETS \ - src/test/run-make \ - src/test/ui \ - src/test/run-pass \ - src/test/compile-fail \ - src/test/mir-opt \ - src/test/codegen-units \ - src/libcore \ diff --git a/src/ci/docker/x86_64-gnu-debug/Dockerfile b/src/ci/docker/x86_64-gnu-debug/Dockerfile index bdde7ad7fe854..1c7eff68adc15 100644 --- a/src/ci/docker/x86_64-gnu-debug/Dockerfile +++ b/src/ci/docker/x86_64-gnu-debug/Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:16.04 +FROM ubuntu:18.10 RUN apt-get update && apt-get install -y --no-install-recommends \ g++ \ @@ -7,18 +7,37 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ curl \ ca-certificates \ python2.7 \ + python2.7-dev \ + libxml2-dev \ + libncurses-dev \ + libedit-dev \ + swig \ + doxygen \ git \ cmake \ sudo \ gdb \ - xz-utils + xz-utils \ + lld \ + clang COPY scripts/sccache.sh /scripts/ RUN sh /scripts/sccache.sh +ENV RUSTBUILD_FORCE_CLANG_BASED_TESTS 1 ENV RUN_CHECK_WITH_PARALLEL_QUERIES 1 + ENV RUST_CONFIGURE_ARGS \ --build=x86_64-unknown-linux-gnu \ --enable-debug \ - --enable-optimize -ENV SCRIPT python2.7 ../x.py build + --enable-lld \ + --enable-lldb \ + --enable-optimize \ + --set llvm.use-linker=lld \ + --set target.x86_64-unknown-linux-gnu.linker=clang \ + --set target.x86_64-unknown-linux-gnu.cc=clang \ + --set target.x86_64-unknown-linux-gnu.cxx=clang++ + +ENV SCRIPT \ + python2.7 ../x.py build && \ + python2.7 ../x.py test src/test/run-make-fulldeps --test-args clang diff --git a/src/ci/docker/x86_64-gnu-tools/checkregression.py b/src/ci/docker/x86_64-gnu-tools/checkregression.py index 208aab434ce1f..0cc0a6329e5bf 100755 --- a/src/ci/docker/x86_64-gnu-tools/checkregression.py +++ 
b/src/ci/docker/x86_64-gnu-tools/checkregression.py @@ -1,16 +1,6 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# Copyright 2018 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - import sys import json diff --git a/src/ci/docker/x86_64-gnu-tools/checktools.sh b/src/ci/docker/x86_64-gnu-tools/checktools.sh index d876cb7f37a41..af0198705a2fe 100755 --- a/src/ci/docker/x86_64-gnu-tools/checktools.sh +++ b/src/ci/docker/x86_64-gnu-tools/checktools.sh @@ -1,15 +1,5 @@ #!/bin/sh -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - set -eu X_PY="$1" @@ -33,6 +23,8 @@ python2.7 "$X_PY" test --no-fail-fast \ src/doc/nomicon \ src/doc/reference \ src/doc/rust-by-example \ + src/doc/embedded-book \ + src/doc/edition-guide \ src/tools/clippy \ src/tools/rls \ src/tools/rustfmt \ @@ -82,11 +74,13 @@ status_check() { check_dispatch $1 beta nomicon src/doc/nomicon check_dispatch $1 beta reference src/doc/reference check_dispatch $1 beta rust-by-example src/doc/rust-by-example + check_dispatch $1 beta edition-guide src/doc/edition-guide check_dispatch $1 beta rls src/tools/rls check_dispatch $1 beta rustfmt src/tools/rustfmt check_dispatch $1 beta clippy-driver src/tools/clippy # these tools are not required for beta to successfully branch check_dispatch $1 nightly miri src/tools/miri + check_dispatch $1 nightly embedded-book src/doc/embedded-book } # If this PR is intended to update one of these tools, do not let the build pass diff --git a/src/ci/docker/x86_64-gnu-tools/repo.sh b/src/ci/docker/x86_64-gnu-tools/repo.sh index 807e6fb7b642e..6364bc2aabf93 100644 --- a/src/ci/docker/x86_64-gnu-tools/repo.sh +++ b/src/ci/docker/x86_64-gnu-tools/repo.sh @@ -1,15 +1,5 @@ #!/bin/sh -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # This file provides the function `commit_toolstate_change` for pushing a change # to the `rust-toolstate` repository. # diff --git a/src/ci/init_repo.sh b/src/ci/init_repo.sh index 8345ab3bc3376..3dfd338157617 100755 --- a/src/ci/init_repo.sh +++ b/src/ci/init_repo.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
set -o errexit set -o pipefail @@ -43,19 +34,19 @@ if grep -q RUST_RELEASE_CHANNEL=beta src/ci/run.sh; then git fetch origin --unshallow beta master fi -function fetch_submodule { +# Duplicated in docker/dist-various-2/shared.sh +function fetch_github_commit_archive { local module=$1 local cached="download-${module//\//-}.tar.gz" retry sh -c "rm -f $cached && \ - curl -sSL -o $cached $2" + curl -f -sSL -o $cached $2" mkdir $module touch "$module/.git" tar -C $module --strip-components=1 -xf $cached rm $cached } -included="src/llvm src/llvm-emscripten src/doc/book src/doc/rust-by-example" -included="$included src/tools/lld src/tools/clang src/tools/lldb" +included="src/llvm-project src/llvm-emscripten src/doc/book src/doc/rust-by-example" modules="$(git config --file .gitmodules --get-regexp '\.path$' | cut -d' ' -f2)" modules=($modules) use_git="" @@ -68,7 +59,7 @@ for i in ${!modules[@]}; do git rm $module url=${urls[$i]} url=${url/\.git/} - fetch_submodule $module "$url/archive/$commit.tar.gz" & + fetch_github_commit_archive $module "$url/archive/$commit.tar.gz" & continue else use_git="$use_git $module" diff --git a/src/ci/run.sh b/src/ci/run.sh index 8e0eb8fec4325..42d0d7db5964c 100755 --- a/src/ci/run.sh +++ b/src/ci/run.sh @@ -1,13 +1,4 @@ #!/usr/bin/env bash -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. set -e @@ -91,7 +82,7 @@ fi SCCACHE_IDLE_TIMEOUT=10800 sccache --start-server || true if [ "$RUN_CHECK_WITH_PARALLEL_QUERIES" != "" ]; then - $SRC/configure --enable-experimental-parallel-queries + $SRC/configure --enable-parallel-compiler CARGO_INCREMENTAL=0 python2.7 ../x.py check rm -f config.toml rm -rf build diff --git a/src/ci/shared.sh b/src/ci/shared.sh index bb6945f0fd6bb..3ba64ad412064 100644 --- a/src/ci/shared.sh +++ b/src/ci/shared.sh @@ -1,19 +1,11 @@ #!/bin/false -# Copyright 2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # This file is intended to be sourced with `. shared.sh` or # `source shared.sh`, hence the invalid shebang and not being # marked as an executable file in git. 
# See http://unix.stackexchange.com/questions/82598 +# Duplicated in docker/dist-various-2/shared.sh function retry { echo "Attempting with retry:" "$@" local n=1 diff --git a/src/doc/book b/src/doc/book index 74d81d80052cb..b93ec30bbc7b1 160000 --- a/src/doc/book +++ b/src/doc/book @@ -1 +1 @@ -Subproject commit 74d81d80052cb88925f0e73b12fbd0b73ab7b5a0 +Subproject commit b93ec30bbc7b1b5c2f44223249ab359bed2ed5a6 diff --git a/src/doc/edition-guide b/src/doc/edition-guide index 419edb885ec1a..b56ddb1154845 160000 --- a/src/doc/edition-guide +++ b/src/doc/edition-guide @@ -1 +1 @@ -Subproject commit 419edb885ec1a98c0747b3907003d79e3e6b93a9 +Subproject commit b56ddb11548450a6df4edd1ed571b2bc304eb9e6 diff --git a/src/doc/embedded-book b/src/doc/embedded-book new file mode 160000 index 0000000000000..7989c723607ef --- /dev/null +++ b/src/doc/embedded-book @@ -0,0 +1 @@ +Subproject commit 7989c723607ef5b13b57208022259e6c771e11d0 diff --git a/src/doc/guide-error-handling.md b/src/doc/guide-error-handling.md index 54fa529f3aa8e..fd71d3e3c8e79 100644 --- a/src/doc/guide-error-handling.md +++ b/src/doc/guide-error-handling.md @@ -1,4 +1,4 @@ % Error Handling in Rust This content has moved into -[the Rust Programming Language book](book/error-handling.html). +[the Rust Programming Language book](book/ch09-00-error-handling.html). diff --git a/src/doc/guide-ownership.md b/src/doc/guide-ownership.md index 884f14726ca87..767dafc5baf92 100644 --- a/src/doc/guide-ownership.md +++ b/src/doc/guide-ownership.md @@ -1,4 +1,4 @@ % The (old) Rust Ownership Guide This content has moved into -[the Rust Programming Language book](book/ownership.html). +[the Rust Programming Language book](book/ch04-00-understanding-ownership.html). diff --git a/src/doc/guide-pointers.md b/src/doc/guide-pointers.md index dc80ec4399131..bafdb2fe0bbc3 100644 --- a/src/doc/guide-pointers.md +++ b/src/doc/guide-pointers.md @@ -2,6 +2,6 @@ This content has been removed, with no direct replacement. Rust only has two built-in pointer types now, -[references](book/references-and-borrowing.html) and [raw +[references](book/ch04-02-references-and-borrowing.html) and [raw pointers](book/raw-pointers.html). Older Rusts had many more pointer types, they’re gone now. diff --git a/src/doc/guide-testing.md b/src/doc/guide-testing.md index 67bcb0a5e546a..28d9fb48b73e7 100644 --- a/src/doc/guide-testing.md +++ b/src/doc/guide-testing.md @@ -1,4 +1,4 @@ % The (old) Rust Testing Guide This content has moved into -[the Rust Programming Language book](book/testing.html). +[the Rust Programming Language book](book/ch11-00-testing.html). diff --git a/src/doc/index.md b/src/doc/index.md index b79a349a453b6..0a2a80e8fd6e2 100644 --- a/src/doc/index.md +++ b/src/doc/index.md @@ -52,6 +52,12 @@ If reading multiple hundreds of pages about a language isn't your style, then a lot of words, RBE shows off a bunch of code, and keeps the talking to a minimum. It also includes exercises! +## Rustlings + +[Rustlings](https://github.com/rust-lang/rustlings) guides you through downloading and setting up the Rust toolchain, +and teaches you the basics of reading and writing Rust syntax. It's an +alternative to Rust by Example that works with your own environment. + # Use Rust Once you've gotten familiar with the language, these resources can help you @@ -71,6 +77,10 @@ accomplishing various tasks. +## The Edition Guide + +[The Edition Guide](edition-guide/index.html) describes the Rust editions. 
+ ## The Rustc Book [The Rustc Book](rustc/index.html) describes the Rust compiler, `rustc`. @@ -107,3 +117,25 @@ Rust. It's also sometimes called "the 'nomicon." ## The Unstable Book [The Unstable Book](unstable-book/index.html) has documentation for unstable features. + +## The `rustc` Contribution Guide + +[The `rustc` Guide](https://rust-lang.github.io/rustc-guide/) documents how +the compiler works and how to contribute to it. This is useful if you want to build +or modify the Rust compiler from source (e.g. to target something non-standard). + +# Specialize Rust + +When using Rust in specific domain areas, consider using the following resources tailored to each domain. + +## Embedded Systems + +When developing for Bare Metal or Embedded Linux systems, you may find these resources maintained by the [Embedded Working Group] useful. + +[Embedded Working Group]: https://github.com/rust-embedded + +### The Embedded Rust Book + +[The Embedded Rust Book] is targeted at developers familiar with embedded development and familiar with Rust, but have not used Rust for embedded development. + +[The Embedded Rust Book]: embedded-book/index.html diff --git a/src/doc/man/rustc.1 b/src/doc/man/rustc.1 index 8f611063dbe5d..3788e3c864e82 100644 --- a/src/doc/man/rustc.1 +++ b/src/doc/man/rustc.1 @@ -261,12 +261,12 @@ full debug info with variable and type information. .RE .TP \fBopt\-level\fR=\fIVAL\fR -Optimize with possible levels 0\[en]3 +Optimize with possible levels 0\[en]3, s (optimize for size), or z (for minimal size) .SH ENVIRONMENT -Some of these affect the output of the compiler, while others affect programs -which link to the standard library. +Some of these affect only test harness programs (generated via rustc --test); +others affect all programs which link to the Rust standard library. .TP \fBRUST_TEST_THREADS\fR diff --git a/src/doc/nomicon b/src/doc/nomicon index b7eb4a087207a..c02e0e7754a76 160000 --- a/src/doc/nomicon +++ b/src/doc/nomicon @@ -1 +1 @@ -Subproject commit b7eb4a087207af2405c0669fa577f8545b894c66 +Subproject commit c02e0e7754a76886e55b976a3a4fac20100cd35d diff --git a/src/doc/redirect.inc b/src/doc/redirect.inc new file mode 100644 index 0000000000000..33e3860c2a434 --- /dev/null +++ b/src/doc/redirect.inc @@ -0,0 +1,2 @@ + + diff --git a/src/doc/reference b/src/doc/reference index 60077efda319c..98f90ff4de8e5 160000 --- a/src/doc/reference +++ b/src/doc/reference @@ -1 +1 @@ -Subproject commit 60077efda319c95a89fe39609803c5433567adbf +Subproject commit 98f90ff4de8e588f651f0fb493b5c7496551cd59 diff --git a/src/doc/robots.txt b/src/doc/robots.txt new file mode 100644 index 0000000000000..a54ec508c1bef --- /dev/null +++ b/src/doc/robots.txt @@ -0,0 +1,19 @@ +# NB: This file is not automatically deployed. 
After changes, it needs to be uploaded manually to doc.rust-lang.org +User-agent: * +Disallow: /0.3/ +Disallow: /0.4/ +Disallow: /0.5/ +Disallow: /0.6/ +Disallow: /0.7/ +Disallow: /0.8/ +Disallow: /0.9/ +Disallow: /0.10/ +Disallow: /0.11.0/ +Disallow: /0.12.0/ +Disallow: /1.0.0-alpha/ +Disallow: /1.0.0-alpha.2/ +Disallow: /1.0.0-beta/ +Disallow: /1.0.0-beta.2/ +Disallow: /1.0.0-beta.3/ +Disallow: /1.0.0-beta.4/ +Disallow: /1.0.0-beta.5/ diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example index 2ce92beabb912..f68ef3d0f4959 160000 --- a/src/doc/rust-by-example +++ b/src/doc/rust-by-example @@ -1 +1 @@ -Subproject commit 2ce92beabb912d417a7314d6da83ac9b50dc2afb +Subproject commit f68ef3d0f4959f6a7d92a08d9994b117f0f4d32d diff --git a/src/doc/rust.css b/src/doc/rust.css index 5f216169efe2c..a92d4ff54db83 100644 --- a/src/doc/rust.css +++ b/src/doc/rust.css @@ -1,15 +1,3 @@ -/** - * Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT - * file at the top-level directory of this distribution and at - * http://rust-lang.org/COPYRIGHT. - * With elements taken from Bootstrap v3.0.2 (MIT licensed). - * - * Licensed under the Apache License, Version 2.0 or the MIT license - * , at your - * option. This file may not be copied, modified, or distributed - * except according to those terms. - */ @font-face { font-family: 'Fira Sans'; font-style: normal; @@ -26,19 +14,19 @@ font-family: 'Source Serif Pro'; font-style: normal; font-weight: 400; - src: local('Source Serif Pro'), url("SourceSerifPro-Regular.woff") format('woff'); + src: local('Source Serif Pro'), url("SourceSerifPro-Regular.ttf.woff") format('woff'); } @font-face { font-family: 'Source Serif Pro'; font-style: italic; font-weight: 400; - src: url("Heuristica-Italic.woff") format('woff'); + src: url("SourceSerifPro-It.ttf.woff") format('woff'); } @font-face { font-family: 'Source Serif Pro'; font-style: normal; font-weight: 700; - src: local('Source Serif Pro Bold'), url("SourceSerifPro-Bold.woff") format('woff'); + src: local('Source Serif Pro Bold'), url("SourceSerifPro-Bold.ttf.woff") format('woff'); } @font-face { font-family: 'Source Code Pro'; diff --git a/src/doc/rustc-guide b/src/doc/rustc-guide index 344c4e437ba4c..464cb5b166378 160000 --- a/src/doc/rustc-guide +++ b/src/doc/rustc-guide @@ -1 +1 @@ -Subproject commit 344c4e437ba4cfa5c14db643ec4d6b68dcd164c5 +Subproject commit 464cb5b166378dff64619081dd4c42533a1eb989 diff --git a/src/doc/rustc/src/SUMMARY.md b/src/doc/rustc/src/SUMMARY.md index e4c0939fd4636..34708d1847f6b 100644 --- a/src/doc/rustc/src/SUMMARY.md +++ b/src/doc/rustc/src/SUMMARY.md @@ -13,4 +13,5 @@ - [Targets](targets/index.md) - [Built-in Targets](targets/built-in.md) - [Custom Targets](targets/custom.md) -- [Contributing to `rustc`](contributing.md) \ No newline at end of file +- [Linker-plugin based LTO](linker-plugin-lto.md) +- [Contributing to `rustc`](contributing.md) diff --git a/src/doc/rustc/src/codegen-options/index.md b/src/doc/rustc/src/codegen-options/index.md index 94f21042c8fdd..a616409d9a400 100644 --- a/src/doc/rustc/src/codegen-options/index.md +++ b/src/doc/rustc/src/codegen-options/index.md @@ -187,7 +187,7 @@ This flag lets you control debug information: This flag lets you control the optimization level. -* `0`: no optimizations +* `0`: no optimizations, also turn on `cfg(debug_assertions)`. 
* `1`: basic optimizations * `2`: some optimizations * `3`: all optimizations diff --git a/src/doc/rustc/src/command-line-arguments.md b/src/doc/rustc/src/command-line-arguments.md index b60c55240140e..dfb40284ef6cf 100644 --- a/src/doc/rustc/src/command-line-arguments.md +++ b/src/doc/rustc/src/command-line-arguments.md @@ -42,11 +42,11 @@ This flag prints out various information about the compiler. ## `-g`: include debug information -A synonym for `-C debug-level=2`. +A synonym for `-C debuginfo=2`, for more see [here](codegen-options/index.html#debuginfo). ## `-O`: optimize your code -A synonym for `-C opt-level=2`. +A synonym for `-C opt-level=2`, for more see [here](codegen-options/index.html#opt-level). ## `-o`: filename of the output @@ -86,6 +86,13 @@ This flag will set which lints should be set to the [deny level](lints/levels.ht This flag will set which lints should be set to the [forbid level](lints/levels.html#forbid). +## `-Z`: set unstable options + +This flag will allow you to set unstable options of rustc. In order to set multiple options, +the -Z flag can be used multiple times. For example: `rustc -Z verbose -Z time`. +Specifying options with -Z is only available on nightly. To view all available options +run: `rustc -Z help`. + ## `--cap-lints`: set the most restrictive lint level This flag lets you 'cap' lints, for more, [see here](lints/levels.html#capping-lints). diff --git a/src/doc/rustc/src/contributing.md b/src/doc/rustc/src/contributing.md index 3a1cafe8a6153..25a5c97b0a120 100644 --- a/src/doc/rustc/src/contributing.md +++ b/src/doc/rustc/src/contributing.md @@ -1,6 +1,12 @@ # Contributing to rustc We'd love to have your help improving `rustc`! To that end, we've written [a -whole book](https://rust-lang.github.io/rustc-guide/) on its +whole book][rustc_guide] on its internals, how it works, and how to get started working on it. To learn more, you'll want to check that out. + +If you would like to contribute to _this_ book, you can find its source in the +rustc source at [src/doc/rustc][rustc_book]. + +[rustc_guide]: https://rust-lang.github.io/rustc-guide/ +[rustc_book]: https://github.com/rust-lang/rust/tree/master/src/doc/rustc diff --git a/src/doc/rustc/src/linker-plugin-lto.md b/src/doc/rustc/src/linker-plugin-lto.md new file mode 100644 index 0000000000000..73a2efcb33a75 --- /dev/null +++ b/src/doc/rustc/src/linker-plugin-lto.md @@ -0,0 +1,108 @@ +# Linker-plugin-LTO + +The `-C linker-plugin-lto` flag allows for deferring the LTO optimization +to the actual linking step, which in turn allows for performing +interprocedural optimizations across programming language boundaries if +all the object files being linked were created by LLVM based toolchains. +The prime example here would be linking Rust code together with +Clang-compiled C/C++ code. + +## Usage + +There are two main cases how linker plugin based LTO can be used: + + - compiling a Rust `staticlib` that is used as a C ABI dependency + - compiling a Rust binary where `rustc` invokes the linker + +In both cases the Rust code has to be compiled with `-C linker-plugin-lto` and +the C/C++ code with `-flto` or `-flto=thin` so that object files are emitted +as LLVM bitcode. + +### Rust `staticlib` as dependency in C/C++ program + +In this case the Rust compiler just has to make sure that the object files in +the `staticlib` are in the right format. For linking, a linker with the +LLVM plugin must be used (e.g. LLD). 
+ +Using `rustc` directly: + +```bash +# Compile the Rust staticlib +rustc --crate-type=staticlib -Clinker-plugin-lto -Copt-level=2 ./lib.rs +# Compile the C code with `-flto=thin` +clang -c -O2 -flto=thin -o main.o ./main.c +# Link everything, making sure that we use an appropriate linker +clang -flto=thin -fuse-ld=lld -L . -l"name-of-your-rust-lib" -o main -O2 ./cmain.o +``` + +Using `cargo`: + +```bash +# Compile the Rust staticlib +RUSTFLAGS="-Clinker-plugin-lto" cargo build --release +# Compile the C code with `-flto=thin` +clang -c -O2 -flto=thin -o main.o ./main.c +# Link everything, making sure that we use an appropriate linker +clang -flto=thin -fuse-ld=lld -L . -l"name-of-your-rust-lib" -o main -O2 ./cmain.o +``` + +### C/C++ code as a dependency in Rust + +In this case the linker will be invoked by `rustc`. We again have to make sure +that an appropriate linker is used. + +Using `rustc` directly: + +```bash +# Compile C code with `-flto` +clang ./clib.c -flto=thin -c -o ./clib.o -O2 +# Create a static library from the C code +ar crus ./libxyz.a ./clib.o + +# Invoke `rustc` with the additional arguments +rustc -Clinker-plugin-lto -L. -Copt-level=2 -Clinker=clang -Clink-arg=-fuse-ld=lld ./main.rs +``` + +Using `cargo` directly: + +```bash +# Compile C code with `-flto` +clang ./clib.c -flto=thin -c -o ./clib.o -O2 +# Create a static library from the C code +ar crus ./libxyz.a ./clib.o + +# Set the linking arguments via RUSTFLAGS +RUSTFLAGS="-Clinker-plugin-lto -Clinker=clang -Clink-arg=-fuse-ld=lld" cargo build --release +``` + +### Explicitly specifying the linker plugin to be used by `rustc` + +If one wants to use a linker other than LLD, the LLVM linker plugin has to be +specified explicitly. Otherwise the linker cannot read the object files. The +path to the plugin is passed as an argument to the `-Clinker-plugin-lto` +option: + +```bash +rustc -Clinker-plugin-lto="/path/to/LLVMgold.so" -L. -Copt-level=2 ./main.rs +``` + + +## Toolchain Compatibility + +In order for this kind of LTO to work, the LLVM linker plugin must be able to +handle the LLVM bitcode produced by both `rustc` and `clang`. + +Best results are achieved by using a `rustc` and `clang` that are based on the +exact same version of LLVM. One can use `rustc -vV` in order to view the LLVM +used by a given `rustc` version. Note that the version number given +here is only an approximation as Rust sometimes uses unstable revisions of +LLVM. However, the approximation is usually reliable. + +The following table shows known good combinations of toolchain versions. + +| | Clang 7 | Clang 8 | +|-----------|-----------|-----------| +| Rust 1.34 | ✗ | ✓ | +| Rust 1.35 | ✗ | ✓(?) | + +Note that the compatibility policy for this feature might change in the future. diff --git a/src/doc/rustc/src/lints/levels.md b/src/doc/rustc/src/lints/levels.md index 072c7585934e8..d315e0f8ca9e5 100644 --- a/src/doc/rustc/src/lints/levels.md +++ b/src/doc/rustc/src/lints/levels.md @@ -90,7 +90,9 @@ This lint level gives you that. 'forbid' is a special lint level that's stronger than 'deny'. It's the same as 'deny' in that a lint at this level will produce an error, but unlike the 'deny' level, the 'forbid' level can not be overridden to be anything lower -than an error. +than an error. However, lint levels may still be capped with `--cap-lints` +(see below) so `rustc --cap-lints warn` will make lints set to 'forbid' just +warn. 
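
[Editor's note: the following sketch is not part of the patch above; it only illustrates the `forbid` / `--cap-lints` interaction that the preceding hunk documents. Names are chosen purely for illustration.]

```rust
// A sketch of the behaviour described above. Built normally
// (e.g. `rustc --crate-type=lib lib.rs`), removing any doc comment below is a
// hard error because of `forbid`, and it cannot be re-allowed further down the
// crate. Built with `rustc --cap-lints warn --crate-type=lib lib.rs`, the same
// omission is reported only as a warning (Cargo similarly passes
// `--cap-lints allow` when compiling your dependencies).
#![forbid(missing_docs)]

//! Crate-level documentation (the `missing_docs` lint checks this too).

/// A documented public item, so this file compiles cleanly as written.
pub fn documented() {}
```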
## Configuring warning levels diff --git a/src/doc/rustc/src/lints/listing/deny-by-default.md b/src/doc/rustc/src/lints/listing/deny-by-default.md index ff9e0235a0435..fa62d1a03f53b 100644 --- a/src/doc/rustc/src/lints/listing/deny-by-default.md +++ b/src/doc/rustc/src/lints/listing/deny-by-default.md @@ -149,6 +149,26 @@ error: const items should never be #[no_mangle] | ``` +## overflowing-literals + +This lint detects literal out of range for its type. Some +example code that triggers this lint: + +```rust,compile_fail +let x: u8 = 1000; +``` + +This will produce: + +```text +error: literal out of range for u8 + --> src/main.rs:2:17 + | +2 | let x: u8 = 1000; + | ^^^^ + | +``` + ## parenthesized-params-in-types-and-modules This lint detects incorrect parentheses. Some example code that triggers this diff --git a/src/doc/rustc/src/lints/listing/warn-by-default.md b/src/doc/rustc/src/lints/listing/warn-by-default.md index b01aed0915d08..ba927b1ef3b57 100644 --- a/src/doc/rustc/src/lints/listing/warn-by-default.md +++ b/src/doc/rustc/src/lints/listing/warn-by-default.md @@ -24,7 +24,7 @@ warning: attempt to add with overflow ## dead-code -This lint detects detect unused, unexported items. Some +This lint detects unused, unexported items. Some example code that triggers this lint: ```rust @@ -44,7 +44,7 @@ warning: function is never used: `foo` ## deprecated -This lint detects detects use of deprecated items. Some +This lint detects use of deprecated items. Some example code that triggers this lint: ```rust @@ -119,7 +119,7 @@ warning: found struct without foreign-function-safe representation annotation in ## late-bound-lifetime-arguments -This lint detects detects generic lifetime arguments in path segments with +This lint detects generic lifetime arguments in path segments with late bound lifetime parameters. Some example code that triggers this lint: ```rust @@ -285,26 +285,6 @@ warning: functions generic over types must be mangled | ``` -## overflowing-literals - -This lint detects literal out of range for its type. Some -example code that triggers this lint: - -```rust -let x: u8 = 1000; -``` - -This will produce: - -```text -warning: literal out of range for u8 - --> src/main.rs:2:17 - | -2 | let x: u8 = 1000; - | ^^^^ - | -``` - ## path-statements This lint detects path statements with no effect. Some example code that @@ -381,7 +361,7 @@ extern crate macro_crate_test; ## private-in-public -This lint detects detect private items in public interfaces not caught by the old implementation. Some +This lint detects private items in public interfaces not caught by the old implementation. Some example code that triggers this lint: ```rust,ignore @@ -659,7 +639,7 @@ warning: unknown lint: `not_a_real_lint` ## unreachable-code -This lint detects detects unreachable code paths. Some example code that +This lint detects unreachable code paths. Some example code that triggers this lint: ```rust,no_run @@ -681,7 +661,7 @@ warning: unreachable statement ## unreachable-patterns -This lint detects detects unreachable patterns. Some +This lint detects unreachable patterns. Some example code that triggers this lint: ```rust @@ -716,11 +696,11 @@ annotations now. ## unused-allocation -This lint detects detects unnecessary allocations that can be eliminated. +This lint detects unnecessary allocations that can be eliminated. ## unused-assignments -This lint detects detect assignments that will never be read. Some +This lint detects assignments that will never be read. 
Some example code that triggers this lint: ```rust @@ -741,7 +721,7 @@ warning: value assigned to `x` is never read ## unused-attributes -This lint detects detects attributes that were not used by the compiler. Some +This lint detects attributes that were not used by the compiler. Some example code that triggers this lint: ```rust @@ -785,7 +765,7 @@ warning: comparison is useless due to type limits ## unused-doc-comment -This lint detects detects doc comments that aren't used by rustdoc. Some +This lint detects doc comments that aren't used by rustdoc. Some example code that triggers this lint: ```rust @@ -831,7 +811,7 @@ warning: unused import: `std::collections::HashMap` ## unused-macros -This lint detects detects macros that were not used. Some example code that +This lint detects macros that were not used. Some example code that triggers this lint: ```rust @@ -884,7 +864,7 @@ warning: unused `std::result::Result` that must be used ## unused-mut -This lint detects detect mut variables which don't need to be mutable. Some +This lint detects mut variables which don't need to be mutable. Some example code that triggers this lint: ```rust @@ -946,7 +926,7 @@ warning: unnecessary `unsafe` block ## unused-variables -This lint detects detect variables which are not used in any way. Some +This lint detects variables which are not used in any way. Some example code that triggers this lint: ```rust diff --git a/src/doc/rustc/src/what-is-rustc.md b/src/doc/rustc/src/what-is-rustc.md index bed1b71c24e00..9dcc9f7daa9ff 100644 --- a/src/doc/rustc/src/what-is-rustc.md +++ b/src/doc/rustc/src/what-is-rustc.md @@ -50,7 +50,7 @@ fn main() { And a `foo.rs` that had this: ```rust,ignore -fn hello() { +pub fn hello() { println!("Hello, world!"); } ``` @@ -65,4 +65,4 @@ No need to tell `rustc` about `foo.rs`; the `mod` statements give it everything that it needs. This is different than how you would use a C compiler, where you invoke the compiler on each file, and then link everything together. In other words, the *crate* is a translation unit, not a -particular module. \ No newline at end of file +particular module. diff --git a/src/doc/rustdoc/src/documentation-tests.md b/src/doc/rustdoc/src/documentation-tests.md index dd8dcb7ff9bd2..c9acd3c307b54 100644 --- a/src/doc/rustdoc/src/documentation-tests.md +++ b/src/doc/rustdoc/src/documentation-tests.md @@ -171,7 +171,7 @@ compiles, while only showing the parts that are relevant to that part of your explanation. The `#`-hiding of lines can be prevented by using two consecutive hashes -`##`. This only needs to be done with with the first `#` which would've +`##`. This only needs to be done with the first `#` which would've otherwise caused hiding. If we have a string literal like the following, which has a line that starts with a `#`: @@ -236,6 +236,23 @@ appears to the reader as the initial idea but works with doc tests: /// ``` ``` +As of version 1.34.0, one can also omit the `fn main()`, but you will have to +disambiguate the error type: + +```ignore +/// ``` +/// use std::io; +/// let mut input = String::new(); +/// io::stdin().read_line(&mut input)?; +/// # Ok::<(), io::Error>(()) +/// ``` +``` + +This is an unfortunate consequence of the `?` operator adding an implicit +conversion, so type inference fails because the type is not unique. Please note +that you must write the `(())` in one sequence without intermediate whitespace +so that rustdoc understands you want an implicit `Result`-returning function. 
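
[Editor's note: an added comparison, not part of the patch. The same doctest body can instead keep an explicit `main` that returns a `Result`, which gives `?` a concrete error type without the trailing `Ok::<(), io::Error>(())` annotation.]

```rust
use std::io;

// Explicit, Result-returning `main`: rustdoc sees the `fn main` and does not
// wrap the doctest, and `?` converts into `io::Error` directly.
fn main() -> Result<(), io::Error> {
    let mut input = String::new();
    io::stdin().read_line(&mut input)?;
    Ok(())
}
```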
+ ## Documenting macros Here’s an example of documenting a macro: diff --git a/src/doc/rustdoc/src/unstable-features.md b/src/doc/rustdoc/src/unstable-features.md index 43cdab27e9dae..3938df1a68267 100644 --- a/src/doc/rustdoc/src/unstable-features.md +++ b/src/doc/rustdoc/src/unstable-features.md @@ -1,9 +1,8 @@ # Unstable features Rustdoc is under active development, and like the Rust compiler, some features are only available -on the nightly releases. Some of these are new and need some more testing before they're able to get -released to the world at large, and some of them are tied to features in the Rust compiler that are -themselves unstable. Several features here require a matching `#![feature(...)]` attribute to +on nightly releases. Some of these features are new and need some more testing before they're able to be +released to the world at large, and some of them are tied to features in the Rust compiler that are unstable. Several features here require a matching `#![feature(...)]` attribute to enable, and thus are more fully documented in the [Unstable Book]. Those sections will link over there as necessary. @@ -54,7 +53,7 @@ For example, in the following code: ```rust /// Does the thing. pub fn do_the_thing(_: SomeType) { - println!("Let's do the thing!"); + println!("Let's do the thing!"); } /// Token you use to [`do_the_thing`]. @@ -67,15 +66,15 @@ target out also works: ```rust pub mod some_module { - /// Token you use to do the thing. - pub struct SomeStruct; + /// Token you use to do the thing. + pub struct SomeStruct; } /// Does the thing. Requires one [`SomeStruct`] for the thing to work. /// /// [`SomeStruct`]: some_module::SomeStruct pub fn do_the_thing(_: some_module::SomeStruct) { - println!("Let's do the thing!"); + println!("Let's do the thing!"); } ``` @@ -402,3 +401,57 @@ Using `index-page` option enables `enable-index-page` option as well. ### `--enable-index-page`: generate a default index page for docs This feature allows the generation of a default index-page which lists the generated crates. + +### `--static-root-path`: control how static files are loaded in HTML output + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --static-root-path '/cache/' +``` + +This flag controls how rustdoc links to its static files on HTML pages. If you're hosting a lot of +crates' docs generated by the same version of rustdoc, you can use this flag to cache rustdoc's CSS, +JavaScript, and font files in a single location, rather than duplicating it once per "doc root" +(grouping of crate docs generated into the same output directory, like with `cargo doc`). Per-crate +files like the search index will still load from the documentation root, but anything that gets +renamed with `--resource-suffix` will load from the given path. + +### `--persist-doctests`: persist doctest executables after running + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs --test -Z unstable-options --persist-doctests target/rustdoctest +``` + +This flag allows you to keep doctest executables around after they're compiled or run. +Usually, rustdoc will immediately discard a compiled doctest after it's been tested, but +with this option, you can keep those binaries around for farther testing. 
+ +### `--show-coverage`: calculate the percentage of items with documentation + +Using this flag looks like this: + +```bash +$ rustdoc src/lib.rs -Z unstable-options --show-coverage +``` + +If you want to determine how many items in your crate are documented, pass this flag to rustdoc. +When it receives this flag, it will count the public items in your crate that have documentation, +and print out the counts and a percentage instead of generating docs. + +Some methodology notes about what rustdoc counts in this metric: + +* Rustdoc will only count items from your crate (i.e. items re-exported from other crates don't + count). +* Docs written directly onto inherent impl blocks are not counted, even though their doc comments + are displayed, because the common pattern in Rust code is to write all inherent methods into the + same impl block. +* Items in a trait implementation are not counted, as those impls will inherit any docs from the + trait itself. +* By default, only public items are counted. To count private items as well, pass + `--document-private-items` at the same time. + +Public items that are not documented can be seen with the built-in `missing_docs` lint. Private +items that are not documented can be seen with Clippy's `missing_docs_in_private_items` lint. diff --git a/src/doc/unstable-book/book.toml b/src/doc/unstable-book/book.toml new file mode 100644 index 0000000000000..5534340f0db16 --- /dev/null +++ b/src/doc/unstable-book/book.toml @@ -0,0 +1,2 @@ +[book] +title = "The Rust Unstable Book" diff --git a/src/doc/unstable-book/src/language-features/c-variadic.md b/src/doc/unstable-book/src/language-features/c-variadic.md new file mode 100644 index 0000000000000..9e7968d906fbf --- /dev/null +++ b/src/doc/unstable-book/src/language-features/c-variadic.md @@ -0,0 +1,24 @@ +# `c_variadic` + +The tracking issue for this feature is: [#44930] + +[#44930]: https://github.com/rust-lang/rust/issues/44930 + +------------------------ + +The `c_variadic` language feature enables C-variadic functions to be +defined in Rust. The may be called both from within Rust and via FFI. + +## Examples + +```rust +#![feature(c_variadic)] + +pub unsafe extern "C" fn add(n: usize, mut args: ...) -> usize { + let mut sum = 0; + for _ in 0..n { + sum += args.arg::(); + } + sum +} +``` diff --git a/src/doc/unstable-book/src/language-features/cfg-attr-multi.md b/src/doc/unstable-book/src/language-features/cfg-attr-multi.md deleted file mode 100644 index 6365d3e71c616..0000000000000 --- a/src/doc/unstable-book/src/language-features/cfg-attr-multi.md +++ /dev/null @@ -1,20 +0,0 @@ -# `cfg_attr_multi` - -The tracking issue for this feature is: [#54881] -The RFC for this feature is: [#2539] - -[#54881]: https://github.com/rust-lang/rust/issues/54881 -[#2539]: https://github.com/rust-lang/rfcs/pull/2539 - ------------------------- - -This feature flag lets you put multiple attributes into a `cfg_attr` attribute. - -Example: - -```rust,ignore -#[cfg_attr(all(), must_use, optimize)] -``` - -Because `cfg_attr` resolves before procedural macros, this does not affect -macro resolution at all. 
\ No newline at end of file diff --git a/src/doc/unstable-book/src/language-features/const-fn.md b/src/doc/unstable-book/src/language-features/const-fn.md index d5a2243683862..50dbbaf56743c 100644 --- a/src/doc/unstable-book/src/language-features/const-fn.md +++ b/src/doc/unstable-book/src/language-features/const-fn.md @@ -1,8 +1,8 @@ # `const_fn` -The tracking issue for this feature is: [#24111] +The tracking issue for this feature is: [#57563] -[#24111]: https://github.com/rust-lang/rust/issues/24111 +[#57563]: https://github.com/rust-lang/rust/issues/57563 ------------------------ diff --git a/src/doc/unstable-book/src/language-features/crate-visibility-modifier.md b/src/doc/unstable-book/src/language-features/crate-visibility-modifier.md index 11b3ee8edf0b1..b59859dd348e7 100644 --- a/src/doc/unstable-book/src/language-features/crate-visibility-modifier.md +++ b/src/doc/unstable-book/src/language-features/crate-visibility-modifier.md @@ -1,8 +1,8 @@ # `crate_visibility_modifier` -The tracking issue for this feature is: [#45388] +The tracking issue for this feature is: [#53120] -[#45388]: https://github.com/rust-lang/rust/issues/45388 +[#53120]: https://github.com/rust-lang/rust/issues/53120 ----- diff --git a/src/doc/unstable-book/src/language-features/extern-in-paths.md b/src/doc/unstable-book/src/language-features/extern-in-paths.md deleted file mode 100644 index 9979d7742291e..0000000000000 --- a/src/doc/unstable-book/src/language-features/extern-in-paths.md +++ /dev/null @@ -1,40 +0,0 @@ -# `extern_in_paths` - -The tracking issue for this feature is: [#44660] - -[#44660]: https://github.com/rust-lang/rust/issues/44660 - ------------------------- - -The `extern_in_paths` feature allows to refer to names from other crates "inline", without -introducing `extern crate` items, using keyword `extern`. - -For example, `extern::my_crat::a::b` will resolve to path `a::b` in crate `my_crate`. - -Absolute paths on 2018 edition (e.g. `::my_crate::a::b`) provide the same effect -and resolve to extern crates (built-in or passed with `--extern`). - -```rust,ignore -#![feature(extern_in_paths)] - -// Suppose we have a dependency crate `xcrate` available through `Cargo.toml`, or `--extern` -// options, or standard Rust distribution, or some other means. 
- -use extern::xcrate::Z; - -fn f() { - use extern::xcrate; - use extern::xcrate as ycrate; - let s = xcrate::S; - assert_eq!(format!("{:?}", s), "S"); - let z = ycrate::Z; - assert_eq!(format!("{:?}", z), "Z"); -} - -fn main() { - let s = extern::xcrate::S; - assert_eq!(format!("{:?}", s), "S"); - let z = Z; - assert_eq!(format!("{:?}", z), "Z"); -} -``` diff --git a/src/doc/unstable-book/src/language-features/generators.md b/src/doc/unstable-book/src/language-features/generators.md index 968534e58bd9e..426fc01a6b051 100644 --- a/src/doc/unstable-book/src/language-features/generators.md +++ b/src/doc/unstable-book/src/language-features/generators.md @@ -29,6 +29,7 @@ A syntactical example of a generator is: #![feature(generators, generator_trait)] use std::ops::{Generator, GeneratorState}; +use std::pin::Pin; fn main() { let mut generator = || { @@ -36,11 +37,11 @@ fn main() { return "foo" }; - match unsafe { generator.resume() } { + match Pin::new(&mut generator).resume() { GeneratorState::Yielded(1) => {} _ => panic!("unexpected value from resume"), } - match unsafe { generator.resume() } { + match Pin::new(&mut generator).resume() { GeneratorState::Complete("foo") => {} _ => panic!("unexpected value from resume"), } @@ -60,6 +61,7 @@ prints all numbers in order: #![feature(generators, generator_trait)] use std::ops::Generator; +use std::pin::Pin; fn main() { let mut generator = || { @@ -69,9 +71,9 @@ fn main() { }; println!("1"); - unsafe { generator.resume() }; + Pin::new(&mut generator).resume(); println!("3"); - unsafe { generator.resume() }; + Pin::new(&mut generator).resume(); println!("5"); } ``` @@ -86,13 +88,14 @@ Feedback on the design and usage is always appreciated! The `Generator` trait in `std::ops` currently looks like: ``` -# #![feature(generator_trait)] +# #![feature(arbitrary_self_types, generator_trait)] # use std::ops::GeneratorState; +# use std::pin::Pin; pub trait Generator { type Yield; type Return; - unsafe fn resume(&mut self) -> GeneratorState; + fn resume(self: Pin<&mut Self>) -> GeneratorState; } ``` @@ -167,6 +170,7 @@ Let's take a look at an example to see what's going on here: #![feature(generators, generator_trait)] use std::ops::Generator; +use std::pin::Pin; fn main() { let ret = "foo"; @@ -175,17 +179,18 @@ fn main() { return ret }; - unsafe { generator.resume() }; - unsafe { generator.resume() }; + Pin::new(&mut generator).resume(); + Pin::new(&mut generator).resume(); } ``` This generator literal will compile down to something similar to: ```rust -#![feature(generators, generator_trait)] +#![feature(arbitrary_self_types, generators, generator_trait)] use std::ops::{Generator, GeneratorState}; +use std::pin::Pin; fn main() { let ret = "foo"; @@ -200,9 +205,9 @@ fn main() { type Yield = i32; type Return = &'static str; - unsafe fn resume(&mut self) -> GeneratorState { + fn resume(mut self: Pin<&mut Self>) -> GeneratorState { use std::mem; - match mem::replace(self, __Generator::Done) { + match mem::replace(&mut *self, __Generator::Done) { __Generator::Start(s) => { *self = __Generator::Yield1(s); GeneratorState::Yielded(1) @@ -223,8 +228,8 @@ fn main() { __Generator::Start(ret) }; - unsafe { generator.resume() }; - unsafe { generator.resume() }; + Pin::new(&mut generator).resume(); + Pin::new(&mut generator).resume(); } ``` diff --git a/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md b/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md deleted file mode 100644 index 46b843778e810..0000000000000 --- 
a/src/doc/unstable-book/src/language-features/irrefutable-let-patterns.md +++ /dev/null @@ -1,28 +0,0 @@ -# `irrefutable_let_patterns` - -The tracking issue for this feature is: [#44495] - -[#44495]: https://github.com/rust-lang/rust/issues/44495 - ------------------------- - -This feature changes the way that "irrefutable patterns" are handled -in the `if let` and `while let` forms. An *irrefutable pattern* is one -that cannot fail to match -- for example, the `_` pattern matches any -value, and hence it is "irrefutable". Without this feature, using an -irrefutable pattern in an `if let` gives a hard error (since often -this indicates programmer error). But when the feature is enabled, the -error becomes a lint (since in some cases irrefutable patterns are -expected). This means you can use `#[allow]` to silence the lint: - -```rust -#![feature(irrefutable_let_patterns)] - -#[allow(irrefutable_let_patterns)] -fn main() { - // These two examples used to be errors, but now they - // trigger a lint (that is allowed): - if let _ = 5 {} - while let _ = 5 { break; } -} -``` diff --git a/src/doc/unstable-book/src/language-features/non-ascii-idents.md b/src/doc/unstable-book/src/language-features/non-ascii-idents.md index efb5495fe26ac..46957c00bf95c 100644 --- a/src/doc/unstable-book/src/language-features/non-ascii-idents.md +++ b/src/doc/unstable-book/src/language-features/non-ascii-idents.md @@ -1,8 +1,8 @@ # `non_ascii_idents` -The tracking issue for this feature is: [#28979] +The tracking issue for this feature is: [#55467] -[#28979]: https://github.com/rust-lang/rust/issues/28979 +[#55467]: https://github.com/rust-lang/rust/issues/55467 ------------------------ diff --git a/src/doc/unstable-book/src/language-features/non-exhaustive.md b/src/doc/unstable-book/src/language-features/non-exhaustive.md index f9840e1b83f2b..907147c17ef8e 100644 --- a/src/doc/unstable-book/src/language-features/non-exhaustive.md +++ b/src/doc/unstable-book/src/language-features/non-exhaustive.md @@ -7,10 +7,12 @@ The tracking issue for this feature is: [#44109] ------------------------ The `non_exhaustive` gate allows you to use the `#[non_exhaustive]` attribute -on structs and enums. When applied within a crate, users of the crate will need -to use the `_` pattern when matching enums and use the `..` pattern when -matching structs. Structs marked as `non_exhaustive` will not be able to be -created normally outside of the defining crate. This is demonstrated below: +on structs, enums and enum variants. When applied within a crate, users of the +crate will need to use the `_` pattern when matching enums and use the `..` +pattern when matching structs. Enum variants cannot be matched against. +Structs and enum variants marked as `non_exhaustive` will not be able to +be created normally outside of the defining crate. This is demonstrated +below: ```rust,ignore (pseudo-Rust) use std::error::Error as StdError; @@ -72,4 +74,3 @@ let config = Config { window_width: 640, window_height: 480 }; // when marked non_exhaustive. let &Config { window_width, window_height, .. 
} = config; ``` - diff --git a/src/doc/unstable-book/src/language-features/on-unimplemented.md b/src/doc/unstable-book/src/language-features/on-unimplemented.md index f787f629756f3..a770ab65c26f8 100644 --- a/src/doc/unstable-book/src/language-features/on-unimplemented.md +++ b/src/doc/unstable-book/src/language-features/on-unimplemented.md @@ -138,3 +138,16 @@ error[E0277]: `&str` is not an iterator = help: the trait `std::iter::Iterator` is not implemented for `&str` = note: required by `std::iter::IntoIterator::into_iter` ``` + +If you need to filter on multiple attributes, you can use `all`, `any` or +`not` in the following way: + +```rust,compile_fail +#[rustc_on_unimplemented( + on( + all(_Self="&str", T="std::string::String"), + note="you can coerce a `{T}` into a `{Self}` by writing `&*variable`" + ) +)] +pub trait From: Sized { /* ... */ } +``` diff --git a/src/doc/unstable-book/src/language-features/plugin.md b/src/doc/unstable-book/src/language-features/plugin.md index 03ea392c86307..49fe7c9e994fe 100644 --- a/src/doc/unstable-book/src/language-features/plugin.md +++ b/src/doc/unstable-book/src/language-features/plugin.md @@ -52,6 +52,7 @@ that implements Roman numeral integer literals. #![feature(plugin_registrar, rustc_private)] extern crate syntax; +extern crate syntax_pos; extern crate rustc; extern crate rustc_plugin; @@ -59,7 +60,7 @@ use syntax::parse::token; use syntax::tokenstream::TokenTree; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacEager}; use syntax::ext::build::AstBuilder; // A trait for expr_usize. -use syntax::ext::quote::rt::Span; +use syntax_pos::Span; use rustc_plugin::Registry; fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) @@ -129,7 +130,7 @@ The advantages over a simple `fn(&str) -> u32` are: a way to define new literal syntax for any data type. In addition to procedural macros, you can define new -[`derive`](../reference/attributes.html#derive)-like attributes and other kinds +[`derive`](../reference/attributes/derive.html)-like attributes and other kinds of extensions. See `Registry::register_syntax_extension` and the `SyntaxExtension` enum. For a more involved macro example, see [`regex_macros`](https://github.com/rust-lang/regex/blob/master/regex_macros/src/lib.rs). @@ -173,7 +174,7 @@ quasiquote as an ordinary plugin library. # Lint plugins Plugins can extend [Rust's lint -infrastructure](../reference/attributes.html#lint-check-attributes) with +infrastructure](../reference/attributes/diagnostics.html#lint-check-attributes) with additional checks for code style, safety, etc. Now let's write a plugin [`lint_plugin_test.rs`](https://github.com/rust-lang/rust/blob/master/src/test/ui-fulldeps/auxiliary/lint_plugin_test.rs) that warns about any item named `lintme`. @@ -252,7 +253,7 @@ mostly use the same infrastructure as lint plugins, and provide examples of how to access type information. Lints defined by plugins are controlled by the usual [attributes and compiler -flags](../reference/attributes.html#lint-check-attributes), e.g. +flags](../reference/attributes/diagnostics.html#lint-check-attributes), e.g. `#[allow(test_lint)]` or `-A test-lint`. These identifiers are derived from the first argument to `declare_lint!`, with appropriate case and punctuation conversion. 
diff --git a/src/doc/unstable-book/src/language-features/re-rebalance-coherence.md b/src/doc/unstable-book/src/language-features/re-rebalance-coherence.md new file mode 100644 index 0000000000000..1e74652a890f6 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/re-rebalance-coherence.md @@ -0,0 +1,23 @@ +# `re_rebalance_coherence` + +The tracking issue for this feature is: [#55437] + +[#55437]: https://github.com/rust-lang/rust/issues/55437 + +------------------------ + +The `re_rebalance_coherence` feature tweaks the rules regarding which trait +impls are allowed in crates. +The following rule is used: + +Given `impl Trait for T0`, an impl is valid only if at +least one of the following is true: +- `Trait` is a local trait +- All of + - At least one of the types `T0..=Tn` must be a local type. Let `Ti` be the + first such type. + - No uncovered type parameters `P1..=Pn` may appear in `T0..Ti` (excluding + `Ti`) + + +See the [RFC](https://github.com/rust-lang/rfcs/blob/master/text/2451-re-rebalancing-coherence.md) for details. diff --git a/src/doc/unstable-book/src/language-features/repr-align-enum.md b/src/doc/unstable-book/src/language-features/repr-align-enum.md new file mode 100644 index 0000000000000..415c6ebe8b4bc --- /dev/null +++ b/src/doc/unstable-book/src/language-features/repr-align-enum.md @@ -0,0 +1,42 @@ +# `repr_align_enum` + +The tracking issue for this feature is: [#57996] + +[#57996]: https://github.com/rust-lang/rust/issues/57996 + +------------------------ + +The `repr_align_enum` feature allows using the `#[repr(align(x))]` attribute +on enums, similarly to structs. + +# Examples + +```rust +#![feature(repr_align_enum)] + +#[repr(align(8))] +enum Aligned { + Foo, + Bar { value: u32 }, +} + +fn main() { + assert_eq!(std::mem::align_of::(), 8); +} +``` + +This is equivalent to using an aligned wrapper struct everywhere: + +```rust +#[repr(align(8))] +struct Aligned(Unaligned); + +enum Unaligned { + Foo, + Bar { value: u32 }, +} + +fn main() { + assert_eq!(std::mem::align_of::(), 8); +} +``` diff --git a/src/doc/unstable-book/src/language-features/repr-packed.md b/src/doc/unstable-book/src/language-features/repr-packed.md deleted file mode 100644 index 2dd763d04b0ab..0000000000000 --- a/src/doc/unstable-book/src/language-features/repr-packed.md +++ /dev/null @@ -1,8 +0,0 @@ -# `repr_packed` - -The tracking issue for this feature is [#33158] - -[#33158]: https://github.com/rust-lang/rust/issues/33158 - ------------------------- - diff --git a/src/doc/unstable-book/src/language-features/type-alias-enum-variants.md b/src/doc/unstable-book/src/language-features/type-alias-enum-variants.md new file mode 100644 index 0000000000000..bcdeafc4b1137 --- /dev/null +++ b/src/doc/unstable-book/src/language-features/type-alias-enum-variants.md @@ -0,0 +1,36 @@ +# `type_alias_enum_variants` + +The tracking issue for this feature is: [#49683] + +[#49683]: https://github.com/rust-lang/rust/issues/49683 + +------------------------ + +The `type_alias_enum_variants` feature enables the use of variants on type +aliases that refer to enums, as both a constructor and a pattern. That is, +it allows for the syntax `EnumAlias::Variant`, which behaves exactly the same +as `Enum::Variant` (assuming that `EnumAlias` is an alias for some enum type +`Enum`). + +Note that since `Self` exists as a type alias, this feature also enables the +use of the syntax `Self::Variant` within an impl block for an enum type. 
+ +```rust +#![feature(type_alias_enum_variants)] + +enum Foo { + Bar(i32), + Baz { i: i32 }, +} + +type Alias = Foo; + +fn main() { + let t = Alias::Bar(0); + let t = Alias::Baz { i: 0 }; + match t { + Alias::Bar(_i) => {} + Alias::Baz { i: _i } => {} + } +} +``` diff --git a/src/doc/unstable-book/src/library-features/borrow-state.md b/src/doc/unstable-book/src/library-features/borrow-state.md new file mode 100644 index 0000000000000..304b8dffe9867 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/borrow-state.md @@ -0,0 +1,7 @@ +# `borrow_state` + +The tracking issue for this feature is: [#27733] + +[#27733]: https://github.com/rust-lang/rust/issues/27733 + +------------------------ diff --git a/src/doc/unstable-book/src/library-features/c-variadic.md b/src/doc/unstable-book/src/library-features/c-variadic.md new file mode 100644 index 0000000000000..77762116e6b1c --- /dev/null +++ b/src/doc/unstable-book/src/library-features/c-variadic.md @@ -0,0 +1,26 @@ +# `c_variadic` + +The tracking issue for this feature is: [#44930] + +[#44930]: https://github.com/rust-lang/rust/issues/44930 + +------------------------ + +The `c_variadic` library feature exposes the `VaList` structure, +Rust's analogue of C's `va_list` type. + +## Examples + +```rust +#![feature(c_variadic)] + +use std::ffi::VaList; + +pub unsafe extern "C" fn vadd(n: usize, mut args: VaList) -> usize { + let mut sum = 0; + for _ in 0..n { + sum += args.arg::<usize>(); + } + sum +} +``` diff --git a/src/doc/unstable-book/src/library-features/fnbox.md b/src/doc/unstable-book/src/library-features/fnbox.md new file mode 100644 index 0000000000000..cb3386b715211 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/fnbox.md @@ -0,0 +1,32 @@ +# `fnbox` + +The tracking issue for this feature is [#28796] + +[#28796]: https://github.com/rust-lang/rust/issues/28796 + +------------------------ + +This had been a temporary alternative to the following impls: + +```rust,ignore +impl<A, F> FnOnce<A> for Box<F> where F: FnOnce<A> + ?Sized {} +impl<A, F> FnMut<A> for Box<F> where F: FnMut<A> + ?Sized {} +impl<A, F> Fn<A> for Box<F> where F: Fn<A> + ?Sized {} +``` + +The impls are parallel to these (relatively old) impls: + +```rust,ignore +impl<A, F> FnOnce<A> for &mut F where F: FnMut<A> + ?Sized {} +impl<A, F> FnMut<A> for &mut F where F: FnMut<A> + ?Sized {} +impl<A, F> Fn<A> for &mut F where F: Fn<A> + ?Sized {} +impl<A, F> FnOnce<A> for &F where F: Fn<A> + ?Sized {} +impl<A, F> FnMut<A> for &F where F: Fn<A> + ?Sized {} +impl<A, F> Fn<A> for &F where F: Fn<A> + ?Sized {} +``` + +Before the introduction of [`unsized_locals`][unsized_locals], we had been unable to provide the former impls. That means, unlike `&dyn Fn()` or `&mut dyn FnMut()`, we could not use `Box<dyn FnOnce()>` at that time. + +[unsized_locals]: language-features/unsized-locals.html + +`FnBox()` is an alternative approach to `Box<dyn FnOnce()>`: the call is delegated to `FnBox::call_box`, which doesn't need unsized locals. As we now have `Box<dyn FnOnce()>` working, the `fnbox` feature is going to be removed. diff --git a/src/doc/unstable-book/src/library-features/is-sorted.md b/src/doc/unstable-book/src/library-features/is-sorted.md new file mode 100644 index 0000000000000..e3b7dc3b28eb2 --- /dev/null +++ b/src/doc/unstable-book/src/library-features/is-sorted.md @@ -0,0 +1,11 @@ +# `is_sorted` + +The tracking issue for this feature is: [#53485] + +[#53485]: https://github.com/rust-lang/rust/issues/53485 + +------------------------ + +Add the methods `is_sorted`, `is_sorted_by` and `is_sorted_by_key` to `[T]`; +add the methods `is_sorted`, `is_sorted_by` and `is_sorted_by_key` to +`Iterator`.
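A brief usage sketch of these methods, based on their documented behavior on slices and iterators (not part of the diff; written against a nightly of this era, hence the feature gate):

```rust
#![feature(is_sorted)]

fn main() {
    // On slices:
    assert!([1, 2, 2, 9].is_sorted());
    assert!(![1, 3, 2].is_sorted());

    // On iterators, comparing by a key extraction function:
    assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
}
```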
diff --git a/src/etc/cat-and-grep.sh b/src/etc/cat-and-grep.sh index 361e8d8e60eed..77dc52a935070 100755 --- a/src/etc/cat-and-grep.sh +++ b/src/etc/cat-and-grep.sh @@ -1,16 +1,6 @@ #!/bin/sh set -eu -# Copyright 2017 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # Performs `cat` and `grep` simultaneously for `run-make` tests in the Rust CI. # # This program will read lines from stdin and print them to stdout immediately. diff --git a/src/etc/debugger_pretty_printers_common.py b/src/etc/debugger_pretty_printers_common.py index b99e401929e62..385ce8efab87b 100644 --- a/src/etc/debugger_pretty_printers_common.py +++ b/src/etc/debugger_pretty_printers_common.py @@ -1,13 +1,3 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - """ This module provides an abstraction layer over common Rust pretty printing functionality needed by both GDB and LLDB. @@ -352,8 +342,7 @@ def extract_length_ptr_and_cap_from_std_vec(vec_val): vec_ptr_val = buf.get_child_at_index(0) capacity = buf.get_child_at_index(1).as_integer() - unique_ptr_val = vec_ptr_val.get_child_at_index(0) - data_ptr = unique_ptr_val.get_child_at_index(0) + data_ptr = vec_ptr_val.get_child_at_index(0) assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (length, data_ptr, capacity) @@ -370,8 +359,7 @@ def extract_tail_head_ptr_and_cap_from_std_vecdeque(vec_val): vec_ptr_val = buf.get_child_at_index(0) capacity = buf.get_child_at_index(1).as_integer() - unique_ptr_val = vec_ptr_val.get_child_at_index(0) - data_ptr = unique_ptr_val.get_child_at_index(0) + data_ptr = vec_ptr_val.get_child_at_index(0) assert data_ptr.type.get_dwarf_type_kind() == DWARF_TYPE_CODE_PTR return (tail, head, data_ptr, capacity) diff --git a/src/etc/dec2flt_table.py b/src/etc/dec2flt_table.py index 9fdab1fcfca28..2ffaf13dc8f65 100644 --- a/src/etc/dec2flt_table.py +++ b/src/etc/dec2flt_table.py @@ -1,14 +1,4 @@ #!/usr/bin/env python2.7 -# -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. """ Generate powers of ten using William Clinger's ``AlgorithmM`` for use in @@ -93,16 +83,6 @@ def error(f, e, z): return float(ulp_err) HEADER = """ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Tables of approximations of powers of ten. //! 
DO NOT MODIFY: Generated by `src/etc/dec2flt_table.py` """ diff --git a/src/etc/gdb_load_rust_pretty_printers.py b/src/etc/gdb_load_rust_pretty_printers.py index 755cac153d10d..c551346bb00bf 100644 --- a/src/etc/gdb_load_rust_pretty_printers.py +++ b/src/etc/gdb_load_rust_pretty_printers.py @@ -1,12 +1,2 @@ -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - import gdb_rust_pretty_printing gdb_rust_pretty_printing.register_printers(gdb.current_objfile()) diff --git a/src/etc/gdb_rust_pretty_printing.py b/src/etc/gdb_rust_pretty_printing.py index f02c7d87590ac..a6b09722e1c94 100755 --- a/src/etc/gdb_rust_pretty_printing.py +++ b/src/etc/gdb_rust_pretty_printing.py @@ -1,13 +1,3 @@ -# Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - import gdb import re import sys @@ -26,7 +16,7 @@ # This fix went in 8.1, so check for that. # See https://github.com/rust-lang/rust/issues/56730 gdb_81 = False -_match = re.match('([0-9]+)\\.([0-9]+)', gdb.VERSION) +_match = re.search('([0-9]+)\\.([0-9]+)', gdb.VERSION) if _match: if int(_match.group(1)) > 8 or (int(_match.group(1)) == 8 and int(_match.group(2)) >= 1): gdb_81 = True @@ -332,9 +322,7 @@ def children(self): # Yield each key (and optionally value) from a BoxedNode. def children_of_node(boxed_node, height, want_values): - ptr = boxed_node['ptr']['pointer'] - # This is written oddly because we don't want to rely on the field name being `__0`. - node_ptr = ptr[ptr.type.fields()[0]] + node_ptr = boxed_node['ptr']['pointer'] if height > 0: type_name = str(node_ptr.type.target()).replace('LeafNode', 'InternalNode') node_type = gdb.lookup_type(type_name) @@ -342,19 +330,20 @@ def children_of_node(boxed_node, height, want_values): leaf = node_ptr['data'] else: leaf = node_ptr.dereference() - keys = leaf['keys']['value']['value'] + keys = leaf['keys'] if want_values: - values = leaf['vals']['value']['value'] + values = leaf['vals'] length = int(leaf['len']) for i in xrange(0, length + 1): if height > 0: - for child in children_of_node(node_ptr['edges'][i], height - 1, want_values): + child_ptr = node_ptr['edges'][i]['value']['value'] + for child in children_of_node(child_ptr, height - 1, want_values): yield child if i < length: if want_values: - yield (keys[i], values[i]) + yield (keys[i]['value']['value'], values[i]['value']['value']) else: - yield keys[i] + yield keys[i]['value']['value'] class RustStdBTreeSetPrinter(object): def __init__(self, val): diff --git a/src/etc/generate-deriving-span-tests.py b/src/etc/generate-deriving-span-tests.py index 31a438958e942..1c525101c76f6 100755 --- a/src/etc/generate-deriving-span-tests.py +++ b/src/etc/generate-deriving-span-tests.py @@ -1,14 +1,4 @@ #!/usr/bin/env python -# -# Copyright 2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. 
-# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. """ This script creates a pile of compile-fail tests check that all the @@ -18,23 +8,12 @@ sample usage: src/etc/generate-deriving-span-tests.py """ -import os, datetime, stat, re +import os, stat TEST_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), '../test/ui/derives/')) -YEAR = datetime.datetime.now().year - -TEMPLATE = """// Copyright {year} The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +TEMPLATE = """\ // This file was auto-generated using 'src/etc/generate-deriving-span-tests.py' {error_deriving} @@ -82,19 +61,11 @@ def create_test_case(type, trait, super_traits, error_count): errors = '\n'.join('//~%s ERROR' % ('^' * n) for n in range(error_count)) code = string.format(traits = all_traits, errors = errors) - return TEMPLATE.format(year = YEAR, error_deriving=error_deriving, code = code) + return TEMPLATE.format(error_deriving=error_deriving, code = code) def write_file(name, string): test_file = os.path.join(TEST_DIR, 'derives-span-%s.rs' % name) - with open(test_file) as f: - old_str = f.read() - old_str_ignoring_date = re.sub(r'^// Copyright \d+', - '// Copyright {year}'.format(year = YEAR), old_str) - if old_str_ignoring_date == string: - # if all we're doing is updating the copyright year, ignore it - return 0 - # set write permission if file exists, so it can be changed if os.path.exists(test_file): os.chmod(test_file, stat.S_IWUSR) @@ -105,8 +76,6 @@ def write_file(name, string): # mark file read-only os.chmod(test_file, stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH) - return 1 - ENUM = 1 STRUCT = 2 @@ -129,15 +98,11 @@ def write_file(name, string): ('Hash', [], 1)]: traits[trait] = (ALL, supers, errs) -files = 0 - for (trait, (types, super_traits, error_count)) in traits.items(): mk = lambda ty: create_test_case(ty, trait, super_traits, error_count) if types & ENUM: - files += write_file(trait + '-enum', mk(ENUM_TUPLE)) - files += write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT)) + write_file(trait + '-enum', mk(ENUM_TUPLE)) + write_file(trait + '-enum-struct-variant', mk(ENUM_STRUCT)) if types & STRUCT: - files += write_file(trait + '-struct', mk(STRUCT_FIELDS)) - files += write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE)) - -print('Generated {files} deriving span test{}.'.format('s' if files != 1 else '', files = files)) + write_file(trait + '-struct', mk(STRUCT_FIELDS)) + write_file(trait + '-tuple-struct', mk(STRUCT_TUPLE)) diff --git a/src/etc/generate-keyword-tests.py b/src/etc/generate-keyword-tests.py index 1d79f95a4d2bb..bc046a8f42d0b 100755 --- a/src/etc/generate-keyword-tests.py +++ b/src/etc/generate-keyword-tests.py @@ -1,14 +1,5 @@ #!/usr/bin/env python -# -# Copyright 2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
+ """ This script takes a list of keywords and generates a testcase, that checks if using the keyword as identifier fails, for every keyword. The generate @@ -24,18 +15,7 @@ import stat -template = """// Copyright %d The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// compile-flags: -Z parse-only - +template = """\ // This file was auto-generated using 'src/etc/generate-keyword-tests.py %s' fn main() { @@ -55,7 +35,7 @@ os.chmod(test_file, stat.S_IWUSR) with open(test_file, 'wt') as f: - f.write(template % (datetime.datetime.now().year, kw, kw, kw)) + f.write(template % (kw, kw, kw)) # mark file read-only os.chmod(test_file, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH) diff --git a/src/etc/htmldocck.py b/src/etc/htmldocck.py index 91010262981e8..e8be2b9b53710 100644 --- a/src/etc/htmldocck.py +++ b/src/etc/htmldocck.py @@ -1,12 +1,5 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. +#!/usr/bin/env python +# -*- coding: utf-8 -*- r""" htmldocck.py is a custom checker script for Rustdoc HTML outputs. @@ -108,7 +101,10 @@ """ -from __future__ import print_function +from __future__ import absolute_import, print_function, unicode_literals + +import codecs +import io import sys import os.path import re @@ -120,14 +116,10 @@ from HTMLParser import HTMLParser from xml.etree import cElementTree as ET -# ⇤/⇥ are not in HTML 4 but are in HTML 5 try: - from html.entities import entitydefs + from html.entities import name2codepoint except ImportError: - from htmlentitydefs import entitydefs -entitydefs['larrb'] = u'\u21e4' -entitydefs['rarrb'] = u'\u21e5' -entitydefs['nbsp'] = ' ' + from htmlentitydefs import name2codepoint # "void elements" (no closing tag) from the HTML Standard section 12.1.2 VOID_ELEMENTS = set(['area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', @@ -167,11 +159,11 @@ def handle_data(self, data): self.__builder.data(data) def handle_entityref(self, name): - self.__builder.data(entitydefs[name]) + self.__builder.data(unichr(name2codepoint[name])) def handle_charref(self, name): code = int(name[1:], 16) if name.startswith(('x', 'X')) else int(name, 10) - self.__builder.data(unichr(code).encode('utf-8')) + self.__builder.data(unichr(code)) def close(self): HTMLParser.close(self) @@ -220,11 +212,11 @@ def concat_multi_lines(f): (?<=(?!?) 
(?P[A-Za-z]+(?:-[A-Za-z]+)*) (?P.*)$ -''', re.X) +''', re.X | re.UNICODE) def get_commands(template): - with open(template, 'rU') as f: + with io.open(template, encoding='utf-8') as f: for lineno, line in concat_multi_lines(f): m = LINE_PATTERN.search(line) if not m: @@ -236,7 +228,10 @@ def get_commands(template): if args and not args[:1].isspace(): print_err(lineno, line, 'Invalid template syntax') continue - args = shlex.split(args) + try: + args = shlex.split(args) + except UnicodeEncodeError: + args = [arg.decode('utf-8') for arg in shlex.split(args.encode('utf-8'))] yield Command(negated=negated, cmd=cmd, args=args, lineno=lineno+1, context=line) @@ -290,7 +285,7 @@ def get_file(self, path): if not(os.path.exists(abspath) and os.path.isfile(abspath)): raise FailedCheck('File does not exist {!r}'.format(path)) - with open(abspath) as f: + with io.open(abspath, encoding='utf-8') as f: data = f.read() self.files[path] = data return data @@ -304,9 +299,9 @@ def get_tree(self, path): if not(os.path.exists(abspath) and os.path.isfile(abspath)): raise FailedCheck('File does not exist {!r}'.format(path)) - with open(abspath) as f: + with io.open(abspath, encoding='utf-8') as f: try: - tree = ET.parse(f, CustomHTMLParser()) + tree = ET.fromstringlist(f.readlines(), CustomHTMLParser()) except Exception as e: raise RuntimeError('Cannot parse an HTML file {!r}: {}'.format(path, e)) self.trees[path] = tree @@ -323,7 +318,7 @@ def check_string(data, pat, regexp): if not pat: return True # special case a presence testing elif regexp: - return re.search(pat, data) is not None + return re.search(pat, data, flags=re.UNICODE) is not None else: data = ' '.join(data.split()) pat = ' '.join(pat.split()) @@ -360,7 +355,7 @@ def check_tree_text(tree, path, pat, regexp): break except Exception as e: print('Failed to get path "{}"'.format(path)) - raise e + raise return ret @@ -369,7 +364,12 @@ def get_tree_count(tree, path): return len(tree.findall(path)) def stderr(*args): - print(*args, file=sys.stderr) + if sys.version_info.major < 3: + file = codecs.getwriter('utf-8')(sys.stderr) + else: + file = sys.stderr + + print(*args, file=file) def print_err(lineno, context, err, message=None): global ERR_COUNT diff --git a/src/etc/lldb_batchmode.py b/src/etc/lldb_batchmode.py index b0220c84ef2fa..537b419b3279f 100644 --- a/src/etc/lldb_batchmode.py +++ b/src/etc/lldb_batchmode.py @@ -1,13 +1,3 @@ -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # This script allows to use LLDB in a way similar to GDB's batch mode. That is, given a text file # containing LLDB commands (one command per line), this script will execute the commands one after # the other. @@ -28,10 +18,15 @@ import os import sys import threading -import thread import re import time +try: + import thread +except ModuleNotFoundError: + # The `thread` module was renamed to `_thread` in Python 3. + import _thread as thread + # Set this to True for additional output DEBUG_OUTPUT = False diff --git a/src/etc/lldb_rust_formatters.py b/src/etc/lldb_rust_formatters.py index 2bbd4372721cb..fdc1c4fa0cc38 100644 --- a/src/etc/lldb_rust_formatters.py +++ b/src/etc/lldb_rust_formatters.py @@ -1,13 +1,3 @@ -# Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - import lldb import re import debugger_pretty_printers_common as rustpp @@ -300,6 +290,8 @@ def render_element(i): def read_utf8_string(ptr_val, byte_count): + if byte_count == 0: + return '""' error = lldb.SBError() process = ptr_val.get_wrapped_value().GetProcess() data = process.ReadMemory(ptr_val.as_integer(), byte_count, error) diff --git a/src/etc/platform-intrinsics/aarch64.json b/src/etc/platform-intrinsics/aarch64.json deleted file mode 100644 index c8cda4077b790..0000000000000 --- a/src/etc/platform-intrinsics/aarch64.json +++ /dev/null @@ -1,592 +0,0 @@ -{ - "platform": "aarch64_v", - "intrinsic_prefix": "", - "llvm_prefix": "llvm.aarch64.neon.", - "number_info": { - "signed": { - "kind": "s", - "data_type": { "pattern": "s{bitwidth}" } - }, - "unsigned": { - "kind": "u", - "data_type": { "pattern": "u{bitwidth}" } - }, - "float": { - "kind": "f", - "data_type": { "pattern": "f{bitwidth}" } - } - }, - "width_info": { - "64": { "width": "" }, - "128": { "width": "q" } - }, - "intrinsics": [ - { - "intrinsic": "hadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}hadd.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "rhadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}rhadd.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}qadd.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "uqadd_{0.data_type}", - "width": [128], - "llvm": "suqadd.{0.llvm_name}", - "ret": "s(8-64)", - "args": ["0", "0u"] - }, - { - "intrinsic": "sqadd_{0.data_type}", - "width": [128], - "llvm": "usqadd.{0.llvm_name}", - "ret": "u(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "raddhn_{1.data_type}", - "width": [64], - "llvm": "raddhn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "0w"] - }, - { - "intrinsic": "fmulx{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "fmulx.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "fma{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.fma.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "qdmulh{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqdmulh.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qrdmulh{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqrdmulh.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "mull_{1.data_type}", - "width": [128], - "llvm": "{0.kind}mull.{0.llvm_name}", - "ret": "i(16-64)", - "args": ["0n", "0n"] - }, - { - "intrinsic": "qdmull{0.width}_{1.data_type}", - "width": [128], - "llvm": "sqdmull.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0n", "0n"] - }, - { - "intrinsic": "hsub{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "{0.kind}hsub.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qsub{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "{0.kind}qsub.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "rsubhn_{1.data_type}", - "width": [64], - "llvm": "rsubhn.{0.llvm_name}", - 
"ret": "i(8-32)", - "args": ["0w", "0w"] - }, - { - "intrinsic": "abd{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "{0.kind}abd.{0.llvm_name}", - "ret": ["i(8-32)","f(32-64)"], - "args": ["0", "0"] - }, - { - "intrinsic": "max{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}max.{0.llvm_name}", - "ret": ["i(8-32)","f(32-64)"], - "args": ["0", "0"] - }, - { - "intrinsic": "min{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}min.{0.llvm_name}", - "ret": ["i(8-32)","f(32-64)"], - "args": ["0", "0"] - }, - { - "intrinsic": "maxnm{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}maxnm.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "minnm{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}minnm.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "shl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}shl.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "qshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}qshl.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "rshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}rshl.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "qrshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}qrshl.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "qshrun_n_{1.data_type}", - "width": [64], - "llvm": "sqshrun.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qrshrun_n_{1.data_type}", - "width": [64], - "llvm": "sqrshrun.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qshrn_n_{1.data_type}", - "width": [64], - "llvm": "{0.kind}qshrn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "rshrn_n_{1.data_type}", - "width": [64], - "llvm": "rshrn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qrshrn_n_{1.data_type}", - "width": [64], - "llvm": "{0.kind}qrshrn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "sri{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "vsri.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "sli{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "vsli.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "vqmovn_{1.data_type}", - "width": [64], - "llvm": "{0.kind}qxtn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w"] - }, - { - "intrinsic": "abs{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "abs.{0.llvm_name}", - "ret": "s(8-64)", - "args": ["0"] - }, - { - "intrinsic": "abs{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.fabs.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0"] - }, - { - "intrinsic": "qabs{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "sqabs.{0.llvm_name}", - "ret": "s(8-64)", - "args": ["0"] - }, - { - "intrinsic": "qneg{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqneg.{0.llvm_name}", - "ret": "s(8-64)", - "args": ["0"] - }, - { - "intrinsic": "clz{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.ctlz.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0"] - }, - { - "intrinsic": "cls{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "cls.{0.llvm_name}", - "ret": "i(8-32)", - 
"args": ["0"] - }, - { - "intrinsic": "cnt{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.ctpop.{0.llvm_name}", - "ret": "i8", - "args": ["0"] - }, - { - "intrinsic": "recpe{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}recpe.{0.llvm_name}", - "ret": ["u32","f(32-64)"], - "args": ["0"] - }, - { - "intrinsic": "recps{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "frecps.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "sqrt{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.sqrt.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0"] - }, - { - "intrinsic": "rsqrte{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}rsqrte.{0.llvm_name}", - "ret": ["u32","f(32-64)"], - "args": ["0"] - }, - { - "intrinsic": "rsqrts{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "frsqrts.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "rbit{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "rbit.{0.llvm_name}", - "ret": "i8", - "args": ["0"] - }, - { - "intrinsic": "ld2{0[0].width}_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld2.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);2]","[f(32-64);2]"], - "args": ["0.0SPc/0.0"] - }, - { - "intrinsic": "ld3{0[0].width}_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld3.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);3]","[f(32-64);3]"], - "args": ["0.0SPc/0.0"] - }, - { - "intrinsic": "ld4{0[0].width}_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld4.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);4]","[f(32-64);4]"], - "args": ["0.0SPc/0.0"] - }, - { - "intrinsic": "ld2{0[0].width}_dup_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld2.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);2]","[f(32-64);2]"], - "args": ["0.0SPc"] - }, - { - "intrinsic": "ld3{0[0].width}_dup_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld3.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);3]","[f(32-64);3]"], - "args": ["0.0SPc"] - }, - { - "intrinsic": "ld4{0[0].width}_dup_{0[0].data_type}", - "width": [64, 128], - "llvm": "ld4.{0[0].llvm_name}.{1.llvm_name}", - "ret": ["[i(8-64);4]","[f(32-64);4]"], - "args": ["0.0SPc"] - }, - { - "intrinsic": "padd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "addp.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "padd{0.width}_{0.data_type}", - "width": [128], - "llvm": "addp.{0.llvm_name}", - "ret": ["i64","f64"], - "args": ["0", "0"] - }, - { - "intrinsic": "paddl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}addlp.{0.llvm_name}.{1.llvm_name}", - "ret": "i(16-64)", - "args": ["0dn"] - }, - { - "intrinsic": "pmax{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}maxp.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "pmax{0.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}maxp.{0.llvm_name}", - "ret": ["i64","f64"], - "args": ["0", "0"] - }, - { - "intrinsic": "pmin{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}minp.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "pmin{0.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}minp.{0.llvm_name}", - "ret": ["i64","f64"], - "args": ["0", "0"] - }, - { - "intrinsic": "pmaxnm{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}maxnmp.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": 
["0", "0"] - }, - { - "intrinsic": "pmaxnm{0.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}maxnmp.{0.llvm_name}", - "ret": ["i64","f64"], - "args": ["0", "0"] - }, - { - "intrinsic": "pminnm{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}minnmp.{0.llvm_name}", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "pminnm{0.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}minnmp.{0.llvm_name}", - "ret": "f64", - "args": ["0", "0"] - }, - { - "intrinsic": "addv{1.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}addv.{0.llvm_name}.{1.llvm_name}", - "ret": ["I(8-32)","F32"], - "args": ["0v"] - }, - { - "intrinsic": "addv{1.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}addv.{0.llvm_name}.{1.llvm_name}", - "ret": ["I64","F64"], - "args": ["0v"] - }, - { - "intrinsic": "addlv{1.width}_{1.data_type}", - "width": [64, 128], - "llvm": "{0.kind}addlv.{0.llvm_name}.{1.llvm_name}", - "ret": "I(16-64)", - "args": ["0vdn"] - }, - { - "intrinsic": "maxv{1.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}maxv.{0.llvm_name}.{1.llvm_name}", - "ret": ["I(8-32)","F32"], - "args": ["0v"] - }, - { - "intrinsic": "maxv{1.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}maxv.{0.llvm_name}.{1.llvm_name}", - "ret": "F64", - "args": ["0v"] - }, - { - "intrinsic": "minv{1.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}minv.{0.llvm_name}.{1.llvm_name}", - "ret": ["I(8-32)","F32"], - "args": ["0v"] - }, - { - "intrinsic": "minv{1.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}minv.{0.llvm_name}.{1.llvm_name}", - "ret": "F64", - "args": ["0v"] - }, - { - "intrinsic": "maxnmv{1.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}maxnmv.{0.llvm_name}.{1.llvm_name}", - "ret": "F32", - "args": ["0v"] - }, - { - "intrinsic": "maxnmv{1.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}maxnmv.{0.llvm_name}.{1.llvm_name}", - "ret": "F64", - "args": ["0v"] - }, - { - "intrinsic": "minnmv{1.width}_{0.data_type}", - "width": [64, 128], - "llvm": "{0.kind}minnmv.{0.llvm_name}.{1.llvm_name}", - "ret": "F32", - "args": ["0v"] - }, - { - "intrinsic": "minnmv{1.width}_{0.data_type}", - "width": [128], - "llvm": "{0.kind}minnmv.{0.llvm_name}.{1.llvm_name}", - "ret": "F64", - "args": ["0v"] - }, - { - "intrinsic": "qtbl1{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbl1.{0.llvm_name}", - "ret": "i8", - "args": ["0x128", "0u"] - }, - { - "intrinsic": "qtbx1{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbx1.{0.llvm_name}", - "ret": "i8", - "args": ["0", "0x128", "0u"] - }, - { - "intrinsic": "qtbl2{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbl2.{0.llvm_name}", - "ret": "i8", - "args": ["(0x128,0x128)f", "0u"] - }, - { - "intrinsic": "qtbx2{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbx2.{0.llvm_name}", - "ret": "i8", - "args": ["(0x128,0x128)f", "0u"] - }, - { - "intrinsic": "qtbl3{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbl3.{0.llvm_name}", - "ret": "i8", - "args": ["(0x128,0x128,0x128)f", "0u"] - }, - { - "intrinsic": "qtbx3{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbx3.{0.llvm_name}", - "ret": "i8", - "args": ["0", "(0x128,0x128,0x128)f", "0u"] - }, - { - "intrinsic": "qtbl4{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "tbl4.{0.llvm_name}", - "ret": "i8", - "args": ["(0x128,0x128,0x128,0x128)f", "0u"] - }, - { - "intrinsic": "qtbx4{0.width}_{0.data_type}", - "width": [64, 128], - 
"llvm": "tbx4.{0.llvm_name}", - "ret": "i8", - "args": ["0", "(0x128,0x128,0x128,0x128)f", "0u"] - } - ] -} diff --git a/src/etc/platform-intrinsics/arm.json b/src/etc/platform-intrinsics/arm.json deleted file mode 100644 index d008320713c3b..0000000000000 --- a/src/etc/platform-intrinsics/arm.json +++ /dev/null @@ -1,396 +0,0 @@ -{ - "platform": "arm_v", - "intrinsic_prefix": "", - "llvm_prefix": "llvm.arm.neon.v", - "number_info": { - "signed": { - "kind": "s", - "data_type": { "pattern": "s{bitwidth}" } - }, - "unsigned": { - "kind": "u", - "data_type": { "pattern": "u{bitwidth}" } - }, - "float": { - "kind": "f", - "data_type": { "pattern": "f{bitwidth}" } - } - }, - "width_info": { - "64": { "width": "" }, - "128": { "width": "q" } - }, - "intrinsics": [ - { - "intrinsic": "hadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "hadd{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "rhadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "rhadd{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qadd{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "qadd{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "raddhn_{1.data_type}", - "width": [64], - "llvm": "raddhn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "0w"] - }, - { - "intrinsic": "fma{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.fma.{0.llvm_name}", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "qdmulh{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqdmulh.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qrdmulh{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqrdmulh.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "mull_{1.data_type}", - "width": [128], - "llvm": "mull{0.kind}.{0.llvm_name}", - "ret": "i(16-64)", - "args": ["0n", "0n"] - }, - { - "intrinsic": "qdmull{0.width}_{1.data_type}", - "width": [128], - "llvm": "sqdmull.{0.llvm_name}", - "ret": "s(16-32)", - "args": ["0n", "0n"] - }, - { - "intrinsic": "hsub{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "hsub{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "qsub{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "qsub{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "rsubhn_{1.data_type}", - "width": [64], - "llvm": "rsubhn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "0w"] - }, - { - "intrinsic": "abd{0.width}_{1.data_type}", - "width": [64, 128], - "llvm": "abd{0.kind}.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "max{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "max{0.kind}.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "min{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "min{0.kind}.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "shl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "shl{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "qshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "qshl{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "rshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "rshl{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - 
"args": ["0", "0s"] - }, - { - "intrinsic": "qrshl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "qrshl{0.kind}.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "qshrun_n_{1.data_type}", - "width": [64], - "llvm": "sqshrun.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qrshrun_n_{1.data_type}", - "width": [64], - "llvm": "sqrshrun.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qshrn_n_{1.data_type}", - "width": [64], - "llvm": "qshrn{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "rshrn_n_{1.data_type}", - "width": [64], - "llvm": "rshrn.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "qrshrn_n_{1.data_type}", - "width": [64], - "llvm": "qrshrn{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w", "U32"] - }, - { - "intrinsic": "sri{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "vsri.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "sli{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "vsli.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "vqmovn_{1.data_type}", - "width": [64], - "llvm": "qxtn{0.kind}.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0w"] - }, - { - "intrinsic": "abs{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "abs.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0"] - }, - { - "intrinsic": "abs{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.fabs.{0.llvm_name}", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "qabs{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "sqabs.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0"] - }, - { - "intrinsic": "qneg{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "sqneg.{0.llvm_name}", - "ret": "s(8-32)", - "args": ["0"] - }, - { - "intrinsic": "clz{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.ctlz.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0"] - }, - { - "intrinsic": "cls{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "cls.{0.llvm_name}", - "ret": "i(8-32)", - "args": ["0"] - }, - { - "intrinsic": "cnt{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.ctpop.{0.llvm_name}", - "ret": "i8", - "args": ["0"] - }, - { - "intrinsic": "recpe{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "recpe.{0.llvm_name}", - "ret": ["u32","f32"], - "args": ["0"] - }, - { - "intrinsic": "recps{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "frecps.{0.llvm_name}", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "sqrt{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "!llvm.sqrt.{0.llvm_name}", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "rsqrte{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "rsqrte.{0.llvm_name}", - "ret": ["u32","f32"], - "args": ["0"] - }, - { - "intrinsic": "rsqrts{0.width}_{0.data_type}", - "width": [64,128], - "llvm": "rsqrts.{0.llvm_name}", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "bsl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "bsl.{0.llvm_name}", - "ret": "i(8-64)", - "args": ["0u", "0"] - }, - { - "intrinsic": "padd{0.width}_{0.data_type}", - "width": [64], - "llvm": "padd.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "paddl{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": 
"paddl{0.kind}.{0.llvm_name}.{1.llvm_name}", - "ret": "i(16-64)", - "args": ["0dn"] - }, - { - "intrinsic": "padal{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "padal{0.kind}.{0.llvm_name}.{1.llvm_name}", - "ret": "i(16-64)", - "args": ["0", "0dn"] - }, - { - "intrinsic": "pmax{0.width}_{0.data_type}", - "width": [64], - "llvm": "pmax{0.kind}.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "pmin{0.width}_{0.data_type}", - "width": [64, 128], - "llvm": "pmin{0.kind}.{0.llvm_name}", - "ret": ["i(8-32)","f32"], - "args": ["0", "0"] - }, - { - "intrinsic": "tbl1_{0.data_type}", - "width": [64], - "llvm": "tbl1", - "ret": "i8", - "args": ["0", "0u"] - }, - { - "intrinsic": "tbx1_{0.data_type}", - "width": [64], - "llvm": "tbx1", - "ret": "i8", - "args": ["0", "0", "0u"] - }, - { - "intrinsic": "tbl2_{0.data_type}", - "width": [64], - "llvm": "tbl2", - "ret": "i8", - "args": ["(0,0)f", "0u"] - }, - { - "intrinsic": "tbx2_{0.data_type}", - "width": [64], - "llvm": "tbx2", - "ret": "i8", - "args": ["(0,0)f", "0u"] - }, - { - "intrinsic": "tbl3_{0.data_type}", - "width": [64], - "llvm": "tbl3", - "ret": "i8", - "args": ["(0,0,0)f", "0u"] - }, - { - "intrinsic": "tbx3_{0.data_type}", - "width": [64], - "llvm": "tbx3", - "ret": "i8", - "args": ["0", "(0,0,0)f", "0u"] - }, - { - "intrinsic": "tbl4_{0.data_type}", - "width": [64], - "llvm": "tbl4", - "ret": "i8", - "args": ["(0,0,0,0)f", "0u"] - }, - { - "intrinsic": "tbx4_{0.data_type}", - "width": [64], - "llvm": "tbx4", - "ret": "i8", - "args": ["0", "(0,0,0,0)f", "0u"] - } - ] -} diff --git a/src/etc/platform-intrinsics/generator.py b/src/etc/platform-intrinsics/generator.py deleted file mode 100644 index 046ea48638baf..0000000000000 --- a/src/etc/platform-intrinsics/generator.py +++ /dev/null @@ -1,874 +0,0 @@ -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - -from __future__ import division, print_function -import json -import argparse -import sys -import re -import textwrap -import itertools - -SPEC = re.compile( - r'^(?:(?PV)|(?P[iusfIUSF])(?:\((?P\d+)-(?P\d+)\)|' - r'(?P\d+)(:?/(?P\d+))?)' - r'|(?P\d+))(?P\.\d+)?(?P[vShdnwusfDMCNW]*)(?Px\d+)?' 
- r'(?:(?PPm|Pc)(?P/.*)?|(?P->.*))?$' -) - -class PlatformInfo(object): - def __init__(self, json): - self._platform = json['platform'] - - def platform_prefix(self): - return self._platform - -class IntrinsicSet(object): - def __init__(self, platform, json): - self._llvm_prefix = json['llvm_prefix'] - self._type_info = json['number_info'] - self._intrinsics = json['intrinsics'] - self._widths = json['width_info'] - self._platform = platform - self._intrinsic_prefix = json['intrinsic_prefix'] - - def intrinsics(self): - for raw in self._intrinsics: - yield GenericIntrinsic(self, - raw['intrinsic'], raw['width'], raw['llvm'], - raw['ret'], raw['args']) - - def platform(self): - return self._platform - - def intrinsic_prefix(self): - return self._intrinsic_prefix - - def llvm_prefix(self): - return self._llvm_prefix - - def width_info(self, bitwidth): - return self._widths[str(bitwidth)] - - def number_type_info(self, value): - data = self._type_info[value.__class__.__name__.lower()] - bitwidth = value.bitwidth() - def lookup(raw): - if not isinstance(raw, dict): - return raw - - try: - return raw[str(bitwidth)] - except KeyError: - return raw['pattern'].format(bitwidth = bitwidth) - - return PlatformTypeInfo(value.llvm_name(), - {k: lookup(v) for k, v in data.items()}) - -class PlatformTypeInfo(object): - def __init__(self, llvm_name, properties, elems = None): - if elems is None: - self.properties = properties - self.llvm_name = llvm_name - else: - assert properties is None and llvm_name is None - self.properties = {} - self.elems = elems - - def __repr__(self): - return ''.format(self.llvm_name, self.properties) - - def __getattr__(self, name): - return self.properties[name] - - def __getitem__(self, idx): - return self.elems[idx] - - def vectorize(self, length, width_info): - props = self.properties.copy() - props.update(width_info) - return PlatformTypeInfo('v{}{}'.format(length, self.llvm_name), props) - - def pointer(self, llvm_elem): - name = self.llvm_name if llvm_elem is None else llvm_elem.llvm_name - return PlatformTypeInfo('p0{}'.format(name), self.properties) - -BITWIDTH_POINTER = '' - -class Type(object): - def __init__(self, bitwidth): - self._bitwidth = bitwidth - - def bitwidth(self): - return self._bitwidth - - def modify(self, spec, width, previous): - raise NotImplementedError() - - def __ne__(self, other): - return not (self == other) - -class Void(Type): - def __init__(self): - Type.__init__(self, 0) - - @staticmethod - def compiler_ctor(): - return '::VOID' - - def compiler_ctor_ref(self): - return '&' + self.compiler_ctor() - - @staticmethod - def rust_name(): - return '()' - - @staticmethod - def type_info(platform_info): - return None - - def __eq__(self, other): - return isinstance(other, Void) - -class Number(Type): - def __init__(self, bitwidth): - Type.__init__(self, bitwidth) - - def modify(self, spec, width, previous): - if spec == 'u': - return Unsigned(self.bitwidth()) - elif spec == 's': - return Signed(self.bitwidth()) - elif spec == 'f': - return Float(self.bitwidth()) - elif spec == 'w': - return self.__class__(self.bitwidth() * 2) - elif spec == 'n': - return self.__class__(self.bitwidth() // 2) - elif spec == 'v': - return Vector(self, width // self.bitwidth()) - else: - raise ValueError('unknown modification spec {}', spec) - - def type_info(self, platform_info): - return platform_info.number_type_info(self) - - def __eq__(self, other): - # print(self, other) - return self.__class__ == other.__class__ and self.bitwidth() == other.bitwidth() - -class 
Signed(Number): - def __init__(self, bitwidth, llvm_bitwidth = None): - Number.__init__(self, bitwidth) - self._llvm_bitwidth = llvm_bitwidth - - - def compiler_ctor(self): - if self._llvm_bitwidth is None: - return '::I{}'.format(self.bitwidth()) - else: - return '::I{}_{}'.format(self.bitwidth(), self._llvm_bitwidth) - - def compiler_ctor_ref(self): - return '&' + self.compiler_ctor() - - def llvm_name(self): - bw = self._llvm_bitwidth or self.bitwidth() - return 'i{}'.format(bw) - - def rust_name(self): - return 'i{}'.format(self.bitwidth()) - -class Unsigned(Number): - def __init__(self, bitwidth, llvm_bitwidth = None): - Number.__init__(self, bitwidth) - self._llvm_bitwidth = llvm_bitwidth - - def compiler_ctor(self): - if self._llvm_bitwidth is None: - return '::U{}'.format(self.bitwidth()) - else: - return '::U{}_{}'.format(self.bitwidth(), self._llvm_bitwidth) - - def compiler_ctor_ref(self): - return '&' + self.compiler_ctor() - - def llvm_name(self): - bw = self._llvm_bitwidth or self.bitwidth() - return 'i{}'.format(bw) - - def rust_name(self): - return 'u{}'.format(self.bitwidth()) - -class Float(Number): - def __init__(self, bitwidth): - assert bitwidth in (32, 64) - Number.__init__(self, bitwidth) - - def compiler_ctor(self): - return '::F{}'.format(self.bitwidth()) - - def compiler_ctor_ref(self): - return '&' + self.compiler_ctor() - - def llvm_name(self): - return 'f{}'.format(self.bitwidth()) - - def rust_name(self): - return 'f{}'.format(self.bitwidth()) - -class Vector(Type): - def __init__(self, elem, length, bitcast = None): - assert isinstance(elem, Type) and not isinstance(elem, Vector) - Type.__init__(self, - elem.bitwidth() * length) - self._length = length - self._elem = elem - assert bitcast is None or (isinstance(bitcast, Vector) and - bitcast._bitcast is None and - bitcast._elem.bitwidth() == elem.bitwidth()) - if bitcast is not None and bitcast._elem != elem: - self._bitcast = bitcast._elem - else: - self._bitcast = None - - def modify(self, spec, width, previous): - if spec == 'S': - return self._elem - elif spec == 'h': - return Vector(self._elem, self._length // 2) - elif spec == 'd': - return Vector(self._elem, self._length * 2) - elif spec == 'N': - elem = self._elem.__class__(self._elem.bitwidth() // 2) - return Vector(elem, self._length * 2) - elif spec == 'W': - elem = self._elem.__class__(self._elem.bitwidth() * 2) - return Vector(elem, self._length // 2) - elif spec.startswith('x'): - new_bitwidth = int(spec[1:]) - return Vector(self._elem, new_bitwidth // self._elem.bitwidth()) - elif spec.startswith('->'): - bitcast_to = TypeSpec(spec[2:]) - choices = list(bitcast_to.enumerate(width, previous)) - assert len(choices) == 1 - bitcast_to = choices[0] - return Vector(self._elem, self._length, bitcast_to) - else: - return Vector(self._elem.modify(spec, width, previous), self._length) - - def compiler_ctor(self): - if self._bitcast is None: - return '{}x{}'.format(self._elem.compiler_ctor(), - self._length) - else: - return '{}x{}_{}'.format(self._elem.compiler_ctor(), - self._length, - self._bitcast.compiler_ctor() - .replace('::', '')) - - def compiler_ctor_ref(self): - return '&' + self.compiler_ctor() - - def rust_name(self): - return '{}x{}'.format(self._elem.rust_name(), self._length) - - def type_info(self, platform_info): - elem_info = self._elem.type_info(platform_info) - return elem_info.vectorize(self._length, - platform_info.width_info(self.bitwidth())) - - def __eq__(self, other): - return isinstance(other, Vector) and self._length == 
other._length and \ - self._elem == other._elem and self._bitcast == other._bitcast - -class Pointer(Type): - def __init__(self, elem, llvm_elem, const): - self._elem = elem - self._llvm_elem = llvm_elem - self._const = const - Type.__init__(self, BITWIDTH_POINTER) - - def modify(self, spec, width, previous): - if spec == 'D': - return self._elem - elif spec == 'M': - return Pointer(self._elem, self._llvm_elem, False) - elif spec == 'C': - return Pointer(self._elem, self._llvm_elem, True) - else: - return Pointer(self._elem.modify(spec, width, previous), self._llvm_elem, self._const) - - def compiler_ctor(self): - if self._llvm_elem is None: - llvm_elem = 'None' - else: - llvm_elem = 'Some({})'.format(self._llvm_elem.compiler_ctor_ref()) - return 'Type::Pointer({}, {}, {})'.format(self._elem.compiler_ctor_ref(), - llvm_elem, - 'true' if self._const else 'false') - - def compiler_ctor_ref(self): - return "{{ static PTR: Type = {}; &PTR }}".format(self.compiler_ctor()) - - - def rust_name(self): - return '*{} {}'.format('const' if self._const else 'mut', - self._elem.rust_name()) - - def type_info(self, platform_info): - if self._llvm_elem is None: - llvm_elem = None - else: - llvm_elem = self._llvm_elem.type_info(platform_info) - return self._elem.type_info(platform_info).pointer(llvm_elem) - - def __eq__(self, other): - return isinstance(other, Pointer) and self._const == other._const \ - and self._elem == other._elem and self._llvm_elem == other._llvm_elem - -class Aggregate(Type): - def __init__(self, flatten, elems): - self._flatten = flatten - self._elems = elems - Type.__init__(self, sum(elem.bitwidth() for elem in elems)) - - def __repr__(self): - return ''.format(self._elems) - - def modify(self, spec, width, previous): - if spec.startswith('.'): - num = int(spec[1:]) - return self._elems[num] - else: - print(spec) - raise NotImplementedError() - - def compiler_ctor(self): - parts = "{{ static PARTS: [&'static Type; {}] = [{}]; &PARTS }}" - elems = ', '.join(elem.compiler_ctor_ref() for elem in self._elems) - parts = parts.format(len(self._elems), elems) - return 'Type::Aggregate({}, {})'.format('true' if self._flatten else 'false', - parts) - - def compiler_ctor_ref(self): - return "{{ static AGG: Type = {}; &AGG }}".format(self.compiler_ctor()) - - def rust_name(self): - return '({})'.format(', '.join(elem.rust_name() for elem in self._elems)) - - def type_info(self, platform_info): - return PlatformTypeInfo(None, None, [elem.type_info(platform_info) for elem in self._elems]) - - def __eq__(self, other): - return isinstance(other, Aggregate) and self._flatten == other._flatten and \ - self._elems == other._elems - - -TYPE_ID_LOOKUP = {'i': [Signed, Unsigned], - 's': [Signed], - 'u': [Unsigned], - 'f': [Float]} - -def ptrify(match, elem, width, previous): - ptr = match.group('pointer') - if ptr is None: - return elem - else: - llvm_ptr = match.group('llvm_pointer') - if llvm_ptr is None: - llvm_elem = None - else: - assert llvm_ptr.startswith('/') - options = list(TypeSpec(llvm_ptr[1:]).enumerate(width, previous)) - assert len(options) == 1 - llvm_elem = options[0] - assert ptr in ('Pc', 'Pm') - return Pointer(elem, llvm_elem, ptr == 'Pc') - -class TypeSpec(object): - def __init__(self, spec): - if not isinstance(spec, list): - spec = [spec] - - self.spec = spec - - def enumerate(self, width, previous): - for spec in self.spec: - match = SPEC.match(spec) - if match is not None: - id = match.group('id') - reference = match.group('reference') - - modifiers = [] - index = 
match.group('index') - if index is not None: - modifiers.append(index) - modifiers += list(match.group('modifiers') or '') - force = match.group('force_width') - if force is not None: - modifiers.append(force) - bitcast = match.group('bitcast') - if bitcast is not None: - modifiers.append(bitcast) - - if match.group('void') is not None: - assert spec == 'V' - yield Void() - elif id is not None: - is_vector = id.islower() - type_ctors = TYPE_ID_LOOKUP[id.lower()] - - start = match.group('start') - if start is not None: - end = match.group('end') - llvm_width = None - else: - start = end = match.group('width') - llvm_width = match.group('llvm_width') - start = int(start) - end = int(end) - - bitwidth = start - while bitwidth <= end: - for ctor in type_ctors: - if llvm_width is not None: - assert not is_vector - llvm_width = int(llvm_width) - assert llvm_width < bitwidth - scalar = ctor(bitwidth, llvm_width) - else: - scalar = ctor(bitwidth) - - if is_vector: - elem = Vector(scalar, width // bitwidth) - else: - assert bitcast is None - elem = scalar - - for x in modifiers: - elem = elem.modify(x, width, previous) - yield ptrify(match, elem, width, previous) - bitwidth *= 2 - elif reference is not None: - reference = int(reference) - assert reference < len(previous), \ - 'referring to argument {}, but only {} are known'.format(reference, - len(previous)) - ret = previous[reference] - for x in modifiers: - ret = ret.modify(x, width, previous) - yield ptrify(match, ret, width, previous) - else: - assert False, 'matched `{}`, but didn\'t understand it?'.format(spec) - elif spec.startswith('('): - if spec.endswith(')'): - true_spec = spec[1:-1] - flatten = False - elif spec.endswith(')f'): - true_spec = spec[1:-2] - flatten = True - else: - assert False, 'found unclosed aggregate `{}`'.format(spec) - - for elems in itertools.product(*(TypeSpec(subspec).enumerate(width, previous) - for subspec in true_spec.split(','))): - yield Aggregate(flatten, elems) - elif spec.startswith('['): - if spec.endswith(']'): - true_spec = spec[1:-1] - flatten = False - elif spec.endswith(']f'): - true_spec = spec[1:-2] - flatten = True - else: - assert False, 'found unclosed aggregate `{}`'.format(spec) - elem_spec, count = true_spec.split(';') - - count = int(count) - for elem in TypeSpec(elem_spec).enumerate(width, previous): - yield Aggregate(flatten, [elem] * count) - else: - assert False, 'Failed to parse `{}`'.format(spec) - -class GenericIntrinsic(object): - def __init__(self, platform, intrinsic, widths, llvm_name, ret, args): - self._platform = platform - self.intrinsic = intrinsic - self.widths = map(int, widths) - self.llvm_name = llvm_name - self.ret = TypeSpec(ret) - self.args = list(map(TypeSpec, args)) - - def monomorphise(self): - for width in self.widths: - # must be a power of two - assert width & (width - 1) == 0 - def recur(processed, untouched): - if not untouched: - ret = processed[0] - args = processed[1:] - yield MonomorphicIntrinsic(self._platform, self.intrinsic, width, - self.llvm_name, - ret, args) - else: - raw_arg = untouched[0] - rest = untouched[1:] - for arg in raw_arg.enumerate(width, processed): - for intr in recur(processed + [arg], rest): - yield intr - - for x in recur([], [self.ret] + self.args): - yield x - -class MonomorphicIntrinsic(object): - def __init__(self, platform, intrinsic, width, llvm_name, ret, args): - self._platform = platform - self._intrinsic = intrinsic - self._width = '' if width == 64 else 'q' - self._llvm_name = llvm_name - self._ret_raw = ret - self._ret = 
ret.type_info(platform) - self._args_raw = args - self._args = [arg.type_info(platform) for arg in args] - - def llvm_name(self): - if self._llvm_name.startswith('!'): - return self._llvm_name[1:].format(self._ret, *self._args) - else: - return self._platform.llvm_prefix() + self._llvm_name.format(self._ret, *self._args) - - def intrinsic_suffix(self): - return self._intrinsic.format(self._ret, - *self._args, - width = self._width) - - def platform_prefix(self): - return self._platform.platform().platform_prefix() - - def intrinsic_set_name(self): - return self._platform.intrinsic_prefix() - - def intrinsic_name(self): - return self._platform.intrinsic_prefix() + self.intrinsic_suffix() - - def compiler_args(self): - return ', '.join(arg.compiler_ctor_ref() for arg in self._args_raw) - - def compiler_ret(self): - return self._ret_raw.compiler_ctor_ref() - - def compiler_signature(self): - return '({}) -> {}'.format(self.compiler_args(), self.compiler_ret()) - - def intrinsic_signature(self): - names = 'xyzwabcdef' - return '({}) -> {}'.format(', '.join('{}: {}'.format(name, arg.rust_name()) - for name, arg in zip(names, self._args_raw)), - self._ret_raw.rust_name()) - -def parse_args(): - parser = argparse.ArgumentParser( - formatter_class = argparse.RawDescriptionHelpFormatter, - description = 'Render an intrinsic definition JSON to various formats.', - epilog = textwrap.dedent('''\ - Quick How-To: - - There are two operating modes: single file and multiple files. - - For example, ARM is specified as a single file. To generate the - compiler-definitions for ARM just pass the script the "arm.json" file: - - python generator.py --format compiler-defs arm.json - - The X86 architecture is specified as multiple files (for the different - instruction sets that x86 supports). To generate the compiler - definitions one needs to pass the script a "platform information file" - (with the -i flag) next to the files of the different instruction sets. - For example, to generate the X86 compiler-definitions for SSE4.2, just: - - python generator.py --format compiler-defs -i x86/info.json sse42.json - - And to generate the compiler-definitions for SSE4.1 and SSE4.2, just: - - python generator.py --format compiler-defs -i x86/info.json sse41.json sse42.json - - An intrinsic definition consists of a map with fields: - - intrinsic: pattern for the name(s) of the vendor's C intrinsic(s) - - llvm: pattern for the name(s) of the internal llvm intrinsic(s) - - width: a vector of vector bit-widths the pattern works with - - ret: type specifier for the return value - - arguments: vector of type specifiers for arguments - - The width and types describe a range of possible intrinsics, - and these are fed back into the intrinsic and llvm patterns to - create the appropriate definitions. - - ## Type specifier grammar - - ``` - type := core_type modifier* suffix? - - core_type := void | vector | scalar | aggregate | reference - - modifier := 'v' | 'h' | 'd' | 'n' | 'w' | 'u' | 's' | - 'x' number | '.' number - suffix := pointer | bitcast - pointer := 'Pm' llvm_pointer? | 'Pc' llvm_pointer? - llvm_pointer := '/' type - bitcast := '->' type - - void := 'V' - - vector := vector_elem width | - vector_elem := 'i' | 'u' | 's' | 'f' - - scalar := scalar_type number llvm_width? - scalar_type := 'U' | 'S' | 'F' - llvm_width := '/' number - - aggregate := '(' (type),* ')' 'f'? | '[' type ';' number ']' 'f'? 
- - reference := number - - width = number | '(' number '-' number ')' - - number = [0-9]+ - ``` - - ## Void - - The `V` type corresponds to `void` in LLVM (`()` in - Rust). It's likely to only work in return position. - - ## Vectors - - The vector grammar is a pattern describing many possibilities - for arguments/return value. The `vector_elem` describes the - types of elements to use, and the `width` describes the (range - of) widths for those elements, which are then placed into a - vector with the `width` bitwidth. E.g. if an intrinsic has a - `width` that includes 128, and the return value is `i(8-32)`, - then some instantiation of that intrinsic will be `u8x16`, - `u32x4`, `i32x4`, etc. - - ### Elements - - - i: integer, both signed and unsigned - - u: unsigned integer - - s: signed integer - - f: float - - ## Scalars - - Similar to vectors, but these describe a single concrete type, - not a range. The number is the bitwidth. The optional - `llvm_width` is the bitwidth of the integer that should be - passed to LLVM (by truncating the Rust argument): this only - works with scalar integers and the LLVM width must be smaller - than the Rust width. - - ### Types - - - U: unsigned integer - - S: signed integer - - F: float - - ## Aggregates - - An aggregate is a collection of multiple types; a tuple in - Rust terms, or an unnamed struct in LLVM. The `f` modifiers - forces the tuple to be flattened in the LLVM - intrinsic. E.g. if `llvm.foo` takes `(F32,S32)`: - - - no `f` corresponds to `declare ... @llvm.foo({float, i32})`. - - having an `f` corresponds to `declare ... @llvm.foo(float, i32)`. - - The `[type;number]` form is a just shorter way to write - `(...)`, except avoids doing a cartesian product of generic - types, e.g. `[S32;2]` is the same as `(S32, S32)`, while - `[I32;2]` is describing just the two types `(S32,S32)` and - `(U32,U32)` (i.e. doesn't include `(S32,U32)`, `(U32,S32)` as - `(I32,I32)` would). - - (Currently aggregates can not contain other aggregates.) - - ## References - - A reference uses the type of another argument, with possible - modifications. The number refers to the type to use, starting - with 0 == return value, 1 == first argument, 2 == second - argument, etc. - - ## Affixes - - The `modifier` and `suffix` adaptors change the precise - representation. - - ### Modifiers - - - 'v': put a scalar into a vector of the current width (u32 -> u32x4, when width == 128) - - 'S': get the scalar element of a vector (u32x4 -> u32) - - 'h': half the length of the vector (u32x4 -> u32x2) - - 'd': double the length of the vector (u32x2 -> u32x4) - - 'n': narrow the element of the vector (u32x4 -> u16x4) - - 'w': widen the element of the vector (u16x4 -> u32x4) - - 'N': half the length of the vector element (u32x4 -> u16x8) - - 'W': double the length of the vector element (u16x8 -> u32x4) - - 'u': force a number (vector or scalar) to be unsigned int (f32x4 -> u32x4) - - 's': force a number (vector or scalar) to be signed int (u32x4 -> i32x4) - - 'f': force a number (vector or scalar) to be float (u32x4 -> f32x4) - - 'x' number: force the type to be a vector of bitwidth `number`. - - '.' number: get the `number`th element of an aggregate - - 'D': dereference a pointer (*mut u32 -> u32) - - 'C': make a pointer const (*mut u32 -> *const u32) - - 'M': make a pointer mut (*const u32 -> *mut u32) - - ### Pointers - - Pointers can be created of any type by appending a `P*` - suffix. The `m` vs. `c` chooses mut vs. const. e.g. 
`S32Pm` - corresponds to `*mut i32`, and `i32Pc` corresponds (with width - 128) to `*const i8x16`, `*const u32x4`, etc. - - The type after the `/` (optional) represents the type used - internally to LLVM, e.g. `S32pm/S8` is exposed as `*mut i32` - in Rust, but is `i8*` in LLVM. (This defaults to the main - type). - - ### Bitcast - - The `'->' type` bitcast suffix will cause the value to be - bitcast to the right-hand type when calling the intrinsic, - e.g. `s32->f32` will expose the intrinsic as `i32x4` at the - Rust level, but will cast that vector to `f32x4` when calling - the LLVM intrinsic. - ''')) - parser.add_argument('--format', choices=FORMATS, required=True, - help = 'Output format.') - parser.add_argument('-o', '--out', type=argparse.FileType('w'), default=sys.stdout, - help = 'File to output to (default stdout).') - parser.add_argument('-i', '--info', type=argparse.FileType('r'), - help = 'File containing platform specific information to merge into ' - 'the input files\' header.') - parser.add_argument('in_', metavar="FILE", type=argparse.FileType('r'), nargs='+', - help = 'JSON files to load') - return parser.parse_args() - - -class ExternBlock(object): - def __init__(self): - pass - - @staticmethod - def open(platform): - return 'extern "platform-intrinsic" {' - - @staticmethod - def render(mono): - return ' fn {}{}{};'.format(mono.platform_prefix(), - mono.intrinsic_name(), - mono.intrinsic_signature()) - - @staticmethod - def close(): - return '}' - -class CompilerDefs(object): - def __init__(self): - pass - - @staticmethod - def open(platform): - return '''\ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {{Intrinsic, Type}}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option<Intrinsic> {{ - if !name.starts_with("{0}") {{ return None }} - Some(match &name["{0}".len()..]
{{'''.format(platform.platform_prefix()) - - @staticmethod - def render(mono): - return '''\ - "{}" => Intrinsic {{ - inputs: {{ static INPUTS: [&'static Type; {}] = [{}]; &INPUTS }}, - output: {}, - definition: Named("{}") - }},'''.format(mono.intrinsic_set_name() + mono.intrinsic_suffix(), - len(mono._args_raw), - mono.compiler_args(), - mono.compiler_ret(), - mono.llvm_name()) - - @staticmethod - def close(): - return '''\ - _ => return None, - }) -}''' - -FORMATS = { - 'extern-block': ExternBlock(), - 'compiler-defs': CompilerDefs(), -} - - -def main(): - args = parse_args() - ins = args.in_ - out = args.out - out_format = FORMATS[args.format] - info = args.info - one_file_no_info = False - if len(ins) > 1 and info is None: - print('error: cannot have multiple inputs without an info header.', file=sys.stderr) - sys.exit(1) - - elif info is None: - info = ins[0] - one_file_no_info = True - info_json = json.load(info) - platform = PlatformInfo(info_json) - - print(out_format.open(platform), file=out) - - for in_ in ins: - - if one_file_no_info: - data = info_json - else: - data = json.load(in_) - data.update(info_json) - - intrinsics = IntrinsicSet(platform, data) - for intr in intrinsics.intrinsics(): - for mono in intr.monomorphise(): - print(out_format.render(mono), file=out) - - print(out_format.close(), file=out) - -if __name__ == '__main__': - main() diff --git a/src/etc/platform-intrinsics/hexagon/hvx_v60.json b/src/etc/platform-intrinsics/hexagon/hvx_v60.json deleted file mode 100644 index a1897e6bf6100..0000000000000 --- a/src/etc/platform-intrinsics/hexagon/hvx_v60.json +++ /dev/null @@ -1,1326 +0,0 @@ -{ - "platform": "Q6_", - "intrinsic_prefix": "", - "llvm_prefix": "llvm.hexagon.V6.", - "number_info": { - "signed": { - "kind": "s", - "data_type": { "8": "b", "16": "h", "32": "w" }, - "data_type_plain": { "8": "b", "16": "h", "32": "w" } - }, - "unsigned": { - "kind": "u", - "data_type": { "8": "ub", "16": "uh", "32": "uw" }, - "data_type_plain": { "8": "b", "16": "h", "32": "w" } - }, - "float": { - "kind": "f", - "data_type": { "8": "b", "16": "h", "32": "w" }, - "data_type_plain": { "8": "b", "16": "h", "32": "w" } - } - }, - "width_info": { - "64": { "width_b": "64", "width_suffix": "" }, - "128": { "width_b": "128", "width_suffix": ".128B" }, - "512": { "width_b": "64", "width_suffix": "" }, - "1024": { "widthd_b": "64", "width_b": "128", "width_suffix": ".128B", "widthd_suffix": "" }, - "2048": { "widthd_b": "128", "widthd_suffix": ".128B" } - }, - "intrinsics": [ - { - "intrinsic": "R_vextract{1.width_b}", - "width": [512, 1024], - "llvm": "extractw{1.width_suffix}", - "ret": "U32", - "args": ["u32", "U32"] - }, - { - "intrinsic": "V_lo{0.width_b}", - "width": [512, 1024], - "llvm": "lo{0.width_suffix}", - "ret": "u32", - "args": ["0d"] - }, - { - "intrinsic": "V_hi{0.width_b}", - "width": [512, 1024], - "llvm": "hi{0.width_suffix}", - "ret": "u32", - "args": ["0d"] - }, - { - "intrinsic": "V_vsplat_R{0.width_b}", - "width": [512, 1024], - "llvm": "lvsplat{1.data_type}{0.width_suffix}", - "ret": "u32", - "args": ["0S"] - }, - { - "intrinsic": "Q_and_QQ{0.width_b}", - "width": [64, 128], - "llvm": "pred.and{0.width_suffix}", - "ret": "u32", - "args": ["0", "0"] - }, - { - "intrinsic": "Q_not_Q{0.width_b}", - "width": [64, 128], - "llvm": "pred.not{0.width_suffix}", - "ret": "u32", - "args": ["0"] - }, - { - "intrinsic": "Q_or_QQ{0.width_b}", - "width": [64, 128], - "llvm": "pred.or{0.width_suffix}", - "ret": "u32", - "args": ["0", "0"] - }, - { - "intrinsic": 
"Q_xor_QQ{0.width_b}", - "width": [64, 128], - "llvm": "pred.xor{0.width_suffix}", - "ret": "u32", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vabsdiff_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vabsdiff{1.data_type}{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vabsdiff_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vabsdiff{1.data_type}{0.width_suffix}", - "ret": "u(16-32)", - "args": ["0s", "0s"] - }, - { - "intrinsic": "V{0.data_type}_vabs_V{1.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vabs{1.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0"] - }, - { - "intrinsic": "V{0.data_type}_vabs_V{1.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vabs{1.data_type}.sat{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0"] - }, - { - "intrinsic": "V{0.data_type}_vadd_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}{0.width_suffix}", - "ret": "s(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vadd_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}sat{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vadd_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}sat{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vadd_W{1.data_type}W{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}.dv{0.widthd_suffix}", - "ret": "s(8-32)d", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vadd_W{1.data_type}W{2.data_type}_sat{0.widthd_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}sat.dv{0.widthd_suffix}", - "ret": "s(16-32)d", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vadd_W{1.data_type}W{2.data_type}_sat{0.widthd_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}sat.dv{0.widthd_suffix}", - "ret": "u(8-16)d", - "args": ["0", "0"] - }, - { - "intrinsic": "V_valign_VVR{0.width_b}", - "width": [512, 1024], - "llvm": "valignb{0.width_suffix}", - "ret": "u8", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V_valign_VVI{0.width_b}", - "width": [512, 1024], - "llvm": "valignbi{0.width_suffix}", - "ret": "u8", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V_vlalign_VVR{0.width_b}", - "width": [512, 1024], - "llvm": "vlalignb{0.width_suffix}", - "ret": "u8", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V_vlalign_VVI{0.width_b}", - "width": [512, 1024], - "llvm": "vlalignbi{0.width_suffix}", - "ret": "u8", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V_vand_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vand{0.width_suffix}", - "ret": "u16", - "args": ["0", "0"] - }, - { - "intrinsic": "V_vand_QR{0.width_b}", - "width": [512, 1024], - "llvm": "vandqrt{0.width_suffix}", - "ret": "u8", - "args": ["u32hhh", "U32"] - }, - { - "intrinsic": "V_vandor_VQR{0.width_b}", - "width": [512, 1024], - "llvm": "vandqrt.acc{0.width_suffix}", - "ret": "u8", - "args": ["0", "u32hhh", "U32"] - }, - { - "intrinsic": "Q_vand_VR{0.width_b}", - "width": [512, 1024], - "llvm": "vandvrt{0.width_suffix}", - "ret": "u32hhh", - "args": ["u8", "U32"] - }, - { - "intrinsic": "Q_vandor_QVR{0.width_b}", - "width": [512, 1024], - "llvm": "vandvrt{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "u8", "U32"] - }, - { 
- "intrinsic": "V{0.data_type}_vasl_V{1.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vasl{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasl_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vasl{0.data_type}v{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vaslacc_V{1.data_type}V{2.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vasl{0.data_type}.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasr_V{1.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasr_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{0.data_type}v{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vasracc_V{1.data_type}V{2.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{0.data_type}.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasr_V{1.data_type}V{2.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{0.data_type}{1.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0hw", "0hw", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasr_V{1.data_type}V{2.data_type}R_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{1.data_type}{0.data_type_plain}sat{0.width_suffix}", - "ret": "i(8-16)", - "args": ["0hws", "0hws", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vasr_V{1.data_type}V{2.data_type}R_rnd_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vasr{1.data_type}{0.data_type_plain}rndsat{0.width_suffix}", - "ret": "i(8-16)", - "args": ["0hws", "0hws", "U32"] - }, - { - "intrinsic": "V_equals_V{0.width_b}", - "width": [512, 1024], - "llvm": "vassign{0.width_suffix}", - "ret": "u32", - "args": ["0"] - }, - { - "intrinsic": "W_equals_W{0.widthd_b}", - "width": [512, 1024], - "llvm": "vassignp{0.widthd_suffix}", - "ret": "u32d", - "args": ["0"] - }, - { - "intrinsic": "V{0.data_type}_vavg_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vavg{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vavg_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vavg{0.data_type}{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vavg_V{1.data_type}V{2.data_type}_rnd{0.width_b}", - "width": [512, 1024], - "llvm": "vavgrnd{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vavg_V{1.data_type}V{2.data_type}_rnd{0.width_b}", - "width": [512, 1024], - "llvm": "vavgrnd{0.data_type}{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vcl0_V{1.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vcl0{0.data_type_plain}{0.width_suffix}", - "ret": "u(16-32)", - "args": ["0"] - }, - { - "intrinsic": "W_vcombine_VV{0.widthd_b}", - "width": [512, 1024], - "llvm": "vcombine{0.widthd_suffix}", - "ret": "u8d", - "args": ["0h", "0h"] - }, - { - "intrinsic": "V_vzero{0.width_b}", - "width": [512, 1024], - "llvm": "vd0{0.width_suffix}", - "ret": "u32", - "args": [] - }, - { - "intrinsic": "V{0.data_type}_vdeal_V{1.data_type}{0.width_b}", - "width": 
[512, 1024], - "llvm": "vdeal{1.data_type}{0.width_suffix}", - "ret": "s(8-16)", - "args": ["0"] - }, - { - "intrinsic": "V{0.data_type}_vdeale_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vdeal{1.data_type}4w{0.width_suffix}", - "ret": "s8", - "args": ["0", "0"] - }, - { - "intrinsic": "W_vdeal_VVR{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdealvdd{0.widthd_suffix}", - "ret": "u8d", - "args": ["0h", "0h", "U32"] - }, - { - "intrinsic": "V_vdelta_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vdelta{0.width_suffix}", - "ret": "u8", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_V{1.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpybus{0.width_suffix}", - "ret": "s16", - "args": ["u8", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}V{2.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpybus.acc{0.width_suffix}", - "ret": "s16", - "args": ["s16", "u8", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vdmpy_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdmpybus.dv{0.widthd_suffix}", - "ret": "s16d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vdmpyacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdmpybus.dv.acc{0.widthd_suffix}", - "ret": "s16d", - "args": ["s16d", "u8d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_V{1.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhb{0.width_suffix}", - "ret": "s32", - "args": ["s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}V{2.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhb.acc{0.width_suffix}", - "ret": "s32", - "args": ["s32", "s16", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vdmpy_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdmpyhb.dv{0.widthd_suffix}", - "ret": "s32d", - "args": ["s16d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vdmpyacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdmpyhb.dv.acc{0.widthd_suffix}", - "ret": "s32d", - "args": ["s32d", "s16d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_W{1.data_type}Rh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhisat{0.width_suffix}", - "ret": "s32", - "args": ["0d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_V{1.data_type}Rh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsat{0.width_suffix}", - "ret": "s32", - "args": ["s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_W{1.data_type}Ruh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsuisat{0.width_suffix}", - "ret": "s32", - "args": ["s16d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_V{1.data_type}Ruh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsusat{0.width_suffix}", - "ret": "s32", - "args": ["s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpy_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhvsat{0.width_suffix}", - "ret": "s32", - "args": ["s16", "s16"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}W{2.data_type}Rh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhisat_acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vdsad_W{1.data_type}Ruh{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdsaduh{0.widthd_suffix}", - "ret": "u32d", - "args": ["u16d", "U32"] - }, - { - "intrinsic": 
"W{0.data_type}_vdsadacc_W{1.data_type}W{2.data_type}Ruh{0.widthd_b}", - "width": [512, 1024], - "llvm": "vdsaduh.acc{0.widthd_suffix}", - "ret": "u32d", - "args": ["0", "u16d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}V{2.data_type}Rh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsat_acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}W{2.data_type}Ruh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsuisat_acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "s16d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}V{2.data_type}Ruh_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhsusat_acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vdmpyacc_V{1.data_type}V{2.data_type}V{3.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vdmpyhvsat_acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "s16", "s16"] - }, - { - "intrinsic": "Q_vcmp_eq_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "veq{1.data_type}{0.width_suffix}", - "ret": "u32hhh", - "args": ["s(8-32)", "1"] - }, - { - "intrinsic": "Q_vcmp_eqand_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "veq{2.data_type}.and{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_eqor_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "veq{2.data_type}.or{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_eqxacc_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "veq{2.data_type}.xor{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_gt_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{1.data_type}{0.width_suffix}", - "ret": "u32hhh", - "args": ["s(8-32)", "1"] - }, - { - "intrinsic": "Q_vcmp_gt_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{1.data_type}{0.width_suffix}", - "ret": "u32hhh", - "args": ["u(8-16)", "1"] - }, - { - "intrinsic": "Q_vcmp_gtand_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.and{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_gtand_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.and{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "u(8-16)", "2"] - }, - { - "intrinsic": "Q_vcmp_gtor_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.or{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_gtor_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.or{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "u(8-16)", "2"] - }, - { - "intrinsic": "Q_vcmp_gtxacc_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.xor{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "s(8-32)", "2"] - }, - { - "intrinsic": "Q_vcmp_gtxacc_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vgt{2.data_type}.xor{0.width_suffix}", - "ret": "u32hhh", - "args": ["0", "u(8-16)", "2"] - }, - { - "intrinsic": "V{0.data_type}_vinsert_V{1.data_type}R{0.width_b}", - 
"width": [512, 1024], - "llvm": "vinsertwr{0.width_suffix}", - "ret": "s32", - "args": ["S32"] - }, - { - "intrinsic": "V{0.data_type}_vlsr_V{1.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vlsr{0.data_type_plain}{0.width_suffix}", - "ret": "u(16-32)", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vlsr_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vlsr{0.data_type}v{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vlut32_V{1.data_type}V{2.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vlutvv{0.data_type}{0.width_suffix}", - "ret": "s8", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vlut16_V{1.data_type}V{2.data_type}R{0.widthd_b}", - "width": [512, 1024], - "llvm": "vlutvw{0.data_type}{0.widthd_suffix}", - "ret": "s16d", - "args": ["s8", "s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vlut32or_V{1.data_type}V{2.data_type}V{3.data_type}R{0.width_b}", - "width": [512, 1024], - "llvm": "vlutvv{0.data_type}.oracc{0.width_suffix}", - "ret": "s8", - "args": ["0", "0", "0", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vlut16or_W{1.data_type}V{2.data_type}V{3.data_type}R{0.widthd_b}", - "width": [512, 1024], - "llvm": "vlutvw{0.data_type}.oracc{0.widthd_suffix}", - "ret": "s16d", - "args": ["0", "s8", "s16", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmax_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmax{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmax_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmax{0.data_type}{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmin_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmin{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmin_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmin{0.data_type}{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vmpa_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpabus{0.widthd_suffix}", - "ret": "s16d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpaacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpabus.acc{0.widthd_suffix}", - "ret": "s16d", - "args": ["0", "u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpa_W{1.data_type}W{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpab{1.kind}{2.kind}v{0.widthd_suffix}", - "ret": "s16d", - "args": ["u8d", "i8d"] - }, - { - "intrinsic": "W{0.data_type}_vmpa_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpahb{0.widthd_suffix}", - "ret": "s32d", - "args": ["s16d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpaacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpahb.acc{0.widthd_suffix}", - "ret": "s32d", - "args": ["0", "s16d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpy_V{1.data_type}V{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}us{0.widthd_suffix}", - "ret": "s(16-32)d", - "args": ["0n", "0nu"] - }, - { - "intrinsic": "W{0.data_type}_vmpyacc_W{1.data_type}V{2.data_type}V{3.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{2.data_type}us.acc{0.widthd_suffix}", - 
"ret": "s(16-32)d", - "args": ["0", "0n", "0nu"] - }, - { - "intrinsic": "W{0.data_type}_vmpy_V{1.data_type}V{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpybusv{0.widthd_suffix}", - "ret": "s16d", - "args": ["u8", "s8"] - }, - { - "intrinsic": "W{0.data_type}_vmpyacc_W{1.data_type}V{2.data_type}V{3.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpybusv.acc{0.widthd_suffix}", - "ret": "s16d", - "args": ["0", "0nu", "0n"] - }, - { - "intrinsic": "W{0.data_type}_vmpy_V{1.data_type}V{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}v{0.widthd_suffix}", - "ret": "i(16-32)d", - "args": ["0n", "0n"] - }, - { - "intrinsic": "W{0.data_type}_vmpyacc_W{1.data_type}V{2.data_type}V{3.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{2.data_type}v.acc{0.widthd_suffix}", - "ret": "i(16-32)d", - "args": ["0", "0n", "0n"] - }, - { - "intrinsic": "V{0.data_type}_vmpye_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyewuh{0.width_suffix}", - "ret": "s32", - "args": ["s32", "u16"] - }, - { - "intrinsic": "W{0.data_type}_vmpy_V{1.data_type}R{1.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}{0.widthd_suffix}", - "ret": "i32d", - "args": ["0n", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpyacc_W{1.data_type}V{2.data_type}R{2.data_type}_sat{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{2.data_type}sat.acc{0.widthd_suffix}", - "ret": "s32d", - "args": ["0", "0n", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpy_V{1.data_type}R{1.data_type}_s1_rnd_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}srs{0.width_suffix}", - "ret": "s32", - "args": ["0nd", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpy_V{1.data_type}R{1.data_type}_s1_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}ss{0.width_suffix}", - "ret": "s32", - "args": ["0nd", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpy_V{1.data_type}V{2.data_type}_s1_rnd_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}vsrs{0.width_suffix}", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmpyieo_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyieo{1.data_type}{0.width_suffix}", - "ret": "s32", - "args": ["0nd", "0nd"] - }, - { - "intrinsic": "V{0.data_type}_vmpyieacc_V{1.data_type}V{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyie{2.data_type}{3.data_type}.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0", "i16"] - }, - { - "intrinsic": "V{0.data_type}_vmpyie_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyie{1.data_type}{2.data_type}{0.width_suffix}", - "ret": "s32", - "args": ["0", "u16"] - }, - { - "intrinsic": "V{0.data_type}_vmpyi_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmpyiacc_V{1.data_type}V{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}.acc{0.width_suffix}", - "ret": "s16", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vmpyi_V{1.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}b{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyiacc_V{1.data_type}V{2.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": 
"vmpyi{1.data_type}b.acc{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyi_V{1.data_type}Rh{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}h{0.width_suffix}", - "ret": "s32", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyiacc_V{1.data_type}V{2.data_type}Rh{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}h.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyi_V{1.data_type}Rub{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}ub{0.width_suffix}", - "ret": "s32", - "args": ["0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyiacc_V{1.data_type}V{2.data_type}Rub{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyi{1.data_type}ub.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmpyo_V{1.data_type}V{2.data_type}_s1_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyo{1.data_type}{2.data_type}{0.width_suffix}", - "ret": "s32", - "args": ["0", "0nd"] - }, - { - "intrinsic": "V{0.data_type}_vmpyo_V{1.data_type}V{2.data_type}_s1_rnd_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyo{1.data_type}{2.data_type}.rnd{0.width_suffix}", - "ret": "s32", - "args": ["0", "0nd"] - }, - { - "intrinsic": "V{0.data_type}_vmpyo_V{1.data_type}V{2.data_type}_s1_rnd_sat_shift{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyo{1.data_type}{2.data_type}.rnd.sacc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0nd"] - }, - { - "intrinsic": "V{0.data_type}_vmpyo_V{1.data_type}V{2.data_type}_s1_sat_shift{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyo{1.data_type}{2.data_type}.sacc{0.width_suffix}", - "ret": "s32", - "args": ["0", "0nd"] - }, - { - "intrinsic": "V{0.data_type}_vmpyio_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vmpyio{1.data_type}{2.data_type}{0.width_suffix}", - "ret": "s32", - "args": ["0", "0nd"] - }, - { - "intrinsic": "W{0.data_type}_vmpy_V{1.data_type}R{1.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{1.data_type}{0.widthd_suffix}", - "ret": "u16d", - "args": ["0n", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vmpyacc_W{1.data_type}V{2.data_type}R{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vmpy{2.data_type}.acc{0.widthd_suffix}", - "ret": "u(16-32)d", - "args": ["0", "0n", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vmux_QVV{0.width_b}", - "width": [512, 1024], - "llvm": "vmux{0.width_suffix}", - "ret": "u32", - "args": ["0hhh", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vnavg_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vnavg{0.data_type}{0.width_suffix}", - "ret": "i(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vnavg_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vnavg{0.data_type}{0.width_suffix}", - "ret": "u8", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vnormamt_V{1.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vnormamt{0.data_type}{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0"] - }, - { - "intrinsic": "V_vnot_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vnot{0.width_suffix}", - "ret": "u16", - "args": ["0"] - }, - { - "intrinsic": "V_vor_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vor{0.width_suffix}", - "ret": "u16", - "args": ["0", "0"] - }, - { - "intrinsic": 
"V{0.data_type}_vpacke_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vpack{1.data_type}e{0.width_suffix}", - "ret": "s(8-16)", - "args": ["0hw", "0hw"] - }, - { - "intrinsic": "V{0.data_type}_vpacko_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vpack{1.data_type}o{0.width_suffix}", - "ret": "s(8-16)", - "args": ["0hw", "0hw"] - }, - { - "intrinsic": "V{0.data_type}_vpack_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vpack{1.data_type}{0.data_type}.sat{0.width_suffix}", - "ret": "i(8-16)", - "args": ["0hws", "0hws"] - }, - { - "intrinsic": "V{0.data_type}_vpopcount_V{1.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vpopcount{0.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0"] - }, - { - "intrinsic": "V_vrdelta_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vrdelta{0.width_suffix}", - "ret": "u8", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vrmpy_V{1.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpybus{0.width_suffix}", - "ret": "s32", - "args": ["u8", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vrmpyacc_V{1.data_type}V{2.data_type}Rb{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpybus.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "u8", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vrmpy_W{1.data_type}RbI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrmpybusi{0.widthd_suffix}", - "ret": "s32d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vrmpyacc_W{1.data_type}W{2.data_type}RbI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrmpybusi.acc{0.widthd_suffix}", - "ret": "s32d", - "args": ["0", "u8d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vrmpy_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpybusv{0.width_suffix}", - "ret": "s32", - "args": ["u8", "s8"] - }, - { - "intrinsic": "V{0.data_type}_vrmpyacc_V{1.data_type}V{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpybusv.acc{0.width_suffix}", - "ret": "s32", - "args": ["0", "u8", "s8"] - }, - { - "intrinsic": "V{0.data_type}_vrmpy_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpy{1.data_type}v{0.width_suffix}", - "ret": "i32", - "args": ["0nndd", "0nndd"] - }, - { - "intrinsic": "V{0.data_type}_vrmpyacc_V{1.data_type}V{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpy{1.data_type}v.acc{0.width_suffix}", - "ret": "i32", - "args": ["0", "0nndd", "0nndd"] - }, - { - "intrinsic": "V{0.data_type}_vrmpy_V{1.data_type}Rub{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpyub{0.width_suffix}", - "ret": "u32", - "args": ["u8", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vrmpyacc_V{1.data_type}V{2.data_type}Rub{0.width_b}", - "width": [512, 1024], - "llvm": "vrmpyub.acc{0.width_suffix}", - "ret": "u32", - "args": ["0", "u8", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vrmpy_W{1.data_type}RubI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrmpyubi{0.widthd_suffix}", - "ret": "u32d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vrmpyacc_W{1.data_type}W{2.data_type}RubI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrmpyubi.acc{0.widthd_suffix}", - "ret": "u32d", - "args": ["0", "u8d", "U32"] - }, - { - "intrinsic": "V_vror_VR{0.width_b}", - "width": [512, 1024], - "llvm": "vror{0.width_suffix}", - "ret": "u8", - "args": ["0", "U32"] - }, - { - "intrinsic": 
"V{0.data_type}_vround_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vround{1.data_type}{0.data_type}{0.width_suffix}", - "ret": "i(8-16)", - "args": ["0hws", "0hws"] - }, - { - "intrinsic": "W{0.data_type}_vrsad_W{1.data_type}RubI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrsadubi{0.widthd_suffix}", - "ret": "u32d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vrsadacc_W{1.data_type}W{2.data_type}RubI{0.widthd_b}", - "width": [512, 1024], - "llvm": "vrsadubi.acc{0.widthd_suffix}", - "ret": "u32d", - "args": ["0", "u8d", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vsat_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vsat{1.data_type}{0.data_type}{0.width_suffix}", - "ret": "u8", - "args": ["0hws", "0hws"] - }, - { - "intrinsic": "V{0.data_type}_vsat_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vsat{1.data_type}{0.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0hw", "0hw"] - }, - { - "intrinsic": "W{0.data_type}_vsxt_V{1.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vs{1.data_type}{0.widthd_suffix}", - "ret": "s(16-32)d", - "args": ["0n"] - }, - { - "intrinsic": "W{0.data_type}_vzxt_V{1.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vz{1.data_type_plain}{0.widthd_suffix}", - "ret": "u(16-32)d", - "args": ["0n"] - }, - { - "intrinsic": "V{0.data_type}_condacc_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}q{0.width_suffix}", - "ret": "s(8-32)", - "args": ["u32hhh", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_condacc_QnV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vadd{0.data_type}nq{0.width_suffix}", - "ret": "s(8-32)", - "args": ["u32hhh", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_condnac_QV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}q{0.width_suffix}", - "ret": "s(8-32)", - "args": ["u32hhh", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_condnac_QnV{2.data_type}V{3.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}nq{0.width_suffix}", - "ret": "s(8-32)", - "args": ["u32hhh", "0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vshuffe_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshufe{1.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vshuffo_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshufo{1.data_type}{0.width_suffix}", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vshuff_V{1.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshuff{1.data_type}{0.width_suffix}", - "ret": "s(8-16)", - "args": ["0"] - }, - { - "intrinsic": "V{0.data_type}_vshuffe_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshuffe{1.data_type}{0.width_suffix}", - "ret": "s8", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vshuffo_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshuffo{1.data_type}{0.width_suffix}", - "ret": "s8", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vshuffoe_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vshuffoe{1.data_type}{0.width_suffix}", - "ret": "s(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "W_vshuff_VVR{0.widthd_b}", - "width": [512, 1024], - "llvm": 
"vshufvvd{0.widthd_suffix}", - "ret": "u8d", - "args": ["0h", "0h", "U32"] - }, - { - "intrinsic": "V{0.data_type}_vsub_V{1.data_type}V{2.data_type}{0.width_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}{0.width_suffix}", - "ret": "s(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vsub_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}sat{0.width_suffix}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "V{0.data_type}_vsub_V{1.data_type}V{2.data_type}_sat{0.width_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}sat{0.width_suffix}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vsub_W{1.data_type}W{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}.dv{0.widthd_suffix}", - "ret": "s(8-32)d", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vsub_W{1.data_type}W{2.data_type}_sat{0.widthd_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}sat.dv{0.widthd_suffix}", - "ret": "s(16-32)d", - "args": ["0", "0"] - }, - { - "intrinsic": "W{0.data_type}_vsub_W{1.data_type}W{2.data_type}_sat{0.widthd_b}", - "width": [512, 1024], - "llvm": "vsub{0.data_type}sat.dv{0.widthd_suffix}", - "ret": "u(8-16)d", - "args": ["0", "0"] - }, - { - "intrinsic": "W_vswap_QVV{0.widthd_b}", - "width": [512, 1024], - "llvm": "vswap{0.widthd_suffix}", - "ret": "u8d", - "args": ["u32hhh", "0h", "0h"] - }, - { - "intrinsic": "W{0.data_type}_vtmpy_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpyb{0.widthd_suffix}", - "ret": "s16d", - "args": ["0nd", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vtmpyacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpyb.acc{0.widthd_suffix}", - "ret": "s16d", - "args": ["0", "0nd", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vtmpy_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpybus{0.widthd_suffix}", - "ret": "s16d", - "args": ["u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vtmpyacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpybus.acc{0.widthd_suffix}", - "ret": "s16d", - "args": ["0", "u8d", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vtmpy_W{1.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpyhb{0.widthd_suffix}", - "ret": "s32d", - "args": ["0nd", "U32"] - }, - { - "intrinsic": "W{0.data_type}_vunpack_V{1.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vunpack{1.data_type}{0.widthd_suffix}", - "ret": "i(16-32)d", - "args": ["0n"] - }, - { - "intrinsic": "W{0.data_type}_vunpackoor_W{1.data_type}V{2.data_type}{0.widthd_b}", - "width": [512, 1024], - "llvm": "vunpacko{2.data_type}{0.widthd_suffix}", - "ret": "s(16-32)d", - "args": ["0", "0n"] - }, - { - "intrinsic": "W{0.data_type}_vtmpyacc_W{1.data_type}W{2.data_type}Rb{0.widthd_b}", - "width": [512, 1024], - "llvm": "vtmpyhb.acc{0.widthd_suffix}", - "ret": "s32d", - "args": ["0", "0nd", "U32"] - }, - { - "intrinsic": "V_vxor_VV{0.width_b}", - "width": [512, 1024], - "llvm": "vxor{0.width_suffix}", - "ret": "u16", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/nvptx/cuda.json b/src/etc/platform-intrinsics/nvptx/cuda.json deleted file mode 100644 index 1beaaeb5d8718..0000000000000 --- a/src/etc/platform-intrinsics/nvptx/cuda.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "intrinsic_prefix": "_", - "llvm_prefix": "llvm.cuda.", - "intrinsics": [ - { - "intrinsic": 
"syncthreads", - "width": ["0"], - "llvm": "syncthreads", - "ret": "V", - "args": [] - } - ] -} diff --git a/src/etc/platform-intrinsics/nvptx/info.json b/src/etc/platform-intrinsics/nvptx/info.json deleted file mode 100644 index 80332c54e0414..0000000000000 --- a/src/etc/platform-intrinsics/nvptx/info.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "platform": "nvptx", - "number_info": { - "signed": {} - }, - "width_info": {} -} diff --git a/src/etc/platform-intrinsics/nvptx/sreg.json b/src/etc/platform-intrinsics/nvptx/sreg.json deleted file mode 100644 index 33d97f2694699..0000000000000 --- a/src/etc/platform-intrinsics/nvptx/sreg.json +++ /dev/null @@ -1,90 +0,0 @@ -{ - "intrinsic_prefix": "_", - "llvm_prefix": "llvm.nvvm.read.ptx.sreg.", - "intrinsics": [ - { - "intrinsic": "block_dim_x", - "width": ["0"], - "llvm": "ntid.x", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "block_dim_y", - "width": ["0"], - "llvm": "ntid.y", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "block_dim_z", - "width": ["0"], - "llvm": "ntid.z", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "block_idx_x", - "width": ["0"], - "llvm": "ctaid.x", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "block_idx_y", - "width": ["0"], - "llvm": "ctaid.y", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "block_idx_z", - "width": ["0"], - "llvm": "ctaid.z", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "grid_dim_x", - "width": ["0"], - "llvm": "nctaid.x", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "grid_dim_y", - "width": ["0"], - "llvm": "nctaid.y", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "grid_dim_z", - "width": ["0"], - "llvm": "nctaid.z", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "thread_idx_x", - "width": ["0"], - "llvm": "tid.x", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "thread_idx_y", - "width": ["0"], - "llvm": "tid.y", - "ret": "S32", - "args": [] - }, - { - "intrinsic": "thread_idx_z", - "width": ["0"], - "llvm": "tid.z", - "ret": "S32", - "args": [] - } - ] -} diff --git a/src/etc/platform-intrinsics/powerpc.json b/src/etc/platform-intrinsics/powerpc.json deleted file mode 100644 index acb6813887c5c..0000000000000 --- a/src/etc/platform-intrinsics/powerpc.json +++ /dev/null @@ -1,294 +0,0 @@ -{ - "platform": "powerpc", - "intrinsic_prefix": "_vec_", - "llvm_prefix": "llvm.ppc.altivec.", - "number_info": { - "unsigned": { - "kind" : "u", - "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } - }, - "signed": { - "kind" : "s", - "data_type_short": { "8": "b", "16": "h", "32": "w", "64": "d" } - }, - "float": {} - }, - "width_info": { - "128": { "width": "" } - }, - "intrinsics": [ - { - "intrinsic": "perm", - "width": [128], - "llvm": "vperm", - "ret": "s32", - "args": ["0", "0", "s8"] - }, - { - "intrinsic": "mradds", - "width": [128], - "llvm": "vmhraddshs", - "ret": "s16", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "cmpb", - "width": [128], - "llvm": "vcmpbfp", - "ret": "s32", - "args": ["f32", "f32"] - }, - { - "intrinsic": "cmpeq{0.data_type_short}", - "width": [128], - "llvm": "vcmpequ{0.data_type_short}", - "ret": "s(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "cmpgt{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vcmpgt{1.kind}{1.data_type_short}", - "ret": "s(8-32)", - "args": ["0u", "1"] - }, - { - "intrinsic": "cmpgt{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vcmpgt{1.kind}{1.data_type_short}", - "ret": "s(8-32)", - "args": ["0", "1"] - }, - { - "intrinsic": 
"max{0.kind}{0.data_type_short}", - "width": [128], - "llvm": "vmax{0.kind}{0.data_type_short}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "min{0.kind}{0.data_type_short}", - "width": [128], - "llvm": "vmin{0.kind}{0.data_type_short}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "sub{0.kind}{0.data_type_short}s", - "width": [128], - "llvm": "vsub{0.kind}{0.data_type_short}s", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "subc", - "width": [128], - "llvm": "vsubcuw", - "ret": "u32", - "args": ["0", "0"] - }, - { - "intrinsic": "add{0.kind}{0.data_type_short}s", - "width": [128], - "llvm": "vadd{0.kind}{0.data_type_short}s", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "addc", - "width": [128], - "llvm": "vaddcuw", - "ret": "u32", - "args": ["0", "0"] - }, - { - "intrinsic": "mule{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vmule{0.kind}{1.data_type_short}", - "ret": "i(16-32)", - "args": ["0N", "1"] - }, - { - "intrinsic": "mulo{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vmulo{0.kind}{1.data_type_short}", - "ret": "i(16-32)", - "args": ["0N", "1"] - }, - { - "intrinsic": "avg{0.kind}{0.data_type_short}", - "width": [128], - "llvm": "vavg{0.kind}{0.data_type_short}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "packs{0.kind}{1.data_type_short}", - "width": [128], - "llvm": "vpk{0.kind}{1.data_type_short}{0.kind}s", - "ret": "i(8-16)", - "args": ["0W", "1"] - }, - { - "intrinsic": "packsu{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vpk{1.kind}{1.data_type_short}{0.kind}s", - "ret": "u(8-16)", - "args": ["0Ws", "1"] - }, - { - "intrinsic": "packpx", - "width": [128], - "llvm": "vpkpx", - "ret": "s16", - "args": ["s32", "s32"] - }, - { - "intrinsic": "unpackl{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vupkl{1.kind}{1.data_type_short}", - "ret": "s(16-32)", - "args": ["0N"] - }, - { - "intrinsic": "unpackh{1.kind}{1.data_type_short}", - "width": [128], - "llvm": "vupkh{1.kind}{1.data_type_short}", - "ret": "s(16-32)", - "args": ["0N"] - }, - { - "intrinsic": "madds", - "width": [128], - "llvm": "vmhaddshs", - "ret": "s16", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "msumu{1.data_type_short}m", - "width": [128], - "llvm": "vmsumu{1.data_type_short}m", - "ret": "u32", - "args": ["u(8-16)", "1", "u32"] - }, - { - "intrinsic": "msummbm", - "width": [128], - "llvm": "vmsummbm", - "ret": "s32", - "args": ["s8", "u8", "s32"] - }, - { - "intrinsic": "msumshm", - "width": [128], - "llvm": "vmsumshm", - "ret": "s32", - "args": ["s16", "s16", "s32"] - }, - { - "intrinsic": "msum{0.kind}hs", - "width": [128], - "llvm": "vmsum{0.kind}hs", - "ret": "i32", - "args": ["0N", "0N", "0"] - }, - { - "intrinsic": "sum2s", - "width": [128], - "llvm": "vsum2sws", - "ret": "s32", - "args": ["0", "0"] - }, - { - "intrinsic": "sum4{0.kind}bs", - "width": [128], - "llvm": "vsum4{0.kind}bs", - "ret": "i32", - "args": ["0NN", "0"] - }, - { - "intrinsic": "sum4shs", - "width": [128], - "llvm": "vsum4shs", - "ret": "s32", - "args": ["0N", "0"] - }, - { - "intrinsic": "sums", - "width": [128], - "llvm": "vsumsws", - "ret": "s32", - "args": ["0", "0"] - }, - { - "intrinsic": "madd", - "width": [128], - "llvm": "vmaddfp", - "ret": "f32", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "nmsub", - "width": [128], - "llvm": "vnmsubfp", - "ret": "f32", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "expte", - "width": [128], - "llvm": "vexptefp", - "ret": 
"f32", - "args": ["0"] - }, - { - "intrinsic": "floor", - "width": [128], - "llvm": "vrfim", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "ceil", - "width": [128], - "llvm": "vrfip", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "round", - "width": [128], - "llvm": "vrfin", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "trunc", - "width": [128], - "llvm": "vrfiz", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "loge", - "width": [128], - "llvm": "vlogefp", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "re", - "width": [128], - "llvm": "vrefp", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "rsqrte", - "width": [128], - "llvm": "vrsqrtefp", - "ret": "f32", - "args": ["0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/avx.json b/src/etc/platform-intrinsics/x86/avx.json deleted file mode 100644 index 1f41e2ecf3e6d..0000000000000 --- a/src/etc/platform-intrinsics/x86/avx.json +++ /dev/null @@ -1,272 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.avx.", - "intrinsics": [ - { - "intrinsic": "256_addsub_{0.data_type}", - "width": [256], - "llvm": "addsub.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_blendv_{0.data_type}", - "width": [256], - "llvm": "blendv.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "256_broadcast_{0.data_type}", - "width": [256], - "llvm": "vbroadcastf128.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["s8SPc"] - }, - { - "intrinsic": "256_cmp_{0.data_type}", - "width": [256], - "llvm": "cmp.{1.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0", "s8S"] - }, - { - "intrinsic": "256_cvtepi32_pd", - "width": [256], - "llvm": "cvtdq2.pd.256", - "ret": "f64", - "args": ["s32h"] - }, - { - "intrinsic": "256_cvtepi32_ps", - "width": [256], - "llvm": "cvtdq2.ps.256", - "ret": "f32", - "args": ["s32"] - }, - { - "intrinsic": "256_cvtpd_epi32", - "width": [256], - "llvm": "cvt.pd2dq.256", - "ret": "s32h", - "args": ["f64"] - }, - { - "intrinsic": "256_cvtpd_ps", - "width": [256], - "llvm": "cvt.pd2.ps.256", - "ret": "f32h", - "args": ["f64"] - }, - { - "intrinsic": "256_cvtps_epi32", - "width": [256], - "llvm": "cvt.ps2dq.256", - "ret": "s32", - "args": ["f32"] - }, - { - "intrinsic": "256_cvtps_pd", - "width": [256], - "llvm": "cvt.ps2.pd.256", - "ret": "f64", - "args": ["f32h"] - }, - { - "intrinsic": "256_cvttpd_epi32", - "width": [256], - "llvm": "cvtt.pd2dq.256", - "ret": "s32h", - "args": ["f64"] - }, - { - "intrinsic": "256_cvttps_epi32", - "width": [256], - "llvm": "cvtt.ps2dq.256", - "ret": "s32", - "args": ["f32"] - }, - { - "intrinsic": "256_dp_ps", - "width": [256], - "llvm": "dp.ps.256", - "ret": "f32", - "args": ["0", "0", "S32/8"] - }, - { - "intrinsic": "256_hadd_{0.data_type}", - "width": [256], - "llvm": "hadd.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_hsub_{0.data_type}", - "width": [256], - "llvm": "hsub.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_max_{0.data_type}", - "width": [256], - "llvm": "max.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "{0.width_mm}_maskload_{0.data_type}", - "width": [128, 256], - "llvm": "maskload.{0.data_type_short}{0.width_suffix}", - "ret": ["f(32-64)"], - "args": ["0SPc/S8", "0s->0"] - }, - { - "intrinsic": "{3.width_mm}_maskstore_{3.data_type}", - "width": [128, 256], - "llvm": 
"maskstore.{3.data_type_short}{3.width_suffix}", - "ret": "V", - "args": ["F(32-64)Pm/S8", "1Dsv->1Dv", "1Dv"] - }, - { - "intrinsic": "256_min_{0.data_type}", - "width": [256], - "llvm": "min.{0.data_type}.256", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_movemask_ps", - "width": [256], - "llvm": "movmsk.ps.256", - "ret": "S32", - "args": ["f32"] - }, - { - "intrinsic": "256_movemask_pd", - "width": [256], - "llvm": "movmsk.pd.256", - "ret": "S32", - "args": ["f64"] - }, - { - "intrinsic": "{0.width_mm}_permutevar_{0.data_type}", - "width": [128, 256], - "llvm": "vpermilvar.{0.data_type}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0s"] - }, - { - "intrinsic": "256_rcp_ps", - "width": [256], - "llvm": "rcp.ps.256", - "ret": "f32", - "args": ["f32"] - }, - { - "intrinsic": "256_rsqrt_ps", - "width": [256], - "llvm": "rsqrt.ps.256", - "ret": "f32", - "args": ["f32"] - }, - { - "intrinsic": "256_storeu_{2.data_type}", - "width": [256], - "llvm": "storeu.ps.256", - "ret": "V", - "args": ["f(32-64)Pm/U8", "1D"] - }, - { - "intrinsic": "256_storeu_si256", - "width": [256], - "llvm": "storeu.dq.256", - "ret": "V", - "args": ["u8Pm/U8", "1D"] - }, - { - "intrinsic": "256_sqrt_{0.data_type}", - "width": [256], - "llvm": "!llvm.sqrt.{0.llvm_name}", - "ret": "f(32-64)", - "args": ["0"] - }, - { - "intrinsic": "{1.width_mm}_testc_ps", - "width": [128, 256], - "llvm": "vtestc.ps{1.width_suffix}", - "ret": "S32", - "args": ["f32", "f32"] - }, - { - "intrinsic": "{1.width_mm}_testc_pd", - "width": [128, 256], - "llvm": "vtestc.pd{1.width_suffix}", - "ret": "S32", - "args": ["f64", "f64"] - }, - { - "intrinsic": "256_testc_si256", - "width": [256], - "llvm": "ptestc.256", - "ret": "S32", - "args": ["u64", "u64"] - }, - { - "intrinsic": "{1.width_mm}_testnzc_ps", - "width": [128, 256], - "llvm": "vtestnzc.ps{1.width_suffix}", - "ret": "S32", - "args": ["f32", "f32"] - }, - { - "intrinsic": "{1.width_mm}_testnzc_pd", - "width": [128, 256], - "llvm": "vtestnzc.pd{1.width_suffix}", - "ret": "S32", - "args": ["f64", "f64"] - }, - { - "intrinsic": "256_testnzc_si256", - "width": [256], - "llvm": "ptestnzc.256", - "ret": "S32", - "args": ["u64", "u64"] - }, - { - "intrinsic": "{1.width_mm}_testz_ps", - "width": [128, 256], - "llvm": "vtestz.ps{1.width_suffix}", - "ret": "S32", - "args": ["f32", "f32"] - }, - { - "intrinsic": "{1.width_mm}_testz_pd", - "width": [128, 256], - "llvm": "vtestz.pd{1.width_suffix}", - "ret": "S32", - "args": ["f64", "f64"] - }, - { - "intrinsic": "256_testz_si256", - "width": [256], - "llvm": "ptestz.256", - "ret": "S32", - "args": ["u64", "u64"] - }, - { - "intrinsic": "256_zeroall", - "width": [256], - "llvm": "vzeroall", - "ret": "V", - "args": [] - }, - { - "intrinsic": "256_zeroupper", - "width": [256], - "llvm": "vzeroupper", - "ret": "V", - "args": [] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/avx2.json b/src/etc/platform-intrinsics/x86/avx2.json deleted file mode 100644 index dc055b583c568..0000000000000 --- a/src/etc/platform-intrinsics/x86/avx2.json +++ /dev/null @@ -1,202 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.avx2.", - "intrinsics": [ - { - "intrinsic": "256_abs_{0.data_type}", - "width": [256], - "llvm": "pabs.{0.data_type_short}", - "ret": "s(8-32)", - "args": ["0"] - }, - { - "intrinsic": "256_adds_{0.data_type}", - "width": [256], - "llvm": "padd{0.kind_short}s.{0.data_type_short}", - "ret": "i(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_avg_{0.data_type}", - "width": [256], - 
"llvm": "pavg.{0.data_type_short}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_hadd_{0.data_type}", - "width": [256], - "llvm": "phadd.{0.data_type_short}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_hadds_epi16", - "width": [256], - "llvm": "phadd.sw", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "256_hsub_{0.data_type}", - "width": [256], - "llvm": "phsub.{0.data_type_short}", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_hsubs_epi16", - "width": [256], - "llvm": "phsub.sw", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "256_madd_epi16", - "width": [256], - "llvm": "pmadd.wd", - "ret": "s32", - "args": ["s16", "s16"] - }, - { - "intrinsic": "256_maddubs_epi16", - "width": [256], - "llvm": "pmadd.ub.sw", - "ret": "s16", - "args": ["s8", "s8"] - }, - { - "intrinsic": "{0.width_mm}_mask_i32gather_{0.data_type}", - "width": [128, 256], - "llvm": "gather.d.{0.data_type_short}{0.width_suffix}", - "ret": ["s32", "f32"], - "args": ["0", "0SPc/S8", "s32", "0s->0", "S32/8"] - }, - { - "intrinsic": "{0.width_mm}_mask_i32gather_{0.data_type}", - "width": [128, 256], - "llvm": "gather.d.{0.data_type_short}{0.width_suffix}", - "ret": ["s64", "f64"], - "args": ["0", "0SPc/S8", "s32x128", "0s->0", "S32/8"] - }, - { - "intrinsic": "{3.width_mm}_mask_i64gather_{0.data_type}", - "width": [128, 256], - "llvm": "gather.q.{0.data_type_short}{0.width_suffix}", - "ret": ["s32x128", "f32x128"], - "args": ["0", "0SPc/S8", "s64", "0s->0", "S32/8"] - }, - { - "intrinsic": "{0.width_mm}_mask_i64gather_{0.data_type}", - "width": [128, 256], - "llvm": "gather.q.{0.data_type_short}{0.width_suffix}", - "ret": ["s64", "f64"], - "args": ["0", "0SPc/S8", "s64", "0s->0", "S32/8"] - }, - { - "intrinsic": "{0.width_mm}_maskload_{0.data_type}", - "width": [128, 256], - "llvm": "maskload.{0.data_type_short}{0.width_suffix}", - "ret": ["s(32-64)"], - "args": ["0Pc/S8", "0"] - }, - { - "intrinsic": "{2.width_mm}_maskstore_{2.data_type}", - "width": [128, 256], - "llvm": "maskstore.{2.data_type_short}{2.width_suffix}", - "ret": "V", - "args": ["S(32-64)Pm/S8", "1Dv", "2"] - }, - { - "intrinsic": "256_max_{0.data_type}", - "width": [256], - "llvm": "pmax{0.kind}.{0.data_type_short}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_min_{0.data_type}", - "width": [256], - "llvm": "pmin{0.kind}.{0.data_type_short}", - "ret": "i(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_movemask_epi8", - "width": [256], - "llvm": "pmovmskb", - "ret": "S32", - "args": ["s8"] - }, - { - "intrinsic": "256_mpsadbw_epu8", - "width": [256], - "llvm": "mpsadbw", - "ret": "u16", - "args": ["u8", "u8", "S32/8"] - }, - { - "intrinsic": "256_mul_{0.data_type}", - "width": [256], - "llvm": "pmul{0.data_type_short}.dq", - "ret": "i64", - "args": ["0dn", "0dn"] - }, - { - "intrinsic": "256_mulhi_{0.data_type}", - "width": [256], - "llvm": "pmulh{0.data_type_short}.w", - "ret": "i16", - "args": ["0", "0"] - }, - { - "intrinsic": "256_mulhrs_epi16", - "width": [256], - "llvm": "pmul.hr.sw", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "256_pack{0.kind_short}s_{1.data_type}", - "width": [256], - "llvm": "pack{0.kind}s{1.data_type_short}{0.data_type_short}", - "ret": "i(8-16)", - "args": ["0hws", "0hws"] - }, - { - "intrinsic": "256_permutevar8x32_{0.data_type}", - "width": [256], - "llvm": "perm{0.data_type_short}", - "ret": ["s32", "f32"], - "args": ["0", "0s"] - }, - { - "intrinsic": "256_sad_epu8", - 
"width": [256], - "llvm": "psad.bw", - "ret": "u64", - "args": ["0", "0"] - }, - { - "intrinsic": "256_shuffle_epi8", - "width": [256], - "llvm": "pshuf.b", - "ret": "s8", - "args": ["0", "0"] - }, - { - "intrinsic": "256_sign_{0.data_type}", - "width": [256], - "llvm": "psign.{0.data_type_short}", - "ret": "s(8-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "256_subs_{0.data_type}", - "width": [256], - "llvm": "psub{0.kind_short}s.{0.data_type_short}", - "ret": "i(8-16)", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/bmi.json b/src/etc/platform-intrinsics/x86/bmi.json deleted file mode 100644 index 24e2cbcf8aad1..0000000000000 --- a/src/etc/platform-intrinsics/x86/bmi.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "intrinsic_prefix": "_bmi", - "llvm_prefix": "llvm.x86.bmi.", - "intrinsics": [ - { - "intrinsic": "_bextr_{0.bitwidth}", - "width": ["0"], - "llvm": "bextr.{0.bitwidth}", - "ret": "S(32-64)u", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/bmi2.json b/src/etc/platform-intrinsics/x86/bmi2.json deleted file mode 100644 index f5a0db5ef51da..0000000000000 --- a/src/etc/platform-intrinsics/x86/bmi2.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "intrinsic_prefix": "_bmi2", - "llvm_prefix": "llvm.x86.bmi.", - "intrinsics": [ - { - "intrinsic": "_bzhi_{0.bitwidth}", - "width": ["0"], - "llvm": "bzhi.{0.bitwidth}", - "ret": "S(32-64)u", - "args": ["0", "0"] - }, - { - "intrinsic": "_pdep_{0.bitwidth}", - "width": ["0"], - "llvm": "pdep.{0.bitwidth}", - "ret": "S(32-64)u", - "args": ["0", "0"] - }, - { - "intrinsic": "_pext_{0.bitwidth}", - "width": ["0"], - "llvm": "pext.{0.bitwidth}", - "ret": "S(32-64)u", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/fma.json b/src/etc/platform-intrinsics/x86/fma.json deleted file mode 100644 index dcc26cd501c9b..0000000000000 --- a/src/etc/platform-intrinsics/x86/fma.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.fma.", - "intrinsics": [ - { - "intrinsic": "{0.width_mm}_fmadd_{0.data_type}", - "width": [128, 256], - "llvm": "vfmadd.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "{0.width_mm}_fmaddsub_{0.data_type}", - "width": [128, 256], - "llvm": "vfmaddsub.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "{0.width_mm}_fmsub_{0.data_type}", - "width": [128, 256], - "llvm": "vfmsub.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "{0.width_mm}_fmsubadd_{0.data_type}", - "width": [128, 256], - "llvm": "vfmsubadd.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "{0.width_mm}_fnmadd_{0.data_type}", - "width": [128, 256], - "llvm": "vfnmadd.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - }, - { - "intrinsic": "{0.width_mm}_fnmsub_{0.data_type}", - "width": [128, 256], - "llvm": "vfnmsub.{0.data_type_short}{0.width_suffix}", - "ret": "f(32-64)", - "args": ["0", "0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/info.json b/src/etc/platform-intrinsics/x86/info.json deleted file mode 100644 index 8e90b8579c49e..0000000000000 --- a/src/etc/platform-intrinsics/x86/info.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "platform": "x86", - "number_info": { - "signed": { - "kind": "s", - "kind_short": "", - "data_type": { "pattern": "epi{bitwidth}" 
}, - "bitwidth": { "pattern": "{bitwidth}" }, - "data_type_short": { "8": "b", "16": "w", "32": "d", "64": "q" } - }, - "unsigned": { - "kind": "u", - "kind_short": "u", - "data_type": { "pattern": "epu{bitwidth}" }, - "bitwidth": { "pattern": "{bitwidth}" }, - "data_type_short": { "8": "b", "16": "w", "32": "d", "64": "q" } - }, - "float": { - "kind": "f", - "data_type": { "32": "ps", "64": "pd" }, - "bitwidth": { "pattern": "{bitwidth}" }, - "data_type_short": { "32": "ps", "64": "pd" } - } - }, - "width_info": { - "32": { "width_mm": "32", "width_suffix": "" }, - "64": { "width_mm": "64", "width_suffix": "" }, - "128": { "width_mm": "", "width_suffix": "" }, - "256": { "width_mm": "256", "width_suffix": ".256" }, - "512": { "width_mm": "512", "width_suffix": ".512" } - } -} diff --git a/src/etc/platform-intrinsics/x86/rdrand.json b/src/etc/platform-intrinsics/x86/rdrand.json deleted file mode 100644 index fa2feb4224b9f..0000000000000 --- a/src/etc/platform-intrinsics/x86/rdrand.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "intrinsic_prefix": "_rdrand", - "llvm_prefix": "llvm.x86.rdrand.", - "intrinsics": [ - { - "intrinsic": "16_step", - "width": ["0"], - "llvm": "16", - "ret": "(U16,S32)", - "args": [] - }, - { - "intrinsic": "32_step", - "width": ["0"], - "llvm": "32", - "ret": "(U32,S32)", - "args": [] - }, - { - "intrinsic": "64_step", - "width": ["0"], - "llvm": "64", - "ret": "(U64,S32)", - "args": [] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/rdseed.json b/src/etc/platform-intrinsics/x86/rdseed.json deleted file mode 100644 index 7be64b583e0ac..0000000000000 --- a/src/etc/platform-intrinsics/x86/rdseed.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "intrinsic_prefix": "_rdseed", - "llvm_prefix": "llvm.x86.rdseed.", - "intrinsics": [ - { - "intrinsic": "16_step", - "width": ["0"], - "llvm": "16", - "ret": "(U16,S32)", - "args": [] - }, - { - "intrinsic": "32_step", - "width": ["0"], - "llvm": "32", - "ret": "(U32,S32)", - "args": [] - }, - { - "intrinsic": "64_step", - "width": ["0"], - "llvm": "64", - "ret": "(U64,S32)", - "args": [] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/sse.json b/src/etc/platform-intrinsics/x86/sse.json deleted file mode 100644 index d8eef8a3514ae..0000000000000 --- a/src/etc/platform-intrinsics/x86/sse.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.sse.", - "intrinsics": [ - { - "intrinsic": "_movemask_ps", - "width": [128], - "llvm": "movmsk.ps", - "ret": "S32", - "args": ["f32"] - }, - { - "intrinsic": "_max_ps", - "width": [128], - "llvm": "max.ps", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "_min_ps", - "width": [128], - "llvm": "min.ps", - "ret": "f32", - "args": ["0", "0"] - }, - { - "intrinsic": "_rsqrt_ps", - "width": [128], - "llvm": "rsqrt.ps", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "_rcp_ps", - "width": [128], - "llvm": "rcp.ps", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "_sqrt_ps", - "width": [128], - "llvm": "!llvm.sqrt.v4f32", - "ret": "f32", - "args": ["0"] - }, - { - "intrinsic": "_storeu_ps", - "width": [128], - "llvm": "storeu.ps", - "ret": "V", - "args": ["F32Pm/S8", "f32"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/sse2.json b/src/etc/platform-intrinsics/x86/sse2.json deleted file mode 100644 index 4d6317d80a57e..0000000000000 --- a/src/etc/platform-intrinsics/x86/sse2.json +++ /dev/null @@ -1,160 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.sse2.", - "intrinsics": [ - { - "intrinsic": 
"_adds_{0.data_type}", - "width": [128], - "llvm": "padd{0.kind_short}s.{0.data_type_short}", - "ret": "i(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "_avg_{0.data_type}", - "width": [128], - "llvm": "pavg.{0.data_type_short}", - "ret": "u(8-16)", - "args": ["0", "0"] - }, - { - "intrinsic": "_lfence", - "width": [128], - "llvm": "lfence", - "ret": "V", - "args": [] - }, - { - "intrinsic": "_madd_epi16", - "width": [128], - "llvm": "pmadd.wd", - "ret": "s32", - "args": ["s16", "s16"] - }, - { - "intrinsic": "_maskmoveu_si128", - "width": [128], - "llvm": "maskmov.dqu", - "ret": "V", - "args": ["u8", "u8", "U8Pm"] - }, - { - "intrinsic": "_max_{0.data_type}", - "width": [128], - "llvm": "pmax{0.kind}.{0.data_type_short}", - "ret": ["s16", "u8"], - "args": ["0", "0"] - }, - { - "intrinsic": "_max_pd", - "width": [128], - "llvm": "max.pd", - "ret": "f64", - "args": ["0", "0"] - }, - { - "intrinsic": "_mfence", - "width": [128], - "llvm": "fence", - "ret": "V", - "args": [] - }, - { - "intrinsic": "_min_{0.data_type}", - "width": [128], - "llvm": "pmin{0.kind}.{0.data_type_short}", - "ret": ["s16", "u8"], - "args": ["0", "0"] - }, - { - "intrinsic": "_min_pd", - "width": [128], - "llvm": "min.pd", - "ret": "f64", - "args": ["0", "0"] - }, - { - "intrinsic": "_movemask_pd", - "width": [128], - "llvm": "movmsk.pd", - "ret": "S32", - "args": ["f64"] - }, - { - "intrinsic": "_movemask_epi8", - "width": [128], - "llvm": "pmovmskb.128", - "ret": "S32", - "args": ["s8"] - }, - { - "intrinsic": "_mul_epu32", - "width": [128], - "llvm": "pmulu.dq", - "ret": "u64", - "args": ["0dn", "0dn"] - }, - { - "intrinsic": "_mulhi_{0.data_type}", - "width": [128], - "llvm": "pmulh{0.kind_short}.w", - "ret": "i16", - "args": ["0", "0"] - }, - { - "intrinsic": "_packs_{1.data_type}", - "width": [128], - "llvm": "packss{1.data_type_short}{0.data_type_short}.128", - "ret": "s(8-16)", - "args": ["0hw", "0hw"] - }, - { - "intrinsic": "_packus_epi16", - "width": [128], - "llvm": "packuswb.128", - "ret": "u8", - "args": ["s16", "s16"] - }, - { - "intrinsic": "_sad_epu8", - "width": [128], - "llvm": "psad.bw", - "ret": "u64", - "args": ["u8", "u8"] - }, - { - "intrinsic": "_sfence", - "width": [128], - "llvm": "sfence", - "ret": "V", - "args": [] - }, - { - "intrinsic": "_sqrt_pd", - "width": [128], - "llvm": "!llvm.sqrt.v2f64", - "ret": "f64", - "args": ["0"] - }, - { - "intrinsic": "_storeu_pd", - "width": [128], - "llvm": "storeu.pd", - "ret": "V", - "args": ["F64Pm/U8", "f64"] - }, - { - "intrinsic": "_storeu_si128", - "width": [128], - "llvm": "storeu.dq", - "ret": "V", - "args": ["u8Pm/U8", "u8"] - }, - { - "intrinsic": "_subs_{0.data_type}", - "width": [128], - "llvm": "psub{0.kind_short}s.{0.data_type_short}", - "ret": "i(8-16)", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/sse3.json b/src/etc/platform-intrinsics/x86/sse3.json deleted file mode 100644 index 119bf208f7e34..0000000000000 --- a/src/etc/platform-intrinsics/x86/sse3.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.sse3.", - "intrinsics": [ - { - "intrinsic": "_addsub_{0.data_type}", - "width": [128], - "llvm": "addsub.{0.data_type}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "_hadd_{0.data_type}", - "width": [128], - "llvm": "hadd.{0.data_type}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - "intrinsic": "_hsub_{0.data_type}", - "width": [128], - "llvm": "hsub.{0.data_type}", - "ret": "f(32-64)", - "args": ["0", "0"] - }, - { - 
"intrinsic": "_lddqu_si128", - "width": [128], - "llvm": "ldu.dq", - "ret": "u8", - "args": ["0Pc/S8"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/sse41.json b/src/etc/platform-intrinsics/x86/sse41.json deleted file mode 100644 index b499637e0d3c6..0000000000000 --- a/src/etc/platform-intrinsics/x86/sse41.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.sse41.", - "intrinsics": [ - { - "intrinsic": "_dp_{0.data_type}", - "width": [128], - "llvm": "dp{0.data_type}", - "ret": "f(32-64)", - "args": ["0", "0", "S32/8"] - }, - { - "intrinsic": "_max_{0.data_type}", - "width": [128], - "llvm": "pmax{0.kind}{0.data_type_short}", - "ret": ["s8", "u16", "i32"], - "args": ["0", "0"] - }, - { - "intrinsic": "_min_{0.data_type}", - "width": [128], - "llvm": "pmin{0.kind}{0.data_type_short}", - "ret": ["s8", "u16", "i32"], - "args": ["0", "0"] - }, - { - "intrinsic": "_minpos_epu16", - "width": [128], - "llvm": "phminposuw", - "ret": "u16", - "args": ["0"] - }, - { - "intrinsic": "_mpsadbw_epu8", - "width": [128], - "llvm": "mpsadbw", - "ret": "u16", - "args": ["u8", "u8", "S32/8"] - }, - { - "intrinsic": "_mul_epi32", - "width": [128], - "llvm": "pmuldq", - "ret": "s64", - "args": ["s32", "s32"] - }, - { - "intrinsic": "_packus_epi32", - "width": [128], - "llvm": "packusdw", - "ret": "u16", - "args": ["s32", "s32"] - }, - { - "intrinsic": "_testc_si128", - "width": [128], - "llvm": "ptestc", - "ret": "S32", - "args": ["u64", "u64"] - }, - { - "intrinsic": "_testnzc_si128", - "width": [128], - "llvm": "ptestnzc", - "ret": "S32", - "args": ["u64", "u64"] - }, - { - "intrinsic": "_testz_si128", - "width": [128], - "llvm": "ptestz", - "ret": "S32", - "args": ["u64", "u64"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/sse42.json b/src/etc/platform-intrinsics/x86/sse42.json deleted file mode 100644 index fdee9c8a6671b..0000000000000 --- a/src/etc/platform-intrinsics/x86/sse42.json +++ /dev/null @@ -1,104 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.sse42.", - "intrinsics": [ - { - "intrinsic": "_cmpestra", - "width": [128], - "llvm": "pcmpestria128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestrc", - "width": [128], - "llvm": "pcmpestric128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestri", - "width": [128], - "llvm": "pcmpestri128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestrm", - "width": [128], - "llvm": "pcmpestrm128", - "ret": "s8", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestro", - "width": [128], - "llvm": "pcmpestrio128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestrs", - "width": [128], - "llvm": "pcmpestris128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpestrz", - "width": [128], - "llvm": "pcmpestriz128", - "ret": "S32", - "args": ["s8", "S32", "s8", "S32", "S32/8"] - }, - { - "intrinsic": "_cmpistra", - "width": [128], - "llvm": "pcmpistria128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistrc", - "width": [128], - "llvm": "pcmpistric128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistri", - "width": [128], - "llvm": "pcmpistri128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistrm", - "width": [128], - "llvm": "pcmpistrm128", - 
"ret": "s8", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistro", - "width": [128], - "llvm": "pcmpistrio128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistrs", - "width": [128], - "llvm": "pcmpistris128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - }, - { - "intrinsic": "_cmpistrz", - "width": [128], - "llvm": "pcmpistriz128", - "ret": "S32", - "args": ["s8", "s8", "S32/8"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/ssse3.json b/src/etc/platform-intrinsics/x86/ssse3.json deleted file mode 100644 index 5a5617957b3e5..0000000000000 --- a/src/etc/platform-intrinsics/x86/ssse3.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "intrinsic_prefix": "_mm", - "llvm_prefix": "llvm.x86.ssse3.", - "intrinsics": [ - { - "intrinsic": "_abs_{0.data_type}", - "width": [128], - "llvm": "pabs.{0.data_type_short}.128", - "ret": "s(8-32)", - "args": ["0"] - }, - { - "intrinsic": "_hadd_{0.data_type}", - "width": [128], - "llvm": "phadd.{0.data_type_short}.128", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "_hadds_epi16", - "width": [128], - "llvm": "phadd.sw.128", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "_hsub_{0.data_type}", - "width": [128], - "llvm": "phsub.{0.data_type_short}.128", - "ret": "s(16-32)", - "args": ["0", "0"] - }, - { - "intrinsic": "_hsubs_epi16", - "width": [128], - "llvm": "phsub.sw.128", - "ret": "s16", - "args": ["0", "0"] - }, - { - "intrinsic": "_maddubs_epi16", - "width": [128], - "llvm": "pmadd.ub.sw.128", - "ret": "s16", - "args": ["u8", "s8"] - }, - { - "intrinsic": "_mulhrs_epi16", - "width": [128], - "llvm": "pmul.hr.sw.128", - "ret": "s16", - "args": ["s16", "s16"] - }, - { - "intrinsic": "_shuffle_epi8", - "width": [128], - "llvm": "pshuf.b.128", - "ret": "s8", - "args": ["s8", "s8"] - }, - { - "intrinsic": "_sign_{0.data_type}", - "width": [128], - "llvm": "psign.{0.data_type_short}.128", - "ret": "s(8-32)", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/platform-intrinsics/x86/tbm.json b/src/etc/platform-intrinsics/x86/tbm.json deleted file mode 100644 index d1322cd60c4cf..0000000000000 --- a/src/etc/platform-intrinsics/x86/tbm.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "intrinsic_prefix": "_tbm", - "llvm_prefix": "llvm.x86.tbm.", - "intrinsics": [ - { - "intrinsic": "_bextri_u{0.bitwidth}", - "width": ["0"], - "llvm": "bextri.u{0.bitwidth}", - "ret": "S(32-64)u", - "args": ["0", "0"] - } - ] -} diff --git a/src/etc/rust-gdb b/src/etc/rust-gdb index 743952a5bef89..23ba93da8e529 100755 --- a/src/etc/rust-gdb +++ b/src/etc/rust-gdb @@ -1,14 +1,4 @@ #!/bin/sh -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. - # Exit if anything fails set -e diff --git a/src/etc/rust-gdbgui b/src/etc/rust-gdbgui index 7e179ba927dff..08d598cde1c3d 100755 --- a/src/etc/rust-gdbgui +++ b/src/etc/rust-gdbgui @@ -1,13 +1,4 @@ #!/bin/sh -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. 
# Exit if anything fails set -e diff --git a/src/etc/rust-lldb b/src/etc/rust-lldb index 6ed8210349e17..424302d495f3e 100755 --- a/src/etc/rust-lldb +++ b/src/etc/rust-lldb @@ -1,13 +1,4 @@ #!/bin/sh -# Copyright 2014 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # Exit if anything fails set -e diff --git a/src/etc/rust-windbg.cmd b/src/etc/rust-windbg.cmd index 8da4efb58039d..2deb2a03a3976 100644 --- a/src/etc/rust-windbg.cmd +++ b/src/etc/rust-windbg.cmd @@ -1,16 +1,6 @@ @echo off setlocal -REM Copyright 2014 The Rust Project Developers. See the COPYRIGHT -REM file at the top-level directory of this distribution and at -REM http://rust-lang.org/COPYRIGHT. -REM -REM Licensed under the Apache License, Version 2.0 or the MIT license -REM , at your -REM option. This file may not be copied, modified, or distributed -REM except according to those terms. - for /f "delims=" %%i in ('rustc --print=sysroot') do set rustc_sysroot=%%i set rust_etc=%rustc_sysroot%\lib\rustlib\etc diff --git a/src/etc/test-float-parse/_common.rs b/src/etc/test-float-parse/_common.rs index 725a715f7cf32..9cbad5486b485 100644 --- a/src/etc/test-float-parse/_common.rs +++ b/src/etc/test-float-parse/_common.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::io; use std::io::prelude::*; use std::mem::transmute; diff --git a/src/etc/test-float-parse/few-ones.rs b/src/etc/test-float-parse/few-ones.rs index 2486df4446631..2d82918aabbdb 100644 --- a/src/etc/test-float-parse/few-ones.rs +++ b/src/etc/test-float-parse/few-ones.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/test-float-parse/huge-pow10.rs b/src/etc/test-float-parse/huge-pow10.rs index 9d12a03dae290..9a16d9c60280d 100644 --- a/src/etc/test-float-parse/huge-pow10.rs +++ b/src/etc/test-float-parse/huge-pow10.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/test-float-parse/long-fractions.rs b/src/etc/test-float-parse/long-fractions.rs index 9598bd12a0d45..55e4f07cd0cc3 100644 --- a/src/etc/test-float-parse/long-fractions.rs +++ b/src/etc/test-float-parse/long-fractions.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use std::char; diff --git a/src/etc/test-float-parse/many-digits.rs b/src/etc/test-float-parse/many-digits.rs index 469a38da2b831..76da818c7091f 100644 --- a/src/etc/test-float-parse/many-digits.rs +++ b/src/etc/test-float-parse/many-digits.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - extern crate rand; mod _common; diff --git a/src/etc/test-float-parse/rand-f64.rs b/src/etc/test-float-parse/rand-f64.rs index 2994dd9d5e644..39ad63945cd11 100644 --- a/src/etc/test-float-parse/rand-f64.rs +++ b/src/etc/test-float-parse/rand-f64.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - extern crate rand; mod _common; diff --git a/src/etc/test-float-parse/runtests.py b/src/etc/test-float-parse/runtests.py index e9f5bba2312d8..852bc77589616 100644 --- a/src/etc/test-float-parse/runtests.py +++ b/src/etc/test-float-parse/runtests.py @@ -1,14 +1,4 @@ #!/usr/bin/env python2.7 -# -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. """ Testing dec2flt diff --git a/src/etc/test-float-parse/short-decimals.rs b/src/etc/test-float-parse/short-decimals.rs index 4909f7c58f89a..8b9b6f78ae35a 100644 --- a/src/etc/test-float-parse/short-decimals.rs +++ b/src/etc/test-float-parse/short-decimals.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/test-float-parse/subnorm.rs b/src/etc/test-float-parse/subnorm.rs index 04a7cc2746675..448482c6eb817 100644 --- a/src/etc/test-float-parse/subnorm.rs +++ b/src/etc/test-float-parse/subnorm.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- mod _common; use std::mem::transmute; diff --git a/src/etc/test-float-parse/tiny-pow10.rs b/src/etc/test-float-parse/tiny-pow10.rs index 50ca5e32609ac..43fad5b49eee6 100644 --- a/src/etc/test-float-parse/tiny-pow10.rs +++ b/src/etc/test-float-parse/tiny-pow10.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/test-float-parse/u32-small.rs b/src/etc/test-float-parse/u32-small.rs index 571ac80e5b0c5..3ae62425adf0b 100644 --- a/src/etc/test-float-parse/u32-small.rs +++ b/src/etc/test-float-parse/u32-small.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/test-float-parse/u64-pow2.rs b/src/etc/test-float-parse/u64-pow2.rs index 5b25c8399319c..1c9bda948ffd3 100644 --- a/src/etc/test-float-parse/u64-pow2.rs +++ b/src/etc/test-float-parse/u64-pow2.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod _common; use _common::validate; diff --git a/src/etc/wasm32-shim.js b/src/etc/wasm32-shim.js index 378aae5973316..2a89c0d321d6a 100644 --- a/src/etc/wasm32-shim.js +++ b/src/etc/wasm32-shim.js @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // This is a small "shim" program which is used when wasm32 unit tests are run // in this repository. This program is intended to be run in node.js and will // load a wasm module into memory, instantiate it with a set of imports, and diff --git a/src/grammar/lexer.l b/src/grammar/lexer.l index 2f282c8281d6e..1feb781b2b39f 100644 --- a/src/grammar/lexer.l +++ b/src/grammar/lexer.l @@ -1,14 +1,4 @@ %{ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #include #include diff --git a/src/grammar/parser-lalr-main.c b/src/grammar/parser-lalr-main.c index db88a1f2999aa..6348190cc140b 100644 --- a/src/grammar/parser-lalr-main.c +++ b/src/grammar/parser-lalr-main.c @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #include #include #include diff --git a/src/grammar/parser-lalr.y b/src/grammar/parser-lalr.y index 8ea1cb26dc0c7..5585c95a5a63a 100644 --- a/src/grammar/parser-lalr.y +++ b/src/grammar/parser-lalr.y @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - %{ #define YYERROR_VERBOSE #define YYSTYPE struct node * diff --git a/src/grammar/testparser.py b/src/grammar/testparser.py index 37be41b935f84..4b5a7fb9e10b5 100755 --- a/src/grammar/testparser.py +++ b/src/grammar/testparser.py @@ -1,14 +1,4 @@ #!/usr/bin/env python -# -# Copyright 2015 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # ignore-tidy-linelength diff --git a/src/grammar/tokens.h b/src/grammar/tokens.h index 15ea738ed0057..297e3dc841e87 100644 --- a/src/grammar/tokens.h +++ b/src/grammar/tokens.h @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - enum Token { SHL = 257, // Parser generators reserve 0-256 for char literals SHR, diff --git a/src/liballoc/Cargo.toml b/src/liballoc/Cargo.toml index b2eb3566c04a7..ddf0044c506f8 100644 --- a/src/liballoc/Cargo.toml +++ b/src/liballoc/Cargo.toml @@ -4,6 +4,7 @@ name = "alloc" version = "0.0.0" autotests = false autobenches = false +edition = "2018" [lib] name = "alloc" @@ -11,10 +12,11 @@ path = "lib.rs" [dependencies] core = { path = "../libcore" } -compiler_builtins = { version = "0.1.0", features = ['rustc-dep-of-std'] } +compiler_builtins = { version = "0.1.10", features = ['rustc-dep-of-std'] } [dev-dependencies] rand = "0.6" +rand_xorshift = "0.1" [[test]] name = "collectionstests" diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs index 1a8a081e16fad..f3877e51a6633 100644 --- a/src/liballoc/alloc.rs +++ b/src/liballoc/alloc.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
Memory allocation APIs #![stable(feature = "alloc_module", since = "1.28.0")] @@ -44,6 +34,9 @@ extern "Rust" { /// This type implements the [`Alloc`] trait by forwarding calls /// to the allocator registered with the `#[global_allocator]` attribute /// if there is one, or the `std` crate’s default. +/// +/// Note: while this type is unstable, the functionality it provides can be +/// accessed through the [free functions in `alloc`](index.html#functions). #[unstable(feature = "allocator_api", issue = "32838")] #[derive(Copy, Clone, Default, Debug)] pub struct Global; @@ -237,9 +230,9 @@ pub fn handle_alloc_error(layout: Layout) -> ! { #[cfg(test)] mod tests { extern crate test; - use self::test::Bencher; - use boxed::Box; - use alloc::{Global, Alloc, Layout, handle_alloc_error}; + use test::Bencher; + use crate::boxed::Box; + use crate::alloc::{Global, Alloc, Layout, handle_alloc_error}; #[test] fn allocate_zeroed() { diff --git a/src/liballoc/benches/btree/map.rs b/src/liballoc/benches/btree/map.rs index 20b9091a07bfc..4c17bdc3e9e9d 100644 --- a/src/liballoc/benches/btree/map.rs +++ b/src/liballoc/benches/btree/map.rs @@ -1,18 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - use std::iter::Iterator; use std::vec::Vec; use std::collections::BTreeMap; -use rand::{Rng, thread_rng}; + +use rand::{Rng, seq::SliceRandom, thread_rng}; use test::{Bencher, black_box}; macro_rules! map_insert_rand_bench { @@ -78,7 +68,7 @@ macro_rules! map_find_rand_bench { map.insert(k, k); } - rng.shuffle(&mut keys); + keys.shuffle(&mut rng); // measure let mut i = 0; diff --git a/src/liballoc/benches/btree/mod.rs b/src/liballoc/benches/btree/mod.rs index f436b0ac0c037..095ca5dd2e21b 100644 --- a/src/liballoc/benches/btree/mod.rs +++ b/src/liballoc/benches/btree/mod.rs @@ -1,11 +1,2 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
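// Editor's illustrative sketch (separate from the diff): the note added to
// alloc.rs above points at the stable free functions that forward to the same
// registered global allocator as the unstable `Global` type. A minimal
// round-trip through those free functions looks like this:
use std::alloc::{alloc, dealloc, Layout};

fn main() {
    let layout = Layout::new::<u64>();
    unsafe {
        let ptr = alloc(layout) as *mut u64;
        // A null return means the allocation failed; real code would call
        // std::alloc::handle_alloc_error(layout) here instead of asserting.
        assert!(!ptr.is_null());
        ptr.write(7);
        assert_eq!(ptr.read(), 7);
        dealloc(ptr as *mut u8, layout);
    }
}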
- mod map; +mod set; diff --git a/src/liballoc/benches/btree/set.rs b/src/liballoc/benches/btree/set.rs new file mode 100644 index 0000000000000..6357ea3ea11bd --- /dev/null +++ b/src/liballoc/benches/btree/set.rs @@ -0,0 +1,88 @@ +use std::collections::BTreeSet; + +use rand::{thread_rng, Rng}; +use test::{black_box, Bencher}; + +fn random(n: usize) -> BTreeSet { + let mut rng = thread_rng(); + let mut set = BTreeSet::new(); + while set.len() < n { + set.insert(rng.gen()); + } + assert_eq!(set.len(), n); + set +} + +fn neg(n: usize) -> BTreeSet { + let mut set = BTreeSet::new(); + for i in -(n as i32)..=-1 { + set.insert(i); + } + assert_eq!(set.len(), n); + set +} + +fn pos(n: usize) -> BTreeSet { + let mut set = BTreeSet::new(); + for i in 1..=(n as i32) { + set.insert(i); + } + assert_eq!(set.len(), n); + set +} + + +fn stagger(n1: usize, factor: usize) -> [BTreeSet; 2] { + let n2 = n1 * factor; + let mut sets = [BTreeSet::new(), BTreeSet::new()]; + for i in 0..(n1 + n2) { + let b = i % (factor + 1) != 0; + sets[b as usize].insert(i as u32); + } + assert_eq!(sets[0].len(), n1); + assert_eq!(sets[1].len(), n2); + sets +} + +macro_rules! set_bench { + ($name: ident, $set_func: ident, $result_func: ident, $sets: expr) => { + #[bench] + pub fn $name(b: &mut Bencher) { + // setup + let sets = $sets; + + // measure + b.iter(|| { + let x = sets[0].$set_func(&sets[1]).$result_func(); + black_box(x); + }) + } + }; +} + +set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]} +set_bench! {intersection_100_neg_vs_10k_pos, intersection, count, [neg(100), pos(10_000)]} +set_bench! {intersection_100_pos_vs_100_neg, intersection, count, [pos(100), neg(100)]} +set_bench! {intersection_100_pos_vs_10k_neg, intersection, count, [pos(100), neg(10_000)]} +set_bench! {intersection_10k_neg_vs_100_pos, intersection, count, [neg(10_000), pos(100)]} +set_bench! {intersection_10k_neg_vs_10k_pos, intersection, count, [neg(10_000), pos(10_000)]} +set_bench! {intersection_10k_pos_vs_100_neg, intersection, count, [pos(10_000), neg(100)]} +set_bench! {intersection_10k_pos_vs_10k_neg, intersection, count, [pos(10_000), neg(10_000)]} +set_bench! {intersection_random_100_vs_100, intersection, count, [random(100), random(100)]} +set_bench! {intersection_random_100_vs_10k, intersection, count, [random(100), random(10_000)]} +set_bench! {intersection_random_10k_vs_100, intersection, count, [random(10_000), random(100)]} +set_bench! {intersection_random_10k_vs_10k, intersection, count, [random(10_000), random(10_000)]} +set_bench! {intersection_staggered_100_vs_100, intersection, count, stagger(100, 1)} +set_bench! {intersection_staggered_10k_vs_10k, intersection, count, stagger(10_000, 1)} +set_bench! {intersection_staggered_100_vs_10k, intersection, count, stagger(100, 100)} +set_bench! {difference_random_100_vs_100, difference, count, [random(100), random(100)]} +set_bench! {difference_random_100_vs_10k, difference, count, [random(100), random(10_000)]} +set_bench! {difference_random_10k_vs_100, difference, count, [random(10_000), random(100)]} +set_bench! {difference_random_10k_vs_10k, difference, count, [random(10_000), random(10_000)]} +set_bench! {difference_staggered_100_vs_100, difference, count, stagger(100, 1)} +set_bench! {difference_staggered_10k_vs_10k, difference, count, stagger(10_000, 1)} +set_bench! {difference_staggered_100_vs_10k, difference, count, stagger(100, 100)} +set_bench! {is_subset_100_vs_100, is_subset, clone, [pos(100), pos(100)]} +set_bench! 
{is_subset_100_vs_10k, is_subset, clone, [pos(100), pos(10_000)]} +set_bench! {is_subset_10k_vs_100, is_subset, clone, [pos(10_000), pos(100)]} +set_bench! {is_subset_10k_vs_10k, is_subset, clone, [pos(10_000), pos(10_000)]} diff --git a/src/liballoc/benches/lib.rs b/src/liballoc/benches/lib.rs index b4f4fd74f3a39..4bf5ec10c41e7 100644 --- a/src/liballoc/benches/lib.rs +++ b/src/liballoc/benches/lib.rs @@ -1,18 +1,6 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![feature(repr_simd)] -#![feature(slice_sort_by_cached_key)] #![feature(test)] -extern crate rand; extern crate test; mod btree; diff --git a/src/liballoc/benches/linked_list.rs b/src/liballoc/benches/linked_list.rs index bbac44553f18a..29c5ad2bc6eb2 100644 --- a/src/liballoc/benches/linked_list.rs +++ b/src/liballoc/benches/linked_list.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::LinkedList; use test::Bencher; diff --git a/src/liballoc/benches/slice.rs b/src/liballoc/benches/slice.rs index 490320f57cbf7..f17fb8212ce19 100644 --- a/src/liballoc/benches/slice.rs +++ b/src/liballoc/benches/slice.rs @@ -1,19 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rand::{thread_rng}; -use std::mem; -use std::ptr; - -use rand::{Rng, SeedableRng, XorShiftRng}; +use std::{mem, ptr}; + +use rand::{thread_rng, Rng, SeedableRng}; use rand::distributions::{Standard, Alphanumeric}; +use rand_xorshift::XorShiftRng; use test::{Bencher, black_box}; #[bench] diff --git a/src/liballoc/benches/str.rs b/src/liballoc/benches/str.rs index 38c94d4d8b5f3..7f8661bd96888 100644 --- a/src/liballoc/benches/str.rs +++ b/src/liballoc/benches/str.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
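// Editor's illustrative sketch (separate from the diff): each `set_bench!`
// invocation in the new benches/btree/set.rs expands to a #[bench] function of
// roughly this shape (hand-expanded here, with the neg(100)/pos(100) helper
// calls inlined as ranges). Like the bench file itself, this needs the
// nightly-only `test` crate and a `--test` harness to run.
#![feature(test)]
extern crate test;

use std::collections::BTreeSet;
use test::{black_box, Bencher};

#[bench]
pub fn intersection_100_neg_vs_100_pos(b: &mut Bencher) {
    // setup: build the two input sets once, outside the timed closure
    let sets: [BTreeSet<i32>; 2] = [(-100..=-1).collect(), (1..=100).collect()];

    // measure: count the intersection on every iteration
    b.iter(|| {
        let x = sets[0].intersection(&sets[1]).count();
        black_box(x);
    })
}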
- use test::{Bencher, black_box}; #[bench] @@ -274,11 +264,11 @@ make_test!(split_a_str, s, s.split("a").count()); make_test!(trim_ascii_char, s, { s.trim_matches(|c: char| c.is_ascii()) }); -make_test!(trim_left_ascii_char, s, { - s.trim_left_matches(|c: char| c.is_ascii()) +make_test!(trim_start_ascii_char, s, { + s.trim_start_matches(|c: char| c.is_ascii()) }); -make_test!(trim_right_ascii_char, s, { - s.trim_right_matches(|c: char| c.is_ascii()) +make_test!(trim_end_ascii_char, s, { + s.trim_end_matches(|c: char| c.is_ascii()) }); make_test!(find_underscore_char, s, s.find('_')); diff --git a/src/liballoc/benches/string.rs b/src/liballoc/benches/string.rs index 36be21d978e1f..2933014cb58e9 100644 --- a/src/liballoc/benches/string.rs +++ b/src/liballoc/benches/string.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::iter::repeat; use test::Bencher; diff --git a/src/liballoc/benches/vec.rs b/src/liballoc/benches/vec.rs index 414901170683e..590c49f4ef500 100644 --- a/src/liballoc/benches/vec.rs +++ b/src/liballoc/benches/vec.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use test::Bencher; use std::iter::{FromIterator, repeat}; diff --git a/src/liballoc/benches/vec_deque.rs b/src/liballoc/benches/vec_deque.rs index 380645e7cd03a..7d2d3cfa61225 100644 --- a/src/liballoc/benches/vec_deque.rs +++ b/src/liballoc/benches/vec_deque.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::VecDeque; use test::{Bencher, black_box}; @@ -55,3 +45,10 @@ fn bench_mut_iter_1000(b: &mut Bencher) { black_box(sum); }) } + +#[bench] +fn bench_try_fold(b: &mut Bencher) { + let ring: VecDeque<_> = (0..1000).collect(); + + b.iter(|| black_box(ring.iter().try_fold(0, |a, b| Some(a + b)))) +} diff --git a/src/liballoc/benches/vec_deque_append.rs b/src/liballoc/benches/vec_deque_append.rs index bd33565113752..78ec91d9e3e9b 100644 --- a/src/liballoc/benches/vec_deque_append.rs +++ b/src/liballoc/benches/vec_deque_append.rs @@ -1,14 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
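// Editor's illustrative sketch (separate from the diff): the benches/str.rs
// hunk above tracks the library rename of the directional trim methods
// (trim_left* -> trim_start*, trim_right* -> trim_end*); the older names were
// kept for compatibility but deprecated in favour of the start/end spellings.
fn main() {
    let s = "  padded  ";
    assert_eq!(s.trim_start(), "padded  ");
    assert_eq!(s.trim_end(), "  padded");
    // The *_matches variants used in the benchmark take a pattern, e.g. a closure:
    assert_eq!(s.trim_start_matches(|c: char| c == ' '), "padded  ");
}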
- -#![feature(duration_as_u128)] use std::{collections::VecDeque, time::Instant}; const VECDEQUE_LEN: i32 = 100000; diff --git a/src/liballoc/borrow.rs b/src/liballoc/borrow.rs index 5ae5339138fbe..ee1799fad8e15 100644 --- a/src/liballoc/borrow.rs +++ b/src/liballoc/borrow.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A module for working with borrowed data. #![stable(feature = "rust1", since = "1.0.0")] @@ -16,14 +6,14 @@ use core::cmp::Ordering; use core::hash::{Hash, Hasher}; use core::ops::{Add, AddAssign, Deref}; -use fmt; -use string::String; - -use self::Cow::*; - #[stable(feature = "rust1", since = "1.0.0")] pub use core::borrow::{Borrow, BorrowMut}; +use crate::fmt; +use crate::string::String; + +use Cow::*; + #[stable(feature = "rust1", since = "1.0.0")] impl<'a, B: ?Sized> Borrow for Cow<'a, B> where B: ToOwned, @@ -145,13 +135,13 @@ impl ToOwned for T /// Another example showing how to keep `Cow` in a struct: /// /// ``` -/// use std::borrow::{Cow, ToOwned}; +/// use std::borrow::Cow; /// -/// struct Items<'a, X: 'a> where [X]: ToOwned> { +/// struct Items<'a, X: 'a> where [X]: ToOwned> { /// values: Cow<'a, [X]>, /// } /// -/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned> { +/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned> { /// fn new(v: Cow<'a, [X]>) -> Self { /// Items { values: v } /// } @@ -192,10 +182,8 @@ pub enum Cow<'a, B: ?Sized + 'a> } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> Clone for Cow<'a, B> - where B: ToOwned -{ - fn clone(&self) -> Cow<'a, B> { +impl Clone for Cow<'_, B> { + fn clone(&self) -> Self { match *self { Borrowed(b) => Borrowed(b), Owned(ref o) => { @@ -205,7 +193,7 @@ impl<'a, B: ?Sized> Clone for Cow<'a, B> } } - fn clone_from(&mut self, source: &Cow<'a, B>) { + fn clone_from(&mut self, source: &Self) { if let Owned(ref mut dest) = *self { if let Owned(ref o) = *source { o.borrow().clone_into(dest); @@ -217,9 +205,7 @@ impl<'a, B: ?Sized> Clone for Cow<'a, B> } } -impl<'a, B: ?Sized> Cow<'a, B> - where B: ToOwned -{ +impl Cow<'_, B> { /// Acquires a mutable reference to the owned form of the data. /// /// Clones the data if it is not already owned. 
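// Editor's illustrative sketch (separate from the diff): the borrow.rs hunks
// above largely tighten the impl signatures; the behaviour they document is
// unchanged -- a Cow stays Borrowed until mutable access is needed, and
// to_mut() clones the borrowed data at that point.
use std::borrow::Cow;

fn ensure_trailing_slash(path: &str) -> Cow<'_, str> {
    if path.ends_with('/') {
        Cow::Borrowed(path) // no allocation
    } else {
        Cow::Owned(format!("{}/", path)) // allocates only when needed
    }
}

fn main() {
    assert!(matches!(ensure_trailing_slash("dir/"), Cow::Borrowed(_)));
    assert!(matches!(ensure_trailing_slash("dir"), Cow::Owned(_)));

    // to_mut() converts Borrowed -> Owned by cloning, then hands out &mut.
    let mut c: Cow<'_, str> = Cow::Borrowed("abc");
    c.to_mut().push_str("def");
    assert_eq!(c, "abcdef");
}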
@@ -295,9 +281,7 @@ impl<'a, B: ?Sized> Cow<'a, B> } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> Deref for Cow<'a, B> - where B: ToOwned -{ +impl Deref for Cow<'_, B> { type Target = B; fn deref(&self) -> &B { @@ -309,14 +293,14 @@ impl<'a, B: ?Sized> Deref for Cow<'a, B> } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> Eq for Cow<'a, B> where B: Eq + ToOwned {} +impl Eq for Cow<'_, B> where B: Eq + ToOwned {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> Ord for Cow<'a, B> +impl Ord for Cow<'_, B> where B: Ord + ToOwned { #[inline] - fn cmp(&self, other: &Cow<'a, B>) -> Ordering { + fn cmp(&self, other: &Self) -> Ordering { Ord::cmp(&**self, &**other) } } @@ -343,11 +327,11 @@ impl<'a, B: ?Sized> PartialOrd for Cow<'a, B> } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> fmt::Debug for Cow<'a, B> +impl fmt::Debug for Cow<'_, B> where B: fmt::Debug + ToOwned, ::Owned: fmt::Debug { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Debug::fmt(b, f), Owned(ref o) => fmt::Debug::fmt(o, f), @@ -356,11 +340,11 @@ impl<'a, B: ?Sized> fmt::Debug for Cow<'a, B> } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> +impl fmt::Display for Cow<'_, B> where B: fmt::Display + ToOwned, ::Owned: fmt::Display { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Borrowed(ref b) => fmt::Display::fmt(b, f), Owned(ref o) => fmt::Display::fmt(o, f), @@ -369,18 +353,18 @@ impl<'a, B: ?Sized> fmt::Display for Cow<'a, B> } #[stable(feature = "default", since = "1.11.0")] -impl<'a, B: ?Sized> Default for Cow<'a, B> +impl Default for Cow<'_, B> where B: ToOwned, ::Owned: Default { /// Creates an owned Cow<'a, B> with the default value for the contained owned value. - fn default() -> Cow<'a, B> { + fn default() -> Self { Owned(::Owned::default()) } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, B: ?Sized> Hash for Cow<'a, B> +impl Hash for Cow<'_, B> where B: Hash + ToOwned { #[inline] @@ -390,8 +374,7 @@ impl<'a, B: ?Sized> Hash for Cow<'a, B> } #[stable(feature = "rust1", since = "1.0.0")] -#[allow(deprecated)] -impl<'a, T: ?Sized + ToOwned> AsRef for Cow<'a, T> { +impl AsRef for Cow<'_, T> { fn as_ref(&self) -> &T { self } diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 83adcce5c742c..6a6a9146e2432 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -1,19 +1,19 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A pointer type for heap allocation. //! //! `Box`, casually referred to as a 'box', provides the simplest form of //! heap allocation in Rust. Boxes provide ownership for this allocation, and //! drop their contents when they go out of scope. //! +//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for +//! its allocation. It is valid to convert both ways between a [`Box`] and a +//! raw pointer allocated with the [`Global`] allocator, given that the +//! [`Layout`] used with the allocator is correct for the type. 
More precisely, +//! a `value: *mut T` that has been allocated with the [`Global`] allocator +//! with `Layout::for_value(&*value)` may be converted into a box using +//! `Box::::from_raw(value)`. Conversely, the memory backing a `value: *mut +//! T` obtained from `Box::::into_raw` may be deallocated using the +//! [`Global`] allocator with `Layout::for_value(&*value)`. +//! //! # Examples //! //! Move a value from the stack to the heap by creating a [`Box`]: @@ -63,6 +63,8 @@ //! //! [dereferencing]: ../../std/ops/trait.Deref.html //! [`Box`]: struct.Box.html +//! [`Global`]: ../alloc/struct.Global.html +//! [`Layout`]: ../alloc/struct.Layout.html #![stable(feature = "rust1", since = "1.0.0")] @@ -77,13 +79,15 @@ use core::iter::{Iterator, FromIterator, FusedIterator}; use core::marker::{Unpin, Unsize}; use core::mem; use core::pin::Pin; -use core::ops::{CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Generator, GeneratorState}; +use core::ops::{ + CoerceUnsized, DispatchFromDyn, Deref, DerefMut, Receiver, Generator, GeneratorState +}; use core::ptr::{self, NonNull, Unique}; -use core::task::{LocalWaker, Poll}; +use core::task::{Context, Poll}; -use vec::Vec; -use raw_vec::RawVec; -use str::from_boxed_utf8_unchecked; +use crate::vec::Vec; +use crate::raw_vec::RawVec; +use crate::str::from_boxed_utf8_unchecked; /// A pointer type for heap allocation. /// @@ -109,9 +113,11 @@ impl Box { box x } - #[unstable(feature = "pin", issue = "49150")] + /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then + /// `x` will be pinned in memory and unable to be moved. + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub fn pinned(x: T) -> Pin> { + pub fn pin(x: T) -> Pin> { (box x).into() } } @@ -208,10 +214,15 @@ impl Box { #[unstable(feature = "ptr_internals", issue = "0", reason = "use into_raw_non_null instead")] #[inline] #[doc(hidden)] - pub fn into_unique(b: Box) -> Unique { - let unique = b.0; + pub fn into_unique(mut b: Box) -> Unique { + // Box is kind-of a library type, but recognized as a "unique pointer" by + // Stacked Borrows. This function here corresponds to "reborrowing to + // a raw pointer", but there is no actual reborrow here -- so + // without some care, the pointer we are returning here still carries + // the `Uniq` tag. We round-trip through a mutable reference to avoid that. + let unique = unsafe { b.0.as_mut() as *mut T }; mem::forget(b); - unique + unsafe { Unique::new_unchecked(unique) } } /// Consumes and leaks the `Box`, returning a mutable reference, @@ -263,6 +274,19 @@ impl Box { { unsafe { &mut *Box::into_raw(b) } } + + /// Converts a `Box` into a `Pin>` + /// + /// This conversion does not allocate on the heap and happens in place. + /// + /// This is also available via [`From`]. + #[unstable(feature = "box_into_pin", issue = "0")] + pub fn into_pin(boxed: Box) -> Pin> { + // It's not possible to move or replace the insides of a `Pin>` + // when `T: !Unpin`, so it's safe to pin it directly without any + // additional requirements. 
+ unsafe { Pin::new_unchecked(boxed) } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -304,7 +328,7 @@ impl Clone for Box { /// let x = Box::new(5); /// let y = x.clone(); /// ``` - #[rustfmt_skip] + #[rustfmt::skip] #[inline] fn clone(&self) -> Box { box { (**self).clone() } @@ -439,24 +463,49 @@ impl Hasher for Box { #[stable(feature = "from_for_ptrs", since = "1.6.0")] impl From for Box { + /// Converts a generic type `T` into a `Box` + /// + /// The conversion allocates on the heap and moves `t` + /// from the stack into it. + /// + /// # Examples + /// ```rust + /// let x = 5; + /// let boxed = Box::new(5); + /// + /// assert_eq!(Box::from(x), boxed); + /// ``` fn from(t: T) -> Self { Box::new(t) } } -#[unstable(feature = "pin", issue = "49150")] -impl From> for Pin> { +#[stable(feature = "pin", since = "1.33.0")] +impl From> for Pin> { + /// Converts a `Box` into a `Pin>` + /// + /// This conversion does not allocate on the heap and happens in place. fn from(boxed: Box) -> Self { - // It's not possible to move or replace the insides of a `Pin>` - // when `T: !Unpin`, so it's safe to pin it directly without any - // additional requirements. - unsafe { Pin::new_unchecked(boxed) } + Box::into_pin(boxed) } } #[stable(feature = "box_from_slice", since = "1.17.0")] -impl<'a, T: Copy> From<&'a [T]> for Box<[T]> { - fn from(slice: &'a [T]) -> Box<[T]> { +impl From<&[T]> for Box<[T]> { + /// Converts a `&[T]` into a `Box<[T]>` + /// + /// This conversion allocates on the heap + /// and performs a copy of `slice`. + /// + /// # Examples + /// ```rust + /// // create a &[u8] which will be used to create a Box<[u8]> + /// let slice: &[u8] = &[104, 101, 108, 108, 111]; + /// let boxed_slice: Box<[u8]> = Box::from(slice); + /// + /// println!("{:?}", boxed_slice); + /// ``` + fn from(slice: &[T]) -> Box<[T]> { let mut boxed = unsafe { RawVec::with_capacity(slice.len()).into_box() }; boxed.copy_from_slice(slice); boxed @@ -464,15 +513,41 @@ impl<'a, T: Copy> From<&'a [T]> for Box<[T]> { } #[stable(feature = "box_from_slice", since = "1.17.0")] -impl<'a> From<&'a str> for Box { +impl From<&str> for Box { + /// Converts a `&str` into a `Box` + /// + /// This conversion allocates on the heap + /// and performs a copy of `s`. + /// + /// # Examples + /// ```rust + /// let boxed: Box = Box::from("hello"); + /// println!("{}", boxed); + /// ``` #[inline] - fn from(s: &'a str) -> Box { + fn from(s: &str) -> Box { unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) } } } #[stable(feature = "boxed_str_conv", since = "1.19.0")] impl From> for Box<[u8]> { + /// Converts a `Box>` into a `Box<[u8]>` + /// + /// This conversion does not allocate on the heap and happens in place. 
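As an aside, the module documentation added above spells out the contract between `Box` and the global allocator. A small standalone sketch of that round trip, using the stable `std::alloc` free functions rather than the `Global` type itself:

```rust
use std::alloc::{dealloc, Layout};

fn main() {
    // Hand the allocation off as a raw pointer, then take ownership back;
    // the rebuilt Box frees the memory normally when it is dropped.
    let p: *mut u32 = Box::into_raw(Box::new(41u32));
    let b: Box<u32> = unsafe { Box::from_raw(p) };
    assert_eq!(*b + 1, 42);

    // Alternatively, deallocate by hand with a layout matching the
    // allocation; fine here because u32 has no destructor to run.
    let q: *mut u32 = Box::into_raw(Box::new(7u32));
    unsafe {
        let layout = Layout::for_value(&*q);
        dealloc(q as *mut u8, layout);
    }
}
```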
+ /// + /// # Examples + /// ```rust + /// // create a Box which will be used to create a Box<[u8]> + /// let boxed: Box = Box::from("hello"); + /// let boxed_str: Box<[u8]> = Box::from(boxed); + /// + /// // create a &[u8] which will be used to create a Box<[u8]> + /// let slice: &[u8] = &[104, 101, 108, 108, 111]; + /// let boxed_slice = Box::from(slice); + /// + /// assert_eq!(boxed_slice, boxed_str); + /// ``` #[inline] fn from(s: Box) -> Self { unsafe { Box::from_raw(Box::into_raw(s) as *mut [u8]) } @@ -545,21 +620,21 @@ impl Box { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Box { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Box { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Pointer for Box { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // It's not possible to extract the inner Uniq directly from the Box, // instead we cast it to a *const which aliases the Unique let ptr: *const T = &**self; @@ -583,6 +658,9 @@ impl DerefMut for Box { } } +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for Box {} + #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for Box { type Item = I::Item; @@ -601,6 +679,9 @@ impl DoubleEndedIterator for Box { fn next_back(&mut self) -> Option { (**self).next_back() } + fn nth_back(&mut self, n: usize) -> Option { + (**self).nth_back(n) + } } #[stable(feature = "rust1", since = "1.0.0")] impl ExactSizeIterator for Box { @@ -615,6 +696,28 @@ impl ExactSizeIterator for Box { #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for Box {} +#[stable(feature = "boxed_closure_impls", since = "1.35.0")] +impl + ?Sized> FnOnce for Box { + type Output = >::Output; + + extern "rust-call" fn call_once(self, args: A) -> Self::Output { + >::call_once(*self, args) + } +} + +#[stable(feature = "boxed_closure_impls", since = "1.35.0")] +impl + ?Sized> FnMut for Box { + extern "rust-call" fn call_mut(&mut self, args: A) -> Self::Output { + >::call_mut(self, args) + } +} + +#[stable(feature = "boxed_closure_impls", since = "1.35.0")] +impl + ?Sized> Fn for Box { + extern "rust-call" fn call(&self, args: A) -> Self::Output { + >::call(self, args) + } +} /// `FnBox` is a version of the `FnOnce` intended for use with boxed /// closure objects. 
The idea is that where one would normally store a @@ -656,9 +759,7 @@ impl FusedIterator for Box {} #[rustc_paren_sugar] #[unstable(feature = "fnbox", reason = "will be deprecated if and when `Box` becomes usable", issue = "28796")] -pub trait FnBox { - type Output; - +pub trait FnBox: FnOnce { fn call_box(self: Box, args: A) -> Self::Output; } @@ -667,33 +768,11 @@ pub trait FnBox { impl FnBox for F where F: FnOnce { - type Output = F::Output; - fn call_box(self: Box, args: A) -> F::Output { self.call_once(args) } } -#[unstable(feature = "fnbox", - reason = "will be deprecated if and when `Box` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce for Box + 'a> { - type Output = R; - - extern "rust-call" fn call_once(self, args: A) -> R { - self.call_box(args) - } -} - -#[unstable(feature = "fnbox", - reason = "will be deprecated if and when `Box` becomes usable", issue = "28796")] -impl<'a, A, R> FnOnce for Box + Send + 'a> { - type Output = R; - - extern "rust-call" fn call_once(self, args: A) -> R { - self.call_box(args) - } -} - #[unstable(feature = "coerce_unsized", issue = "27732")] impl, U: ?Sized> CoerceUnsized> for Box {} @@ -808,17 +887,26 @@ impl AsMut for Box { * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and * could have a method to project a Pin from it. */ -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl Unpin for Box { } #[unstable(feature = "generator_trait", issue = "43122")] -impl Generator for Box - where T: Generator + ?Sized -{ - type Yield = T::Yield; - type Return = T::Return; - unsafe fn resume(&mut self) -> GeneratorState { - (**self).resume() +impl Generator for Box { + type Yield = G::Yield; + type Return = G::Return; + + fn resume(mut self: Pin<&mut Self>) -> GeneratorState { + G::resume(Pin::new(&mut *self)) + } +} + +#[unstable(feature = "generator_trait", issue = "43122")] +impl Generator for Pin> { + type Yield = G::Yield; + type Return = G::Return; + + fn resume(mut self: Pin<&mut Self>) -> GeneratorState { + G::resume((*self).as_mut()) } } @@ -826,7 +914,7 @@ impl Generator for Box impl Future for Box { type Output = F::Output; - fn poll(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll { - F::poll(Pin::new(&mut *self), lw) + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + F::poll(Pin::new(&mut *self), cx) } } diff --git a/src/liballoc/boxed_test.rs b/src/liballoc/boxed_test.rs index f340ea01c5f07..654eabd070326 100644 --- a/src/liballoc/boxed_test.rs +++ b/src/liballoc/boxed_test.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Test for `boxed` mod. use core::any::Any; diff --git a/src/liballoc/collections/binary_heap.rs b/src/liballoc/collections/binary_heap.rs index 5dd0ea7d431a7..8c142a3d317c6 100644 --- a/src/liballoc/collections/binary_heap.rs +++ b/src/liballoc/collections/binary_heap.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
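For context, the `Fn`/`FnMut`/`FnOnce` impls for `Box<F>` added above (the `boxed_closure_impls` work) are what make boxed closures directly callable, removing the need for `FnBox`. A rough usage sketch, assuming those impls as they were later stabilized:

```rust
fn main() {
    // A boxed FnOnce can be consumed by calling it directly.
    let once: Box<dyn FnOnce(i32) -> i32> = Box::new(|x| x + 1);
    assert_eq!(once(41), 42);

    // Likewise for FnMut, through an ordinary mutable binding.
    let mut count = 0;
    let mut tick: Box<dyn FnMut() -> i32> = Box::new(move || {
        count += 1;
        count
    });
    assert_eq!(tick(), 1);
    assert_eq!(tick(), 2);
}
```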
This file may not be copied, modified, or distributed -// except according to those terms. - //! A priority queue implemented with a binary heap. //! //! Insertion and popping the largest element have `O(log n)` time complexity. @@ -161,8 +151,8 @@ use core::mem::{swap, size_of, ManuallyDrop}; use core::ptr; use core::fmt; -use slice; -use vec::{self, Vec}; +use crate::slice; +use crate::vec::{self, Vec}; use super::SpecExtend; @@ -237,8 +227,8 @@ pub struct PeekMut<'a, T: 'a + Ord> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: Ord + fmt::Debug> fmt::Debug for PeekMut<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for PeekMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("PeekMut") .field(&self.heap.data[0]) .finish() @@ -246,7 +236,7 @@ impl<'a, T: Ord + fmt::Debug> fmt::Debug for PeekMut<'a, T> { } #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] -impl<'a, T: Ord> Drop for PeekMut<'a, T> { +impl Drop for PeekMut<'_, T> { fn drop(&mut self) { if self.sift { self.heap.sift_down(0); @@ -255,17 +245,21 @@ impl<'a, T: Ord> Drop for PeekMut<'a, T> { } #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] -impl<'a, T: Ord> Deref for PeekMut<'a, T> { +impl Deref for PeekMut<'_, T> { type Target = T; fn deref(&self) -> &T { - &self.heap.data[0] + debug_assert!(!self.heap.is_empty()); + // SAFE: PeekMut is only instantiated for non-empty heaps + unsafe { self.heap.data.get_unchecked(0) } } } #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] -impl<'a, T: Ord> DerefMut for PeekMut<'a, T> { +impl DerefMut for PeekMut<'_, T> { fn deref_mut(&mut self) -> &mut T { - &mut self.heap.data[0] + debug_assert!(!self.heap.is_empty()); + // SAFE: PeekMut is only instantiated for non-empty heaps + unsafe { self.heap.data.get_unchecked_mut(0) } } } @@ -300,8 +294,8 @@ impl Default for BinaryHeap { } #[stable(feature = "binaryheap_debug", since = "1.4.0")] -impl fmt::Debug for BinaryHeap { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for BinaryHeap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } @@ -342,49 +336,6 @@ impl BinaryHeap { BinaryHeap { data: Vec::with_capacity(capacity) } } - /// Returns an iterator visiting all values in the underlying vector, in - /// arbitrary order. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]); - /// - /// // Print 1, 2, 3, 4 in arbitrary order - /// for x in heap.iter() { - /// println!("{}", x); - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter { - Iter { iter: self.data.iter() } - } - - /// Returns the greatest item in the binary heap, or `None` if it is empty. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let mut heap = BinaryHeap::new(); - /// assert_eq!(heap.peek(), None); - /// - /// heap.push(1); - /// heap.push(5); - /// heap.push(2); - /// assert_eq!(heap.peek(), Some(&5)); - /// - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn peek(&self) -> Option<&T> { - self.data.get(0) - } - /// Returns a mutable reference to the greatest item in the binary heap, or /// `None` if it is empty. 
/// @@ -410,7 +361,7 @@ impl BinaryHeap { /// assert_eq!(heap.peek(), Some(&2)); /// ``` #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")] - pub fn peek_mut(&mut self) -> Option> { + pub fn peek_mut(&mut self) -> Option> { if self.is_empty() { None } else { @@ -421,119 +372,6 @@ impl BinaryHeap { } } - /// Returns the number of elements the binary heap can hold without reallocating. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let mut heap = BinaryHeap::with_capacity(100); - /// assert!(heap.capacity() >= 100); - /// heap.push(4); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn capacity(&self) -> usize { - self.data.capacity() - } - - /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the - /// given `BinaryHeap`. Does nothing if the capacity is already sufficient. - /// - /// Note that the allocator may give the collection more space than it requests. Therefore - /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future - /// insertions are expected. - /// - /// # Panics - /// - /// Panics if the new capacity overflows `usize`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let mut heap = BinaryHeap::new(); - /// heap.reserve_exact(100); - /// assert!(heap.capacity() >= 100); - /// heap.push(4); - /// ``` - /// - /// [`reserve`]: #method.reserve - #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) { - self.data.reserve_exact(additional); - } - - /// Reserves capacity for at least `additional` more elements to be inserted in the - /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations. - /// - /// # Panics - /// - /// Panics if the new capacity overflows `usize`. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let mut heap = BinaryHeap::new(); - /// heap.reserve(100); - /// assert!(heap.capacity() >= 100); - /// heap.push(4); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) { - self.data.reserve(additional); - } - - /// Discards as much additional capacity as possible. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); - /// - /// assert!(heap.capacity() >= 100); - /// heap.shrink_to_fit(); - /// assert!(heap.capacity() == 0); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn shrink_to_fit(&mut self) { - self.data.shrink_to_fit(); - } - - /// Discards capacity with a lower bound. - /// - /// The capacity will remain at least as large as both the length - /// and the supplied value. - /// - /// Panics if the current capacity is smaller than the supplied - /// minimum capacity. - /// - /// # Examples - /// - /// ``` - /// #![feature(shrink_to)] - /// use std::collections::BinaryHeap; - /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); - /// - /// assert!(heap.capacity() >= 100); - /// heap.shrink_to(10); - /// assert!(heap.capacity() >= 10); - /// ``` - #[inline] - #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] - pub fn shrink_to(&mut self, min_capacity: usize) { - self.data.shrink_to(min_capacity) - } - /// Removes the greatest item from the binary heap and returns it, or `None` if it /// is empty. 
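A short illustration (closely mirroring the doc example already in this hunk) of the behaviour the `Drop` impl for `PeekMut` above guarantees: the heap order is restored only when the guard is released.

```rust
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from(vec![1, 5, 2]);

    if let Some(mut top) = heap.peek_mut() {
        // Overwrite the current maximum through the guard; on drop,
        // PeekMut sifts the replaced element back into position.
        *top = 0;
    }

    assert_eq!(heap.peek(), Some(&2));
    assert_eq!(heap.into_sorted_vec(), vec![0, 1, 2]);
}
```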
/// @@ -583,28 +421,6 @@ impl BinaryHeap { self.sift_up(0, old_len); } - /// Consumes the `BinaryHeap` and returns the underlying vector - /// in arbitrary order. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]); - /// let vec = heap.into_vec(); - /// - /// // Will print in some order - /// for x in vec { - /// println!("{}", x); - /// } - /// ``` - #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] - pub fn into_vec(self) -> Vec { - self.into() - } - /// Consumes the `BinaryHeap` and returns a vector in sorted /// (ascending) order. /// @@ -709,6 +525,247 @@ impl BinaryHeap { self.sift_up(start, pos); } + fn rebuild(&mut self) { + let mut n = self.len() / 2; + while n > 0 { + n -= 1; + self.sift_down(n); + } + } + + /// Moves all the elements of `other` into `self`, leaving `other` empty. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// + /// let v = vec![-10, 1, 2, 3, 3]; + /// let mut a = BinaryHeap::from(v); + /// + /// let v = vec![-20, 5, 43]; + /// let mut b = BinaryHeap::from(v); + /// + /// a.append(&mut b); + /// + /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); + /// assert!(b.is_empty()); + /// ``` + #[stable(feature = "binary_heap_append", since = "1.11.0")] + pub fn append(&mut self, other: &mut Self) { + if self.len() < other.len() { + swap(self, other); + } + + if other.is_empty() { + return; + } + + #[inline(always)] + fn log2_fast(x: usize) -> usize { + 8 * size_of::() - (x.leading_zeros() as usize) - 1 + } + + // `rebuild` takes O(len1 + len2) operations + // and about 2 * (len1 + len2) comparisons in the worst case + // while `extend` takes O(len2 * log_2(len1)) operations + // and about 1 * len2 * log_2(len1) comparisons in the worst case, + // assuming len1 >= len2. + #[inline] + fn better_to_rebuild(len1: usize, len2: usize) -> bool { + 2 * (len1 + len2) < len2 * log2_fast(len1) + } + + if better_to_rebuild(self.len(), other.len()) { + self.data.append(&mut other.data); + self.rebuild(); + } else { + self.extend(other.drain()); + } + } +} + +impl BinaryHeap { + /// Returns an iterator visiting all values in the underlying vector, in + /// arbitrary order. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let heap = BinaryHeap::from(vec![1, 2, 3, 4]); + /// + /// // Print 1, 2, 3, 4 in arbitrary order + /// for x in heap.iter() { + /// println!("{}", x); + /// } + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn iter(&self) -> Iter<'_, T> { + Iter { iter: self.data.iter() } + } + + /// Returns the greatest item in the binary heap, or `None` if it is empty. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::new(); + /// assert_eq!(heap.peek(), None); + /// + /// heap.push(1); + /// heap.push(5); + /// heap.push(2); + /// assert_eq!(heap.peek(), Some(&5)); + /// + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn peek(&self) -> Option<&T> { + self.data.get(0) + } + + /// Returns the number of elements the binary heap can hold without reallocating. 
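The cost-model comments above drive `append`'s choice between a full rebuild and pushing elements one by one. A standalone sketch of that decision rule with concrete sizes (mirroring the private helpers, not calling them):

```rust
// Rebuilding costs roughly 2 * (len1 + len2) comparisons; extending costs
// roughly len2 * log2(len1), assuming len1 >= len2.
fn log2_fast(x: usize) -> usize {
    8 * std::mem::size_of::<usize>() - (x.leading_zeros() as usize) - 1
}

fn better_to_rebuild(len1: usize, len2: usize) -> bool {
    2 * (len1 + len2) < len2 * log2_fast(len1)
}

fn main() {
    // Appending a tiny heap to a huge one: pushing the few new items wins.
    assert!(!better_to_rebuild(1_000_000, 10));
    // Appending two similarly sized heaps: an O(len1 + len2) rebuild wins.
    assert!(better_to_rebuild(1_000, 900));
}
```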
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::with_capacity(100); + /// assert!(heap.capacity() >= 100); + /// heap.push(4); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn capacity(&self) -> usize { + self.data.capacity() + } + + /// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the + /// given `BinaryHeap`. Does nothing if the capacity is already sufficient. + /// + /// Note that the allocator may give the collection more space than it requests. Therefore + /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future + /// insertions are expected. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::new(); + /// heap.reserve_exact(100); + /// assert!(heap.capacity() >= 100); + /// heap.push(4); + /// ``` + /// + /// [`reserve`]: #method.reserve + #[stable(feature = "rust1", since = "1.0.0")] + pub fn reserve_exact(&mut self, additional: usize) { + self.data.reserve_exact(additional); + } + + /// Reserves capacity for at least `additional` more elements to be inserted in the + /// `BinaryHeap`. The collection may reserve more space to avoid frequent reallocations. + /// + /// # Panics + /// + /// Panics if the new capacity overflows `usize`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap = BinaryHeap::new(); + /// heap.reserve(100); + /// assert!(heap.capacity() >= 100); + /// heap.push(4); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn reserve(&mut self, additional: usize) { + self.data.reserve(additional); + } + + /// Discards as much additional capacity as possible. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); + /// + /// assert!(heap.capacity() >= 100); + /// heap.shrink_to_fit(); + /// assert!(heap.capacity() == 0); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + pub fn shrink_to_fit(&mut self) { + self.data.shrink_to_fit(); + } + + /// Discards capacity with a lower bound. + /// + /// The capacity will remain at least as large as both the length + /// and the supplied value. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// #![feature(shrink_to)] + /// use std::collections::BinaryHeap; + /// let mut heap: BinaryHeap = BinaryHeap::with_capacity(100); + /// + /// assert!(heap.capacity() >= 100); + /// heap.shrink_to(10); + /// assert!(heap.capacity() >= 10); + /// ``` + #[inline] + #[unstable(feature = "shrink_to", reason = "new API", issue="56431")] + pub fn shrink_to(&mut self, min_capacity: usize) { + self.data.shrink_to(min_capacity) + } + + /// Consumes the `BinaryHeap` and returns the underlying vector + /// in arbitrary order. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::collections::BinaryHeap; + /// let heap = BinaryHeap::from(vec![1, 2, 3, 4, 5, 6, 7]); + /// let vec = heap.into_vec(); + /// + /// // Will print in some order + /// for x in vec { + /// println!("{}", x); + /// } + /// ``` + #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] + pub fn into_vec(self) -> Vec { + self.into() + } + /// Returns the length of the binary heap. /// /// # Examples @@ -771,7 +828,7 @@ impl BinaryHeap { /// ``` #[inline] #[stable(feature = "drain", since = "1.6.0")] - pub fn drain(&mut self) -> Drain { + pub fn drain(&mut self) -> Drain<'_, T> { Drain { iter: self.data.drain(..) } } @@ -795,67 +852,6 @@ impl BinaryHeap { pub fn clear(&mut self) { self.drain(); } - - fn rebuild(&mut self) { - let mut n = self.len() / 2; - while n > 0 { - n -= 1; - self.sift_down(n); - } - } - - /// Moves all the elements of `other` into `self`, leaving `other` empty. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::collections::BinaryHeap; - /// - /// let v = vec![-10, 1, 2, 3, 3]; - /// let mut a = BinaryHeap::from(v); - /// - /// let v = vec![-20, 5, 43]; - /// let mut b = BinaryHeap::from(v); - /// - /// a.append(&mut b); - /// - /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]); - /// assert!(b.is_empty()); - /// ``` - #[stable(feature = "binary_heap_append", since = "1.11.0")] - pub fn append(&mut self, other: &mut Self) { - if self.len() < other.len() { - swap(self, other); - } - - if other.is_empty() { - return; - } - - #[inline(always)] - fn log2_fast(x: usize) -> usize { - 8 * size_of::() - (x.leading_zeros() as usize) - 1 - } - - // `rebuild` takes O(len1 + len2) operations - // and about 2 * (len1 + len2) comparisons in the worst case - // while `extend` takes O(len2 * log_2(len1)) operations - // and about 1 * len2 * log_2(len1) comparisons in the worst case, - // assuming len1 >= len2. - #[inline] - fn better_to_rebuild(len1: usize, len2: usize) -> bool { - 2 * (len1 + len2) < len2 * log2_fast(len1) - } - - if better_to_rebuild(self.len(), other.len()) { - self.data.append(&mut other.data); - self.rebuild(); - } else { - self.extend(other.drain()); - } - } } /// Hole represents a hole in a slice i.e., an index without valid value @@ -869,13 +865,14 @@ struct Hole<'a, T: 'a> { } impl<'a, T> Hole<'a, T> { - /// Create a new Hole at index `pos`. + /// Create a new `Hole` at index `pos`. /// /// Unsafe because pos must be within the data slice. 
#[inline] unsafe fn new(data: &'a mut [T], pos: usize) -> Self { debug_assert!(pos < data.len()); - let elt = ptr::read(&data[pos]); + // SAFE: pos should be inside the slice + let elt = ptr::read(data.get_unchecked(pos)); Hole { data, elt: ManuallyDrop::new(elt), @@ -918,7 +915,7 @@ impl<'a, T> Hole<'a, T> { } } -impl<'a, T> Drop for Hole<'a, T> { +impl Drop for Hole<'_, T> { #[inline] fn drop(&mut self) { // fill the hole again @@ -942,8 +939,8 @@ pub struct Iter<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Iter") .field(&self.iter.as_slice()) .finish() @@ -952,8 +949,8 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Iter<'a, T> { +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { Iter { iter: self.iter.clone() } } } @@ -982,14 +979,14 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> { +impl ExactSizeIterator for Iter<'_, T> { fn is_empty(&self) -> bool { self.iter.is_empty() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Iter<'a, T> {} +impl FusedIterator for Iter<'_, T> {} /// An owning iterator over the elements of a `BinaryHeap`. /// @@ -1006,7 +1003,7 @@ pub struct IntoIter { #[stable(feature = "collection_debug", since = "1.17.0")] impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IntoIter") .field(&self.iter.as_slice()) .finish() @@ -1060,7 +1057,7 @@ pub struct Drain<'a, T: 'a> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> Iterator for Drain<'a, T> { +impl Iterator for Drain<'_, T> { type Item = T; #[inline] @@ -1075,7 +1072,7 @@ impl<'a, T: 'a> Iterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { +impl DoubleEndedIterator for Drain<'_, T> { #[inline] fn next_back(&mut self) -> Option { self.iter.next_back() @@ -1083,14 +1080,14 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> { +impl ExactSizeIterator for Drain<'_, T> { fn is_empty(&self) -> bool { self.iter.is_empty() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} +impl FusedIterator for Drain<'_, T> {} #[stable(feature = "binary_heap_extras_15", since = "1.5.0")] impl From> for BinaryHeap { @@ -1116,7 +1113,7 @@ impl FromIterator for BinaryHeap { } #[stable(feature = "rust1", since = "1.0.0")] -impl IntoIterator for BinaryHeap { +impl IntoIterator for BinaryHeap { type Item = T; type IntoIter = IntoIter; @@ -1144,9 +1141,7 @@ impl IntoIterator for BinaryHeap { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> IntoIterator for &'a BinaryHeap - where T: Ord -{ +impl<'a, T> IntoIterator for &'a BinaryHeap { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -1182,9 +1177,7 @@ impl BinaryHeap { self.reserve(lower); - for elem in iterator { - self.push(elem); - } + iterator.for_each(move 
|elem| self.push(elem)); } } diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs index 49e488702b4b9..6b079fc87cc78 100644 --- a/src/liballoc/collections/btree/map.rs +++ b/src/liballoc/collections/btree/map.rs @@ -1,33 +1,18 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use core::borrow::Borrow; use core::cmp::Ordering; use core::fmt::Debug; use core::hash::{Hash, Hasher}; use core::iter::{FromIterator, Peekable, FusedIterator}; use core::marker::PhantomData; use core::ops::Bound::{Excluded, Included, Unbounded}; -use core::ops::Index; -use core::ops::RangeBounds; +use core::ops::{Index, RangeBounds}; use core::{fmt, intrinsics, mem, ptr}; -use borrow::Borrow; - -use super::node::{self, Handle, NodeRef, marker}; -use super::search; +use super::node::{self, Handle, NodeRef, marker, InsertResult::*, ForceResult::*}; +use super::search::{self, SearchResult::*}; -use super::node::InsertResult::*; -use super::node::ForceResult::*; -use super::search::SearchResult::*; -use self::UnderflowResult::*; -use self::Entry::*; +use UnderflowResult::*; +use Entry::*; /// A map based on a B-Tree. /// @@ -97,6 +82,9 @@ use self::Entry::*; /// } /// } /// +/// // Look up the value for a key (will panic if the key is not found). +/// println!("Movie review: {}", movie_reviews["Office Space"]); +/// /// // iterate over everything. /// for (movie, review) in &movie_reviews { /// println!("{}: \"{}\"", movie, review); @@ -255,7 +243,7 @@ impl super::Recover for BTreeMap fn replace(&mut self, key: K) -> Option { self.ensure_root_is_owned(); - match search::search_tree::(self.root.as_mut(), &key) { + match search::search_tree::, K, (), K>(self.root.as_mut(), &key) { Found(handle) => Some(mem::replace(handle.into_kv_mut().0, key)), GoDown(handle) => { VacantEntry { @@ -285,8 +273,8 @@ pub struct Iter<'a, K: 'a, V: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Iter<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Iter<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } @@ -321,7 +309,7 @@ pub struct IntoIter { #[stable(feature = "collection_debug", since = "1.17.0")] impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let range = Range { front: self.front.reborrow(), back: self.back.reborrow(), @@ -343,8 +331,8 @@ pub struct Keys<'a, K: 'a, V: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, K: 'a + fmt::Debug, V: 'a> fmt::Debug for Keys<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Keys<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } @@ -362,8 +350,8 @@ pub struct Values<'a, K: 'a, V: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, K: 'a, V: 'a + fmt::Debug> fmt::Debug for Values<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Values<'_, K, V> { + fn fmt(&self, f: 
&mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } @@ -395,8 +383,8 @@ pub struct Range<'a, K: 'a, V: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Range<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Range<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } @@ -418,8 +406,8 @@ pub struct RangeMut<'a, K: 'a, V: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for RangeMut<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for RangeMut<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let range = Range { front: self.front.reborrow(), back: self.back.reborrow(), @@ -448,8 +436,8 @@ pub enum Entry<'a, K: 'a, V: 'a> { } #[stable(feature= "debug_btree_map", since = "1.12.0")] -impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for Entry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl Debug for Entry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry") .field(v) @@ -476,8 +464,8 @@ pub struct VacantEntry<'a, K: 'a, V: 'a> { } #[stable(feature= "debug_btree_map", since = "1.12.0")] -impl<'a, K: 'a + Debug + Ord, V: 'a> Debug for VacantEntry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl Debug for VacantEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry") .field(self.key()) .finish() @@ -499,8 +487,8 @@ pub struct OccupiedEntry<'a, K: 'a, V: 'a> { } #[stable(feature= "debug_btree_map", since = "1.12.0")] -impl<'a, K: 'a + Debug + Ord, V: 'a + Debug> Debug for OccupiedEntry<'a, K, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl Debug for OccupiedEntry<'_, K, V> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) .field("value", self.get()) @@ -824,7 +812,7 @@ impl BTreeMap { /// assert_eq!(Some((&5, &"b")), map.range(4..).next()); /// ``` #[stable(feature = "btree_range", since = "1.17.0")] - pub fn range(&self, range: R) -> Range + pub fn range(&self, range: R) -> Range<'_, K, V> where T: Ord, K: Borrow, R: RangeBounds { let root1 = self.root.as_ref(); @@ -865,7 +853,7 @@ impl BTreeMap { /// } /// ``` #[stable(feature = "btree_range", since = "1.17.0")] - pub fn range_mut(&mut self, range: R) -> RangeMut + pub fn range_mut(&mut self, range: R) -> RangeMut<'_, K, V> where T: Ord, K: Borrow, R: RangeBounds { let root1 = self.root.as_mut(); @@ -898,7 +886,7 @@ impl BTreeMap { /// assert_eq!(count["a"], 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn entry(&mut self, key: K) -> Entry { + pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { // FIXME(@porglezomp) Avoid allocating if we don't insert self.ensure_root_is_owned(); match search::search_tree(self.root.as_mut(), &key) { @@ -1208,7 +1196,7 @@ impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for Iter<'a, K, V> {} +impl FusedIterator for Iter<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> { @@ -1223,15 +1211,15 @@ impl<'a, K: 'a, V: 'a> 
DoubleEndedIterator for Iter<'a, K, V> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K: 'a, V: 'a> ExactSizeIterator for Iter<'a, K, V> { +impl ExactSizeIterator for Iter<'_, K, V> { fn len(&self) -> usize { self.length } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K, V> Clone for Iter<'a, K, V> { - fn clone(&self) -> Iter<'a, K, V> { +impl Clone for Iter<'_, K, V> { + fn clone(&self) -> Self { Iter { range: self.range.clone(), length: self.length, @@ -1280,14 +1268,14 @@ impl<'a, K: 'a, V: 'a> DoubleEndedIterator for IterMut<'a, K, V> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K: 'a, V: 'a> ExactSizeIterator for IterMut<'a, K, V> { +impl ExactSizeIterator for IterMut<'_, K, V> { fn len(&self) -> usize { self.length } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for IterMut<'a, K, V> {} +impl FusedIterator for IterMut<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] impl IntoIterator for BTreeMap { @@ -1443,18 +1431,18 @@ impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> { +impl ExactSizeIterator for Keys<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for Keys<'a, K, V> {} +impl FusedIterator for Keys<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K, V> Clone for Keys<'a, K, V> { - fn clone(&self) -> Keys<'a, K, V> { +impl Clone for Keys<'_, K, V> { + fn clone(&self) -> Self { Keys { inner: self.inner.clone() } } } @@ -1480,18 +1468,18 @@ impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> { +impl ExactSizeIterator for Values<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for Values<'a, K, V> {} +impl FusedIterator for Values<'_, K, V> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K, V> Clone for Values<'a, K, V> { - fn clone(&self) -> Values<'a, K, V> { +impl Clone for Values<'_, K, V> { + fn clone(&self) -> Self { Values { inner: self.inner.clone() } } } @@ -1530,15 +1518,14 @@ impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> { } #[stable(feature = "map_values_mut", since = "1.10.0")] -impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> { +impl ExactSizeIterator for ValuesMut<'_, K, V> { fn len(&self) -> usize { self.inner.len() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for ValuesMut<'a, K, V> {} - +impl FusedIterator for ValuesMut<'_, K, V> {} impl<'a, K, V> Range<'a, K, V> { unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) { @@ -1616,11 +1603,11 @@ impl<'a, K, V> Range<'a, K, V> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for Range<'a, K, V> {} +impl FusedIterator for Range<'_, K, V> {} #[stable(feature = "btree_range", since = "1.17.0")] -impl<'a, K, V> Clone for Range<'a, K, V> { - fn clone(&self) -> Range<'a, K, V> { +impl Clone for Range<'_, K, V> { + fn clone(&self) -> Self { Range { front: self.front, back: self.back, @@ -1647,9 +1634,11 @@ impl<'a, K, V> RangeMut<'a, K, V> { let mut cur_handle = match handle.right_kv() { Ok(kv) => { - let (k, v) = ptr::read(&kv).into_kv_mut(); - self.front = kv.right_edge(); - return (k, v); + self.front = ptr::read(&kv).right_edge(); + // Doing 
the descend invalidates the references returned by `into_kv_mut`, + // so we have to do this last. + let (k, v) = kv.into_kv_mut(); + return (k, v); // coerce k from `&mut K` to `&K` } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); @@ -1660,9 +1649,11 @@ impl<'a, K, V> RangeMut<'a, K, V> { loop { match cur_handle.right_kv() { Ok(kv) => { - let (k, v) = ptr::read(&kv).into_kv_mut(); - self.front = first_leaf_edge(kv.right_edge().descend()); - return (k, v); + self.front = first_leaf_edge(ptr::read(&kv).right_edge().descend()); + // Doing the descend invalidates the references returned by `into_kv_mut`, + // so we have to do this last. + let (k, v) = kv.into_kv_mut(); + return (k, v); // coerce k from `&mut K` to `&K` } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); @@ -1685,7 +1676,7 @@ impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, K, V> FusedIterator for RangeMut<'a, K, V> {} +impl FusedIterator for RangeMut<'_, K, V> {} impl<'a, K, V> RangeMut<'a, K, V> { unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) { @@ -1693,9 +1684,11 @@ impl<'a, K, V> RangeMut<'a, K, V> { let mut cur_handle = match handle.left_kv() { Ok(kv) => { - let (k, v) = ptr::read(&kv).into_kv_mut(); - self.back = kv.left_edge(); - return (k, v); + self.back = ptr::read(&kv).left_edge(); + // Doing the descend invalidates the references returned by `into_kv_mut`, + // so we have to do this last. + let (k, v) = kv.into_kv_mut(); + return (k, v); // coerce k from `&mut K` to `&K` } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); @@ -1706,9 +1699,11 @@ impl<'a, K, V> RangeMut<'a, K, V> { loop { match cur_handle.left_kv() { Ok(kv) => { - let (k, v) = ptr::read(&kv).into_kv_mut(); - self.back = last_leaf_edge(kv.left_edge().descend()); - return (k, v); + self.back = last_leaf_edge(ptr::read(&kv).left_edge().descend()); + // Doing the descend invalidates the references returned by `into_kv_mut`, + // so we have to do this last. 
+ let (k, v) = kv.into_kv_mut(); + return (k, v); // coerce k from `&mut K` to `&K` } Err(last_edge) => { let next_level = last_edge.into_node().ascend().ok(); @@ -1732,9 +1727,9 @@ impl FromIterator<(K, V)> for BTreeMap { impl Extend<(K, V)> for BTreeMap { #[inline] fn extend>(&mut self, iter: T) { - for (k, v) in iter { + iter.into_iter().for_each(move |(k, v)| { self.insert(k, v); - } + }); } } @@ -1790,13 +1785,13 @@ impl Ord for BTreeMap { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for BTreeMap { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, K: Ord, Q: ?Sized, V> Index<&'a Q> for BTreeMap +impl Index<&Q> for BTreeMap where K: Borrow, Q: Ord { @@ -1947,7 +1942,7 @@ impl BTreeMap { /// assert_eq!((*first_key, *first_value), (1, "a")); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_, K, V> { Iter { range: Range { front: first_leaf_edge(self.root.as_ref()), @@ -1979,7 +1974,7 @@ impl BTreeMap { /// } /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter_mut(&mut self) -> IterMut { + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { let root1 = self.root.as_mut(); let root2 = unsafe { ptr::read(&root1) }; IterMut { @@ -2056,7 +2051,7 @@ impl BTreeMap { /// String::from("goodbye!")]); /// ``` #[stable(feature = "map_values_mut", since = "1.10.0")] - pub fn values_mut(&mut self) -> ValuesMut { + pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> { ValuesMut { inner: self.iter_mut() } } @@ -2381,7 +2376,7 @@ impl<'a, K: Ord, V> OccupiedEntry<'a, K, V> { /// Gets a mutable reference to the value in the entry. /// - /// If you need a reference to the `OccupiedEntry` which may outlive the + /// If you need a reference to the `OccupiedEntry` that may outlive the /// destruction of the `Entry` value, see [`into_mut`]. /// /// [`into_mut`]: #method.into_mut diff --git a/src/liballoc/collections/btree/mod.rs b/src/liballoc/collections/btree/mod.rs index 087c9f228d444..8b7dc07063b62 100644 --- a/src/liballoc/collections/btree/mod.rs +++ b/src/liballoc/collections/btree/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod node; mod search; pub mod map; diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs index a2d2d3c74be9d..581c66c7086a5 100644 --- a/src/liballoc/collections/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
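Unrelated to the internal iterator changes just above, the `BTreeMap::range`/`range_mut` signatures touched earlier in this diff accept any `RangeBounds`; a brief usage sketch (not part of the patch) with both an explicit `Bound` pair and a plain range:

```rust
use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Included};

fn main() {
    let mut map = BTreeMap::new();
    map.insert(1, "a");
    map.insert(3, "b");
    map.insert(5, "c");
    map.insert(8, "d");

    // Explicit bounds: keys in [3, 8), yielded in ascending order.
    let in_range: Vec<_> = map.range((Included(&3), Excluded(&8))).collect();
    assert_eq!(in_range, [(&3, &"b"), (&5, &"c")]);

    // `range_mut` gives mutable access to the values in the range.
    for (_key, value) in map.range_mut(3..=5) {
        *value = "seen";
    }
    assert_eq!(map[&5], "seen");
}
```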
- // This is an attempt at an implementation following the ideal // // ``` @@ -46,8 +36,8 @@ use core::mem::{self, MaybeUninit}; use core::ptr::{self, Unique, NonNull}; use core::slice; -use alloc::{Global, Alloc, Layout}; -use boxed::Box; +use crate::alloc::{Global, Alloc, Layout}; +use crate::boxed::Box; const B: usize = 6; pub const MIN_LEN: usize = B - 1; @@ -60,11 +50,11 @@ pub const CAPACITY: usize = 2 * B - 1; /// /// We have a separate type for the header and rely on it matching the prefix of `LeafNode`, in /// order to statically allocate a single dummy node to avoid allocations. This struct is -/// `repr(C)` to prevent them from being reordered. `LeafNode` does not just contain a +/// `repr(C)` to prevent them from being reordered. `LeafNode` does not just contain a /// `NodeHeader` because we do not want unnecessary padding between `len` and the keys. -/// Crucially, `NodeHeader` can be safely transmuted to different K and V. (This is exploited +/// Crucially, `NodeHeader` can be safely transmuted to different K and V. (This is exploited /// by `as_header`.) -/// See `into_key_slice` for an explanation of K2. K2 cannot be safely transmuted around +/// See `into_key_slice` for an explanation of K2. K2 cannot be safely transmuted around /// because the size of `NodeHeader` depends on its alignment! #[repr(C)] struct NodeHeader { @@ -105,8 +95,8 @@ struct LeafNode { /// The arrays storing the actual data of the node. Only the first `len` elements of each /// array are initialized and valid. - keys: MaybeUninit<[K; CAPACITY]>, - vals: MaybeUninit<[V; CAPACITY]>, + keys: [MaybeUninit; CAPACITY], + vals: [MaybeUninit; CAPACITY], } impl LeafNode { @@ -116,10 +106,10 @@ impl LeafNode { LeafNode { // As a general policy, we leave fields uninitialized if they can be, as this should // be both slightly faster and easier to track in Valgrind. - keys: MaybeUninit::uninitialized(), - vals: MaybeUninit::uninitialized(), + keys: uninitialized_array![_; CAPACITY], + vals: uninitialized_array![_; CAPACITY], parent: ptr::null(), - parent_idx: MaybeUninit::uninitialized(), + parent_idx: MaybeUninit::uninit(), len: 0 } } @@ -139,7 +129,7 @@ unsafe impl Sync for NodeHeader<(), ()> {} // ever take a pointer past the first key. static EMPTY_ROOT_NODE: NodeHeader<(), ()> = NodeHeader { parent: ptr::null(), - parent_idx: MaybeUninit::uninitialized(), + parent_idx: MaybeUninit::uninit(), len: 0, keys_start: [], }; @@ -155,7 +145,7 @@ struct InternalNode { /// The pointers to the children of this node. `len + 1` of these are considered /// initialized and valid. - edges: [BoxedNode; 2 * B], + edges: [MaybeUninit>; 2 * B], } impl InternalNode { @@ -169,7 +159,7 @@ impl InternalNode { unsafe fn new() -> Self { InternalNode { data: LeafNode::new(), - edges: mem::uninitialized() + edges: uninitialized_array![_; 2*B], } } } @@ -236,7 +226,7 @@ impl Root { } pub fn as_ref(&self) - -> NodeRef { + -> NodeRef, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node.as_ptr(), @@ -246,7 +236,7 @@ impl Root { } pub fn as_mut(&mut self) - -> NodeRef { + -> NodeRef, K, V, marker::LeafOrInternal> { NodeRef { height: self.height, node: self.node.as_ptr(), @@ -268,10 +258,10 @@ impl Root { /// Adds a new internal node with a single edge, pointing to the previous root, and make that /// new node the root. This increases the height by 1 and is the opposite of `pop_level`. 
pub fn push_level(&mut self) - -> NodeRef { + -> NodeRef, K, V, marker::Internal> { debug_assert!(!self.is_shared_root()); let mut new_node = Box::new(unsafe { InternalNode::new() }); - new_node.edges[0] = unsafe { BoxedNode::from_ptr(self.node.as_ptr()) }; + new_node.edges[0].write(unsafe { BoxedNode::from_ptr(self.node.as_ptr()) }); self.node = BoxedNode::from_internal(new_node); self.height += 1; @@ -463,7 +453,7 @@ impl NodeRef { root: self.root, _marker: PhantomData }, - idx: unsafe { usize::from(*self.as_header().parent_idx.get_ref()) }, + idx: unsafe { usize::from(*self.as_header().parent_idx.as_ptr()) }, _marker: PhantomData }) } else { @@ -541,7 +531,7 @@ impl<'a, K, V, Type> NodeRef, K, V, Type> { /// Unsafely asserts to the compiler some static information about whether this /// node is a `Leaf`. unsafe fn cast_unchecked(&mut self) - -> NodeRef { + -> NodeRef, K, V, NewType> { NodeRef { height: self.height, @@ -561,7 +551,7 @@ impl<'a, K, V, Type> NodeRef, K, V, Type> { /// of a reborrowed handle, out of bounds. // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. - unsafe fn reborrow_mut(&mut self) -> NodeRef { + unsafe fn reborrow_mut(&mut self) -> NodeRef, K, V, Type> { NodeRef { height: self.height, node: self.node, @@ -633,7 +623,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { // We cannot be the root, so `as_leaf` is okay unsafe { slice::from_raw_parts( - self.as_leaf().vals.as_ptr() as *const V, + MaybeUninit::first_ptr(&self.as_leaf().vals), self.len() ) } @@ -655,12 +645,14 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { } fn into_key_slice_mut(mut self) -> &'a mut [K] { + // Same as for `into_key_slice` above, we try to avoid a run-time check + // (the alignment comparison will usually be performed at compile-time). if mem::align_of::() > mem::align_of::>() && self.is_shared_root() { &mut [] } else { unsafe { slice::from_raw_parts_mut( - (*self.as_leaf_mut()).keys.as_mut_ptr() as *mut K, + MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).keys), self.len() ) } @@ -671,15 +663,32 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef, K, V, Type> { debug_assert!(!self.is_shared_root()); unsafe { slice::from_raw_parts_mut( - (*self.as_leaf_mut()).vals.as_mut_ptr() as *mut V, + MaybeUninit::first_ptr_mut(&mut (*self.as_leaf_mut()).vals), self.len() ) } } - fn into_slices_mut(self) -> (&'a mut [K], &'a mut [V]) { - let k = unsafe { ptr::read(&self) }; - (k.into_key_slice_mut(), self.into_val_slice_mut()) + fn into_slices_mut(mut self) -> (&'a mut [K], &'a mut [V]) { + debug_assert!(!self.is_shared_root()); + // We cannot use the getters here, because calling the second one + // invalidates the reference returned by the first. + // More precisely, it is the call to `len` that is the culprit, + // because that creates a shared reference to the header, which *can* + // overlap with the keys (and even the values, for ZST keys). 
+ unsafe { + let len = self.len(); + let leaf = self.as_leaf_mut(); + let keys = slice::from_raw_parts_mut( + MaybeUninit::first_ptr_mut(&mut (*leaf).keys), + len + ); + let vals = slice::from_raw_parts_mut( + MaybeUninit::first_ptr_mut(&mut (*leaf).vals), + len + ); + (keys, vals) + } } } @@ -728,7 +737,7 @@ impl<'a, K, V> NodeRef, K, V, marker::Internal> { unsafe { ptr::write(self.keys_mut().get_unchecked_mut(idx), key); ptr::write(self.vals_mut().get_unchecked_mut(idx), val); - ptr::write(self.as_internal_mut().edges.get_unchecked_mut(idx + 1), edge.node); + self.as_internal_mut().edges.get_unchecked_mut(idx + 1).write(edge.node); (*self.as_leaf_mut()).len += 1; @@ -759,7 +768,7 @@ impl<'a, K, V> NodeRef, K, V, marker::Internal> { slice_insert(self.vals_mut(), 0, val); slice_insert( slice::from_raw_parts_mut( - self.as_internal_mut().edges.as_mut_ptr(), + MaybeUninit::first_ptr_mut(&mut self.as_internal_mut().edges), self.len()+1 ), 0, @@ -788,7 +797,9 @@ impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { let edge = match self.reborrow_mut().force() { ForceResult::Leaf(_) => None, ForceResult::Internal(internal) => { - let edge = ptr::read(internal.as_internal().edges.get_unchecked(idx + 1)); + let edge = ptr::read( + internal.as_internal().edges.get_unchecked(idx + 1).as_ptr() + ); let mut new_root = Root { node: edge, height: internal.height - 1 }; (*new_root.as_mut().as_leaf_mut()).parent = ptr::null(); Some(new_root) @@ -816,7 +827,7 @@ impl<'a, K, V> NodeRef, K, V, marker::LeafOrInternal> { ForceResult::Internal(mut internal) => { let edge = slice_remove( slice::from_raw_parts_mut( - internal.as_internal_mut().edges.as_mut_ptr(), + MaybeUninit::first_ptr_mut(&mut internal.as_internal_mut().edges), old_len+1 ), 0 @@ -936,7 +947,7 @@ impl /// Temporarily takes out another, immutable handle on the same location. pub fn reborrow(&self) - -> Handle, HandleType> { + -> Handle, K, V, NodeType>, HandleType> { // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { @@ -961,7 +972,7 @@ impl<'a, K, V, NodeType, HandleType> // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. pub unsafe fn reborrow_mut(&mut self) - -> Handle, HandleType> { + -> Handle, K, V, NodeType>, HandleType> { // We can't use Handle::new_kv or Handle::new_edge because we don't know our type Handle { @@ -1069,14 +1080,14 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: let mut child = self.descend(); unsafe { (*child.as_leaf_mut()).parent = ptr; - (*child.as_leaf_mut()).parent_idx.set(idx); + (*child.as_leaf_mut()).parent_idx.write(idx); } } /// Unsafely asserts to the compiler some static information about whether the underlying /// node of this handle is a `Leaf`. 
unsafe fn cast_unchecked(&mut self) - -> Handle, marker::Edge> { + -> Handle, K, V, NewType>, marker::Edge> { Handle::new_edge(self.node.cast_unchecked(), self.idx) } @@ -1095,7 +1106,7 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: slice_insert( slice::from_raw_parts_mut( - self.node.as_internal_mut().edges.as_mut_ptr(), + MaybeUninit::first_ptr_mut(&mut self.node.as_internal_mut().edges), self.node.len() ), self.idx + 1, @@ -1150,7 +1161,9 @@ impl pub fn descend(self) -> NodeRef { NodeRef { height: self.node.height - 1, - node: unsafe { self.node.as_internal().edges.get_unchecked(self.idx).as_ptr() }, + node: unsafe { + (&*self.node.as_internal().edges.get_unchecked(self.idx).as_ptr()).as_ptr() + }, root: self.node.root, _marker: PhantomData } @@ -1301,7 +1314,7 @@ impl<'a, K, V> Handle, K, V, marker::Internal>, marker:: } } - /// Returns whether it is valid to call `.merge()`, i.e., whether there is enough room in + /// Returns `true` if it is valid to call `.merge()`, i.e., whether there is enough room in /// a node to hold the combination of the nodes to the left and right of this handle along /// with the key/value pair at this handle. pub fn can_merge(&self) -> bool { @@ -1564,8 +1577,8 @@ unsafe fn move_kv( // Source and destination must have the same height. unsafe fn move_edges( - mut source: NodeRef, source_offset: usize, - mut dest: NodeRef, dest_offset: usize, + mut source: NodeRef, K, V, marker::Internal>, source_offset: usize, + mut dest: NodeRef, K, V, marker::Internal>, dest_offset: usize, count: usize) { let source_ptr = source.as_internal_mut().edges.as_mut_ptr(); @@ -1579,7 +1592,7 @@ unsafe fn move_edges( impl Handle, HandleType> { - /// Check whether the underlying node is an `Internal` node or a `Leaf` node. + /// Checks whether the underlying node is an `Internal` node or a `Leaf` node. pub fn force(self) -> ForceResult< Handle, HandleType>, Handle, HandleType> diff --git a/src/liballoc/collections/btree/search.rs b/src/liballoc/collections/btree/search.rs index bc1272fbc786e..dfb67d2ea5756 100644 --- a/src/liballoc/collections/btree/search.rs +++ b/src/liballoc/collections/btree/search.rs @@ -1,21 +1,9 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use core::borrow::Borrow; use core::cmp::Ordering; -use borrow::Borrow; - -use super::node::{Handle, NodeRef, marker}; +use super::node::{Handle, NodeRef, marker, ForceResult::*}; -use super::node::ForceResult::*; -use self::SearchResult::*; +use SearchResult::*; pub enum SearchResult { Found(Handle, marker::KV>), diff --git a/src/liballoc/collections/btree/set.rs b/src/liballoc/collections/btree/set.rs index fa74dce2f1f4a..16a96ca19b824 100644 --- a/src/liballoc/collections/btree/set.rs +++ b/src/liballoc/collections/btree/set.rs @@ -1,25 +1,14 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
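Background for the node layout change above: storing `[MaybeUninit<T>; CAP]` instead of `MaybeUninit<[T; CAP]>` lets individual slots be written and read without ever claiming the whole array is initialized. A standalone sketch of that pattern on today's stable `MaybeUninit` API (the patch itself uses internal helpers such as `uninitialized_array!` and `first_ptr_mut`):

```rust
use std::mem::MaybeUninit;
use std::slice;

fn main() {
    const CAP: usize = 4;

    // An array of MaybeUninit slots requires no initialization up front.
    let mut slots: [MaybeUninit<u32>; CAP] =
        unsafe { MaybeUninit::uninit().assume_init() };
    let mut len = 0;

    for &x in [10u32, 20, 30].iter() {
        slots[len] = MaybeUninit::new(x);
        len += 1;
    }

    // Only the first `len` slots are initialized, so only they may be viewed.
    let init: &[u32] =
        unsafe { slice::from_raw_parts(slots.as_ptr() as *const u32, len) };
    assert_eq!(init, &[10, 20, 30]);
}
```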
- // This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface // to TreeMap +use core::borrow::Borrow; use core::cmp::Ordering::{self, Less, Greater, Equal}; -use core::cmp::{min, max}; -use core::fmt::Debug; -use core::fmt; +use core::cmp::max; +use core::fmt::{self, Debug}; use core::iter::{Peekable, FromIterator, FusedIterator}; use core::ops::{BitOr, BitAnd, BitXor, Sub, RangeBounds}; -use borrow::Borrow; -use collections::btree_map::{self, BTreeMap, Keys}; +use crate::collections::btree_map::{self, BTreeMap, Keys}; use super::Recover; // FIXME(conventions): implement bounded iterators @@ -86,8 +75,8 @@ pub struct Iter<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Iter") .field(&self.iter.clone()) .finish() @@ -129,17 +118,36 @@ pub struct Range<'a, T: 'a> { /// [`difference`]: struct.BTreeSet.html#method.difference #[stable(feature = "rust1", since = "1.0.0")] pub struct Difference<'a, T: 'a> { - a: Peekable>, - b: Peekable>, + inner: DifferenceInner<'a, T>, +} +enum DifferenceInner<'a, T: 'a> { + Stitch { + self_iter: Iter<'a, T>, + other_iter: Peekable>, + }, + Search { + self_iter: Iter<'a, T>, + other_set: &'a BTreeSet, + }, } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Difference<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("Difference") - .field(&self.a) - .field(&self.b) - .finish() +impl fmt::Debug for Difference<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.inner { + DifferenceInner::Stitch { + self_iter, + other_iter, + } => f + .debug_tuple("Difference") + .field(&self_iter) + .field(&other_iter) + .finish(), + DifferenceInner::Search { + self_iter, + other_set: _, + } => f.debug_tuple("Difference").field(&self_iter).finish(), + } } } @@ -157,8 +165,8 @@ pub struct SymmetricDifference<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for SymmetricDifference<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for SymmetricDifference<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("SymmetricDifference") .field(&self.a) .field(&self.b) @@ -175,17 +183,36 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for SymmetricDifference<'a, T> { /// [`intersection`]: struct.BTreeSet.html#method.intersection #[stable(feature = "rust1", since = "1.0.0")] pub struct Intersection<'a, T: 'a> { - a: Peekable>, - b: Peekable>, + inner: IntersectionInner<'a, T>, +} +enum IntersectionInner<'a, T: 'a> { + Stitch { + small_iter: Iter<'a, T>, // for size_hint, should be the smaller of the sets + other_iter: Iter<'a, T>, + }, + Search { + small_iter: Iter<'a, T>, + large_set: &'a BTreeSet, + }, } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Intersection<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("Intersection") - .field(&self.a) - .field(&self.b) - .finish() +impl fmt::Debug for Intersection<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.inner { + IntersectionInner::Stitch { + small_iter, + other_iter, + } => f + 
.debug_tuple("Intersection") + .field(&small_iter) + .field(&other_iter) + .finish(), + IntersectionInner::Search { + small_iter, + large_set: _, + } => f.debug_tuple("Intersection").field(&small_iter).finish(), + } } } @@ -203,8 +230,8 @@ pub struct Union<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Union<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Union<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Union") .field(&self.a) .field(&self.b) @@ -212,6 +239,14 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Union<'a, T> { } } +// This constant is used by functions that compare two sets. +// It estimates the relative size at which searching performs better +// than iterating, based on the benchmarks in +// https://github.com/ssomers/rust_bench_btreeset_intersection; +// It's used to divide rather than multiply sizes, to rule out overflow, +// and it's a power of two to make that division cheap. +const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16; + impl BTreeSet { /// Makes a new `BTreeSet` with a reasonable choice of B. /// @@ -251,7 +286,7 @@ impl BTreeSet { /// assert_eq!(Some(&5), set.range(4..).next()); /// ``` #[stable(feature = "btree_range", since = "1.17.0")] - pub fn range(&self, range: R) -> Range + pub fn range(&self, range: R) -> Range<'_, T> where K: Ord, T: Borrow, R: RangeBounds { Range { iter: self.map.range(range) } @@ -279,9 +314,24 @@ impl BTreeSet { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn difference<'a>(&'a self, other: &'a BTreeSet) -> Difference<'a, T> { - Difference { - a: self.iter().peekable(), - b: other.iter().peekable(), + if self.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF { + // Self is bigger than or not much smaller than other set. + // Iterate both sets jointly, spotting matches along the way. + Difference { + inner: DifferenceInner::Stitch { + self_iter: self.iter(), + other_iter: other.iter().peekable(), + }, + } + } else { + // Self is much smaller than other set, or both sets are empty. + // Iterate the small set, searching for matches in the large set. + Difference { + inner: DifferenceInner::Search { + self_iter: self.iter(), + other_set: other, + }, + } } } @@ -337,9 +387,29 @@ impl BTreeSet { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn intersection<'a>(&'a self, other: &'a BTreeSet) -> Intersection<'a, T> { - Intersection { - a: self.iter().peekable(), - b: other.iter().peekable(), + let (small, other) = if self.len() <= other.len() { + (self, other) + } else { + (other, self) + }; + if small.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF { + // Small set is not much smaller than other set. + // Iterate both sets jointly, spotting matches along the way. + Intersection { + inner: IntersectionInner::Stitch { + small_iter: small.iter(), + other_iter: other.iter(), + }, + } + } else { + // Big difference in number of elements, or both sets are empty. + // Iterate the small set, searching for matches in the large set. 
+ Intersection { + inner: IntersectionInner::Search { + small_iter: small.iter(), + large_set: other, + }, + } } } @@ -473,28 +543,44 @@ impl BTreeSet { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn is_subset(&self, other: &BTreeSet) -> bool { - // Stolen from TreeMap - let mut x = self.iter(); - let mut y = other.iter(); - let mut a = x.next(); - let mut b = y.next(); - while a.is_some() { - if b.is_none() { - return false; - } + // Same result as self.difference(other).next().is_none() + // but the 3 paths below are faster (in order: hugely, 20%, 5%). + if self.len() > other.len() { + false + } else if self.len() > other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF { + // Self is not much smaller than other set. + // Stolen from TreeMap + let mut x = self.iter(); + let mut y = other.iter(); + let mut a = x.next(); + let mut b = y.next(); + while a.is_some() { + if b.is_none() { + return false; + } - let a1 = a.unwrap(); - let b1 = b.unwrap(); + let a1 = a.unwrap(); + let b1 = b.unwrap(); - match b1.cmp(a1) { - Less => (), - Greater => return false, - Equal => a = x.next(), - } + match b1.cmp(a1) { + Less => (), + Greater => return false, + Equal => a = x.next(), + } - b = y.next(); + b = y.next(); + } + true + } else { + // Big difference in number of elements, or both sets are empty. + // Iterate the small set, searching for matches in the large set. + for next in self { + if !other.contains(next) { + return false; + } + } + true } - true } /// Returns `true` if the set is a superset of another, @@ -567,7 +653,7 @@ impl BTreeSet { Recover::replace(&mut self.map, value) } - /// Removes a value from the set. Returns `true` if the value was + /// Removes a value from the set. Returns whether the value was /// present in the set. /// /// The value may be any borrowed form of the set's value type, @@ -713,7 +799,7 @@ impl BTreeSet { /// assert_eq!(set_iter.next(), None); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_, T> { Iter { iter: self.map.keys() } } @@ -797,9 +883,9 @@ impl<'a, T> IntoIterator for &'a BTreeSet { impl Extend for BTreeSet { #[inline] fn extend>(&mut self, iter: Iter) { - for elem in iter { + iter.into_iter().for_each(move |elem| { self.insert(elem); - } + }); } } @@ -819,7 +905,7 @@ impl Default for BTreeSet { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, 'b, T: Ord + Clone> Sub<&'b BTreeSet> for &'a BTreeSet { +impl Sub<&BTreeSet> for &BTreeSet { type Output = BTreeSet; /// Returns the difference of `self` and `rhs` as a new `BTreeSet`. @@ -842,7 +928,7 @@ impl<'a, 'b, T: Ord + Clone> Sub<&'b BTreeSet> for &'a BTreeSet { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, 'b, T: Ord + Clone> BitXor<&'b BTreeSet> for &'a BTreeSet { +impl BitXor<&BTreeSet> for &BTreeSet { type Output = BTreeSet; /// Returns the symmetric difference of `self` and `rhs` as a new `BTreeSet`. @@ -865,7 +951,7 @@ impl<'a, 'b, T: Ord + Clone> BitXor<&'b BTreeSet> for &'a BTreeSet { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, 'b, T: Ord + Clone> BitAnd<&'b BTreeSet> for &'a BTreeSet { +impl BitAnd<&BTreeSet> for &BTreeSet { type Output = BTreeSet; /// Returns the intersection of `self` and `rhs` as a new `BTreeSet`. 
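// A hedged sketch (not part of the patch) of the size heuristic that the rewritten
// `difference`, `intersection`, and `is_subset` above share: when the smaller set
// is at least roughly 1/16th the size of the other one, both sorted sequences are
// walked in lockstep ("stitch"); when it is much smaller, each of its elements is
// looked up in the larger set instead ("search"). The set sizes and the helper
// name `pick_strategy` are illustrative only.
use std::collections::BTreeSet;

const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16;

// Returns how `a.difference(&b)` would be computed under this heuristic.
fn pick_strategy(a: &BTreeSet<u32>, b: &BTreeSet<u32>) -> &'static str {
    if a.len() > b.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
        // Comparable sizes: iterate both sets jointly, spotting matches.
        "stitch"
    } else {
        // Big size difference (or both sets empty): probe the large set per element.
        "search"
    }
}

fn main() {
    let small: BTreeSet<u32> = (0..4).collect();
    let large: BTreeSet<u32> = (0..1_000).collect();
    assert_eq!(pick_strategy(&large, &small), "stitch");
    assert_eq!(pick_strategy(&small, &large), "search");

    // Whichever strategy is chosen internally, the observable results match:
    assert!(small.is_subset(&large));
    assert_eq!(large.difference(&small).count(), 996);
    assert_eq!(small.intersection(&large).count(), 4);
}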
@@ -888,7 +974,7 @@ impl<'a, 'b, T: Ord + Clone> BitAnd<&'b BTreeSet> for &'a BTreeSet { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, 'b, T: Ord + Clone> BitOr<&'b BTreeSet> for &'a BTreeSet { +impl BitOr<&BTreeSet> for &BTreeSet { type Output = BTreeSet; /// Returns the union of `self` and `rhs` as a new `BTreeSet`. @@ -912,14 +998,14 @@ impl<'a, 'b, T: Ord + Clone> BitOr<&'b BTreeSet> for &'a BTreeSet { #[stable(feature = "rust1", since = "1.0.0")] impl Debug for BTreeSet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Iter<'a, T> { +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { Iter { iter: self.iter.clone() } } } @@ -941,12 +1027,12 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> { +impl ExactSizeIterator for Iter<'_, T> { fn len(&self) -> usize { self.iter.len() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Iter<'a, T> {} +impl FusedIterator for Iter<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl Iterator for IntoIter { @@ -974,8 +1060,8 @@ impl ExactSizeIterator for IntoIter { impl FusedIterator for IntoIter {} #[stable(feature = "btree_range", since = "1.17.0")] -impl<'a, T> Clone for Range<'a, T> { - fn clone(&self) -> Range<'a, T> { +impl Clone for Range<'_, T> { + fn clone(&self) -> Self { Range { iter: self.iter.clone() } } } @@ -997,9 +1083,9 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Range<'a, T> {} +impl FusedIterator for Range<'_, T> {} -/// Compare `x` and `y`, but return `short` if x is None and `long` if y is None +/// Compares `x` and `y`, but return `short` if x is None and `long` if y is None fn cmp_opt(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering) -> Ordering { match (x, y) { (None, _) => short, @@ -1009,11 +1095,25 @@ fn cmp_opt(x: Option<&T>, y: Option<&T>, short: Ordering, long: Ordering } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Difference<'a, T> { - fn clone(&self) -> Difference<'a, T> { +impl Clone for Difference<'_, T> { + fn clone(&self) -> Self { Difference { - a: self.a.clone(), - b: self.b.clone(), + inner: match &self.inner { + DifferenceInner::Stitch { + self_iter, + other_iter, + } => DifferenceInner::Stitch { + self_iter: self_iter.clone(), + other_iter: other_iter.clone(), + }, + DifferenceInner::Search { + self_iter, + other_set, + } => DifferenceInner::Search { + self_iter: self_iter.clone(), + other_set, + }, + }, } } } @@ -1022,33 +1122,61 @@ impl<'a, T: Ord> Iterator for Difference<'a, T> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { - loop { - match cmp_opt(self.a.peek(), self.b.peek(), Less, Less) { - Less => return self.a.next(), - Equal => { - self.a.next(); - self.b.next(); - } - Greater => { - self.b.next(); + match &mut self.inner { + DifferenceInner::Stitch { + self_iter, + other_iter, + } => { + let mut self_next = self_iter.next()?; + loop { + match other_iter + .peek() + .map_or(Less, |other_next| Ord::cmp(self_next, other_next)) + { + Less => return Some(self_next), + Equal => { + self_next = self_iter.next()?; + other_iter.next(); + } + Greater => { + other_iter.next(); + } + } } } + 
DifferenceInner::Search { + self_iter, + other_set, + } => loop { + let self_next = self_iter.next()?; + if !other_set.contains(&self_next) { + return Some(self_next); + } + }, } } fn size_hint(&self) -> (usize, Option) { - let a_len = self.a.len(); - let b_len = self.b.len(); - (a_len.saturating_sub(b_len), Some(a_len)) + let (self_len, other_len) = match &self.inner { + DifferenceInner::Stitch { + self_iter, + other_iter + } => (self_iter.len(), other_iter.len()), + DifferenceInner::Search { + self_iter, + other_set + } => (self_iter.len(), other_set.len()), + }; + (self_len.saturating_sub(other_len), Some(self_len)) } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: Ord> FusedIterator for Difference<'a, T> {} +impl FusedIterator for Difference<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for SymmetricDifference<'a, T> { - fn clone(&self) -> SymmetricDifference<'a, T> { +impl Clone for SymmetricDifference<'_, T> { + fn clone(&self) -> Self { SymmetricDifference { a: self.a.clone(), b: self.b.clone(), @@ -1078,14 +1206,28 @@ impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: Ord> FusedIterator for SymmetricDifference<'a, T> {} +impl FusedIterator for SymmetricDifference<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Intersection<'a, T> { - fn clone(&self) -> Intersection<'a, T> { +impl Clone for Intersection<'_, T> { + fn clone(&self) -> Self { Intersection { - a: self.a.clone(), - b: self.b.clone(), + inner: match &self.inner { + IntersectionInner::Stitch { + small_iter, + other_iter, + } => IntersectionInner::Stitch { + small_iter: small_iter.clone(), + other_iter: other_iter.clone(), + }, + IntersectionInner::Search { + small_iter, + large_set, + } => IntersectionInner::Search { + small_iter: small_iter.clone(), + large_set, + }, + }, } } } @@ -1094,33 +1236,48 @@ impl<'a, T: Ord> Iterator for Intersection<'a, T> { type Item = &'a T; fn next(&mut self) -> Option<&'a T> { - loop { - match Ord::cmp(self.a.peek()?, self.b.peek()?) { - Less => { - self.a.next(); - } - Equal => { - self.b.next(); - return self.a.next(); - } - Greater => { - self.b.next(); + match &mut self.inner { + IntersectionInner::Stitch { + small_iter, + other_iter, + } => { + let mut small_next = small_iter.next()?; + let mut other_next = other_iter.next()?; + loop { + match Ord::cmp(small_next, other_next) { + Less => small_next = small_iter.next()?, + Greater => other_next = other_iter.next()?, + Equal => return Some(small_next), + } } } + IntersectionInner::Search { + small_iter, + large_set, + } => loop { + let small_next = small_iter.next()?; + if large_set.contains(&small_next) { + return Some(small_next); + } + }, } } fn size_hint(&self) -> (usize, Option) { - (0, Some(min(self.a.len(), self.b.len()))) + let min_len = match &self.inner { + IntersectionInner::Stitch { small_iter, .. } => small_iter.len(), + IntersectionInner::Search { small_iter, .. 
} => small_iter.len(), + }; + (0, Some(min_len)) } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: Ord> FusedIterator for Intersection<'a, T> {} +impl FusedIterator for Intersection<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Union<'a, T> { - fn clone(&self) -> Union<'a, T> { +impl Clone for Union<'_, T> { + fn clone(&self) -> Self { Union { a: self.a.clone(), b: self.b.clone(), @@ -1150,4 +1307,4 @@ impl<'a, T: Ord> Iterator for Union<'a, T> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: Ord> FusedIterator for Union<'a, T> {} +impl FusedIterator for Union<'_, T> {} diff --git a/src/liballoc/collections/linked_list.rs b/src/liballoc/collections/linked_list.rs index ba46fafaf169f..d6d84a4f083d0 100644 --- a/src/liballoc/collections/linked_list.rs +++ b/src/liballoc/collections/linked_list.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A doubly-linked list with owned nodes. //! //! The `LinkedList` allows pushing and popping elements at either end @@ -30,7 +20,7 @@ use core::marker::PhantomData; use core::mem; use core::ptr::NonNull; -use boxed::Box; +use crate::boxed::Box; use super::SpecExtend; /// A doubly-linked list with owned nodes. @@ -71,8 +61,8 @@ pub struct Iter<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Iter") .field(&self.len) .finish() @@ -81,7 +71,7 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Iter<'a, T> { +impl Clone for Iter<'_, T> { fn clone(&self) -> Self { Iter { ..*self } } @@ -103,8 +93,8 @@ pub struct IterMut<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for IterMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IterMut") .field(&self.list) .field(&self.len) @@ -127,7 +117,7 @@ pub struct IntoIter { #[stable(feature = "collection_debug", since = "1.17.0")] impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IntoIter") .field(&self.list) .finish() @@ -341,7 +331,7 @@ impl LinkedList { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_, T> { Iter { head: self.head, tail: self.tail, @@ -375,7 +365,7 @@ impl LinkedList { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter_mut(&mut self) -> IterMut { + pub fn iter_mut(&mut self) -> IterMut<'_, T> { IterMut { head: self.head, tail: self.tail, @@ -774,7 +764,7 @@ impl LinkedList { /// assert_eq!(odds.into_iter().collect::>(), vec![1, 3, 5, 9, 11, 13, 15]); /// ``` #[unstable(feature = "drain_filter", reason = "recently added", 
issue = "43244")] - pub fn drain_filter(&mut self, filter: F) -> DrainFilter + pub fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool { // avoid borrow issues. @@ -842,10 +832,10 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> {} +impl ExactSizeIterator for Iter<'_, T> {} #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Iter<'a, T> {} +impl FusedIterator for Iter<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Iterator for IterMut<'a, T> { @@ -891,12 +881,12 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for IterMut<'a, T> {} +impl ExactSizeIterator for IterMut<'_, T> {} #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for IterMut<'a, T> {} +impl FusedIterator for IterMut<'_, T> {} -impl<'a, T> IterMut<'a, T> { +impl IterMut<'_, T> { /// Inserts the given element just after the element most recently returned by `.next()`. /// The inserted element does not appear in the iteration. /// @@ -992,7 +982,7 @@ pub struct DrainFilter<'a, T: 'a, F: 'a> } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl<'a, T, F> Iterator for DrainFilter<'a, T, F> +impl Iterator for DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool, { type Item = T; @@ -1019,7 +1009,7 @@ impl<'a, T, F> Iterator for DrainFilter<'a, T, F> } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl<'a, T, F> Drop for DrainFilter<'a, T, F> +impl Drop for DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { @@ -1028,10 +1018,10 @@ impl<'a, T, F> Drop for DrainFilter<'a, T, F> } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl<'a, T: 'a + fmt::Debug, F> fmt::Debug for DrainFilter<'a, T, F> +impl fmt::Debug for DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("DrainFilter") .field(&self.list) .finish() @@ -1117,9 +1107,7 @@ impl Extend for LinkedList { impl SpecExtend for LinkedList { default fn spec_extend(&mut self, iter: I) { - for elt in iter { - self.push_back(elt); - } + iter.into_iter().for_each(move |elt| self.push_back(elt)); } } @@ -1174,7 +1162,7 @@ impl Clone for LinkedList { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for LinkedList { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } @@ -1210,16 +1198,16 @@ unsafe impl Send for LinkedList {} unsafe impl Sync for LinkedList {} #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, T: Sync> Send for Iter<'a, T> {} +unsafe impl Send for Iter<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, T: Sync> Sync for Iter<'a, T> {} +unsafe impl Sync for Iter<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, T: Send> Send for IterMut<'a, T> {} +unsafe impl Send for IterMut<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] -unsafe impl<'a, T: Sync> Sync for IterMut<'a, T> {} +unsafe impl Sync for IterMut<'_, T> {} #[cfg(test)] mod tests { diff --git a/src/liballoc/collections/mod.rs b/src/liballoc/collections/mod.rs index 
96e0eb633b2f5..5a33ddc14f004 100644 --- a/src/liballoc/collections/mod.rs +++ b/src/liballoc/collections/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Collection types. #![stable(feature = "rust1", since = "1.0.0")] @@ -33,25 +23,25 @@ pub mod btree_set { #[stable(feature = "rust1", since = "1.0.0")] #[doc(no_inline)] -pub use self::binary_heap::BinaryHeap; +pub use binary_heap::BinaryHeap; #[stable(feature = "rust1", since = "1.0.0")] #[doc(no_inline)] -pub use self::btree_map::BTreeMap; +pub use btree_map::BTreeMap; #[stable(feature = "rust1", since = "1.0.0")] #[doc(no_inline)] -pub use self::btree_set::BTreeSet; +pub use btree_set::BTreeSet; #[stable(feature = "rust1", since = "1.0.0")] #[doc(no_inline)] -pub use self::linked_list::LinkedList; +pub use linked_list::LinkedList; #[stable(feature = "rust1", since = "1.0.0")] #[doc(no_inline)] -pub use self::vec_deque::VecDeque; +pub use vec_deque::VecDeque; -use alloc::{AllocErr, LayoutErr}; +use crate::alloc::{AllocErr, LayoutErr}; /// Augments `AllocErr` with a CapacityOverflow variant. #[derive(Clone, PartialEq, Eq, Debug)] diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs index 0c5926fbaf1dc..4bea615ab861f 100644 --- a/src/liballoc/collections/vec_deque.rs +++ b/src/liballoc/collections/vec_deque.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A double-ended queue implemented with a growable ring buffer. //! //! This queue has `O(1)` amortized inserts and removals from both ends of the @@ -17,22 +7,19 @@ #![stable(feature = "rust1", since = "1.0.0")] -use core::cmp::Ordering; +use core::cmp::{self, Ordering}; use core::fmt; use core::iter::{repeat_with, FromIterator, FusedIterator}; use core::mem; use core::ops::Bound::{Excluded, Included, Unbounded}; -use core::ops::{Index, IndexMut, RangeBounds}; -use core::ptr; -use core::ptr::NonNull; +use core::ops::{Index, IndexMut, RangeBounds, Try}; +use core::ptr::{self, NonNull}; use core::slice; - use core::hash::{Hash, Hasher}; -use core::cmp; -use collections::CollectionAllocErr; -use raw_vec::RawVec; -use vec::Vec; +use crate::collections::CollectionAllocErr; +use crate::raw_vec::RawVec; +use crate::vec::Vec; const INITIAL_CAPACITY: usize = 7; // 2^3 - 1 const MINIMUM_CAPACITY: usize = 1; // 2 - 1 @@ -137,7 +124,7 @@ impl VecDeque { ptr::write(self.ptr().add(off), value); } - /// Returns `true` if and only if the buffer is at full capacity. + /// Returns `true` if the buffer is at full capacity. #[inline] fn is_full(&self) -> bool { self.cap() - self.len() == 1 @@ -573,7 +560,7 @@ impl VecDeque { /// Does nothing if the capacity is already sufficient. /// /// Note that the allocator may give the collection more space than it - /// requests. Therefore capacity can not be relied upon to be precisely + /// requests. 
Therefore, capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Errors @@ -808,7 +795,7 @@ impl VecDeque { /// assert_eq!(&c[..], b); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_, T> { Iter { tail: self.tail, head: self.head, @@ -834,7 +821,7 @@ impl VecDeque { /// assert_eq!(&buf.iter_mut().collect::>()[..], b); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn iter_mut(&mut self) -> IterMut { + pub fn iter_mut(&mut self) -> IterMut<'_, T> { IterMut { tail: self.tail, head: self.head, @@ -937,7 +924,7 @@ impl VecDeque { self.tail == self.head } - /// Create a draining iterator that removes the specified range in the + /// Creates a draining iterator that removes the specified range in the /// `VecDeque` and yields the removed items. /// /// Note 1: The element range is removed even if the iterator is not @@ -945,7 +932,7 @@ impl VecDeque { /// /// Note 2: It is unspecified how many elements are removed from the deque, /// if the `Drain` value is not dropped, but the borrow it holds expires - /// (eg. due to mem::forget). + /// (e.g., due to `mem::forget`). /// /// # Panics /// @@ -968,7 +955,7 @@ impl VecDeque { /// ``` #[inline] #[stable(feature = "drain", since = "1.6.0")] - pub fn drain(&mut self, range: R) -> Drain + pub fn drain(&mut self, range: R) -> Drain<'_, T> where R: RangeBounds { // Memory safety @@ -1897,8 +1884,6 @@ impl VecDeque { /// # Examples /// /// ``` - /// #![feature(vec_resize_with)] - /// /// use std::collections::VecDeque; /// /// let mut buf = VecDeque::new(); @@ -1917,7 +1902,7 @@ impl VecDeque { /// buf.resize_with(5, || { state += 1; state }); /// assert_eq!(buf, [5, 10, 101, 102, 103]); /// ``` - #[unstable(feature = "vec_resize_with", issue = "41758")] + #[stable(feature = "vec_resize_with", since = "1.33.0")] pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut()->T) { let len = self.len(); @@ -1927,6 +1912,118 @@ impl VecDeque { self.truncate(new_len); } } + + /// Rotates the double-ended queue `mid` places to the left. + /// + /// Equivalently, + /// - Rotates item `mid` into the first position. + /// - Pops the first `mid` items and pushes them to the end. + /// - Rotates `len() - mid` places to the right. + /// + /// # Panics + /// + /// If `mid` is greater than `len()`. Note that `mid == len()` + /// does _not_ panic and is a no-op rotation. + /// + /// # Complexity + /// + /// Takes `O(min(mid, len() - mid))` time and no extra space. + /// + /// # Examples + /// + /// ``` + /// #![feature(vecdeque_rotate)] + /// + /// use std::collections::VecDeque; + /// + /// let mut buf: VecDeque<_> = (0..10).collect(); + /// + /// buf.rotate_left(3); + /// assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]); + /// + /// for i in 1..10 { + /// assert_eq!(i * 3 % 10, buf[0]); + /// buf.rotate_left(3); + /// } + /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// ``` + #[unstable(feature = "vecdeque_rotate", issue = "56686")] + pub fn rotate_left(&mut self, mid: usize) { + assert!(mid <= self.len()); + let k = self.len() - mid; + if mid <= k { + unsafe { self.rotate_left_inner(mid) } + } else { + unsafe { self.rotate_right_inner(k) } + } + } + + /// Rotates the double-ended queue `k` places to the right. + /// + /// Equivalently, + /// - Rotates the first item into position `k`. + /// - Pops the last `k` items and pushes them to the front. + /// - Rotates `len() - k` places to the left. 
+ /// + /// # Panics + /// + /// If `k` is greater than `len()`. Note that `k == len()` + /// does _not_ panic and is a no-op rotation. + /// + /// # Complexity + /// + /// Takes `O(min(k, len() - k))` time and no extra space. + /// + /// # Examples + /// + /// ``` + /// #![feature(vecdeque_rotate)] + /// + /// use std::collections::VecDeque; + /// + /// let mut buf: VecDeque<_> = (0..10).collect(); + /// + /// buf.rotate_right(3); + /// assert_eq!(buf, [7, 8, 9, 0, 1, 2, 3, 4, 5, 6]); + /// + /// for i in 1..10 { + /// assert_eq!(0, buf[i * 3 % 10]); + /// buf.rotate_right(3); + /// } + /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + /// ``` + #[unstable(feature = "vecdeque_rotate", issue = "56686")] + pub fn rotate_right(&mut self, k: usize) { + assert!(k <= self.len()); + let mid = self.len() - k; + if k <= mid { + unsafe { self.rotate_right_inner(k) } + } else { + unsafe { self.rotate_left_inner(mid) } + } + } + + // Safety: the following two methods require that the rotation amount + // be less than half the length of the deque. + // + // `wrap_copy` requres that `min(x, cap() - x) + copy_len <= cap()`, + // but than `min` is never more than half the capacity, regardless of x, + // so it's sound to call here because we're calling with something + // less than half the length, which is never above half the capacity. + + unsafe fn rotate_left_inner(&mut self, mid: usize) { + debug_assert!(mid * 2 <= self.len()); + self.wrap_copy(self.head, self.tail, mid); + self.head = self.wrap_add(self.head, mid); + self.tail = self.wrap_add(self.tail, mid); + } + + unsafe fn rotate_right_inner(&mut self, k: usize) { + debug_assert!(k * 2 <= self.len()); + self.head = self.wrap_sub(self.head, k); + self.tail = self.wrap_sub(self.tail, k); + self.wrap_copy(self.tail, self.head, k); + } } impl VecDeque { @@ -1983,7 +2080,7 @@ trait RingSlices: Sized { } } -impl<'a, T> RingSlices for &'a [T] { +impl RingSlices for &[T] { fn slice(self, from: usize, to: usize) -> Self { &self[from..to] } @@ -1992,7 +2089,7 @@ impl<'a, T> RingSlices for &'a [T] { } } -impl<'a, T> RingSlices for &'a mut [T] { +impl RingSlices for &mut [T] { fn slice(self, from: usize, to: usize) -> Self { &mut self[from..to] } @@ -2023,8 +2120,8 @@ pub struct Iter<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Iter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail); f.debug_tuple("Iter") .field(&front) @@ -2035,8 +2132,8 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> Clone for Iter<'a, T> { - fn clone(&self) -> Iter<'a, T> { +impl Clone for Iter<'_, T> { + fn clone(&self) -> Self { Iter { ring: self.ring, tail: self.tail, @@ -2072,6 +2169,31 @@ impl<'a, T> Iterator for Iter<'a, T> { accum = front.iter().fold(accum, &mut f); back.iter().fold(accum, &mut f) } + + fn try_fold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + let (mut iter, final_res); + if self.tail <= self.head { + // single slice self.ring[self.tail..self.head] + iter = self.ring[self.tail..self.head].iter(); + final_res = iter.try_fold(init, &mut f); + } else { + // two slices: self.ring[self.tail..], self.ring[..self.head] + let 
(front, back) = self.ring.split_at(self.tail); + let mut back_iter = back.iter(); + let res = back_iter.try_fold(init, &mut f); + let len = self.ring.len(); + self.tail = (self.ring.len() - back_iter.len()) & (len - 1); + iter = front[..self.head].iter(); + final_res = iter.try_fold(res?, &mut f); + } + self.tail = self.head - iter.len(); + final_res + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -2092,17 +2214,41 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> { accum = back.iter().rfold(accum, &mut f); front.iter().rfold(accum, &mut f) } + + fn try_rfold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try, + { + let (mut iter, final_res); + if self.tail <= self.head { + // single slice self.ring[self.tail..self.head] + iter = self.ring[self.tail..self.head].iter(); + final_res = iter.try_rfold(init, &mut f); + } else { + // two slices: self.ring[self.tail..], self.ring[..self.head] + let (front, back) = self.ring.split_at(self.tail); + let mut front_iter = front[..self.head].iter(); + let res = front_iter.try_rfold(init, &mut f); + self.head = front_iter.len(); + iter = back.iter(); + final_res = iter.try_rfold(res?, &mut f); + } + self.head = self.tail + iter.len(); + final_res + } } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for Iter<'a, T> { +impl ExactSizeIterator for Iter<'_, T> { fn is_empty(&self) -> bool { self.head == self.tail } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Iter<'a, T> {} +impl FusedIterator for Iter<'_, T> {} /// A mutable iterator over the elements of a `VecDeque`. @@ -2120,8 +2266,8 @@ pub struct IterMut<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for IterMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let (front, back) = RingSlices::ring_slices(&*self.ring, self.head, self.tail); f.debug_tuple("IterMut") .field(&front) @@ -2188,14 +2334,14 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T> ExactSizeIterator for IterMut<'a, T> { +impl ExactSizeIterator for IterMut<'_, T> { fn is_empty(&self) -> bool { self.head == self.tail } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for IterMut<'a, T> {} +impl FusedIterator for IterMut<'_, T> {} /// An owning iterator over the elements of a `VecDeque`. 
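// A hedged illustration (not from the patch) of the ring-buffer layout that the
// new `try_fold`/`try_rfold` implementations above exploit: when `tail <= head`
// the live elements form one contiguous slice `ring[tail..head]`, otherwise they
// wrap around and form two slices, `ring[tail..]` followed by `ring[..head]`.
// `ring`, `tail`, and `head` are toy values here, not the real `VecDeque` fields.
fn ring_slices<T>(ring: &[T], tail: usize, head: usize) -> (&[T], &[T]) {
    if tail <= head {
        // Single slice: everything between tail and head (second slice is empty).
        (&ring[tail..head], &ring[head..head])
    } else {
        // Two slices: the wrapped-around back part, then the front part.
        (&ring[tail..], &ring[..head])
    }
}

fn main() {
    let ring = [0, 1, 2, 3, 4, 5, 6, 7];
    // Contiguous case: the deque's elements occupy ring[2..5].
    assert_eq!(ring_slices(&ring, 2, 5), (&ring[2..5], &ring[0..0]));
    // Wrapped case: the elements occupy ring[6..] followed by ring[..3].
    assert_eq!(ring_slices(&ring, 6, 3), (&ring[6..], &ring[..3]));
}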
/// @@ -2212,7 +2358,7 @@ pub struct IntoIter { #[stable(feature = "collection_debug", since = "1.17.0")] impl fmt::Debug for IntoIter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IntoIter") .field(&self.inner) .finish() @@ -2269,8 +2415,8 @@ pub struct Drain<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Drain<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Drain") .field(&self.after_tail) .field(&self.after_head) @@ -2280,12 +2426,12 @@ impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} +unsafe impl Sync for Drain<'_, T> {} #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a, T: Send> Send for Drain<'a, T> {} +unsafe impl Send for Drain<'_, T> {} #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> Drop for Drain<'a, T> { +impl Drop for Drain<'_, T> { fn drop(&mut self) { self.for_each(drop); @@ -2332,7 +2478,7 @@ impl<'a, T: 'a> Drop for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> Iterator for Drain<'a, T> { +impl Iterator for Drain<'_, T> { type Item = T; #[inline] @@ -2347,7 +2493,7 @@ impl<'a, T: 'a> Iterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { +impl DoubleEndedIterator for Drain<'_, T> { #[inline] fn next_back(&mut self) -> Option { self.iter.next_back().map(|elt| unsafe { ptr::read(elt) }) @@ -2355,10 +2501,10 @@ impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {} +impl ExactSizeIterator for Drain<'_, T> {} #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T: 'a> FusedIterator for Drain<'a, T> {} +impl FusedIterator for Drain<'_, T> {} #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for VecDeque { @@ -2408,7 +2554,7 @@ macro_rules! __impl_slice_eq1 { }; ($Lhs: ty, $Rhs: ty, $Bound: ident) => { #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")] - impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq { + impl PartialEq<$Rhs> for $Lhs where A: PartialEq { fn eq(&self, other: &$Rhs) -> bool { if self.len() != other.len() { return false; @@ -2422,15 +2568,15 @@ macro_rules! __impl_slice_eq1 { } __impl_slice_eq1! { VecDeque, Vec } -__impl_slice_eq1! { VecDeque, &'b [B] } -__impl_slice_eq1! { VecDeque, &'b mut [B] } +__impl_slice_eq1! { VecDeque, &[B] } +__impl_slice_eq1! { VecDeque, &mut [B] } macro_rules! array_impls { ($($N: expr)+) => { $( __impl_slice_eq1! { VecDeque, [B; $N] } - __impl_slice_eq1! { VecDeque, &'b [B; $N] } - __impl_slice_eq1! { VecDeque, &'b mut [B; $N] } + __impl_slice_eq1! { VecDeque, &[B; $N] } + __impl_slice_eq1! 
{ VecDeque, &mut [B; $N] } )+ } } @@ -2531,9 +2677,7 @@ impl<'a, T> IntoIterator for &'a mut VecDeque { #[stable(feature = "rust1", since = "1.0.0")] impl Extend for VecDeque { fn extend>(&mut self, iter: T) { - for elt in iter { - self.push_back(elt); - } + iter.into_iter().for_each(move |elt| self.push_back(elt)); } } @@ -2546,7 +2690,7 @@ impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for VecDeque { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self).finish() } } @@ -2650,7 +2794,7 @@ impl From> for Vec { #[cfg(test)] mod tests { - use test; + use ::test; use super::VecDeque; @@ -2928,7 +3072,7 @@ mod tests { #[test] fn test_from_vec() { - use vec::Vec; + use crate::vec::Vec; for cap in 0..35 { for len in 0..=cap { let mut vec = Vec::with_capacity(cap); @@ -2944,7 +3088,7 @@ mod tests { #[test] fn test_vec_from_vecdeque() { - use vec::Vec; + use crate::vec::Vec; fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) { let mut vd = VecDeque::with_capacity(cap); @@ -3006,7 +3150,7 @@ mod tests { #[test] fn issue_53529() { - use boxed::Box; + use crate::boxed::Box; let mut dst = VecDeque::new(); dst.push_front(Box::new(1)); diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs index b857964ccb3c3..d2ba9b001916c 100644 --- a/src/liballoc/fmt.rs +++ b/src/liballoc/fmt.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Utilities for formatting and printing `String`s. //! //! This module contains the runtime support for the [`format!`] syntax extension. @@ -37,6 +27,9 @@ //! will then parse the format string and determine if the list of arguments //! provided is suitable to pass to this format string. //! +//! To convert a single value to a string, use the [`to_string`] method. This +//! will use the [`Display`] formatting trait. +//! //! ## Positional parameters //! //! Each formatting argument is allowed to specify which value argument it's @@ -109,7 +102,7 @@ //! When requesting that an argument be formatted with a particular type, you //! are actually requesting that an argument ascribes to a particular trait. //! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as -//! well as [`isize`]). The current mapping of types to traits is: +//! well as [`isize`]). The current mapping of types to traits is: //! //! * *nothing* ⇒ [`Display`] //! * `?` ⇒ [`Debug`] @@ -434,7 +427,7 @@ //! 3. An asterisk `.*`: //! //! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the -//! first input holds the `usize` precision, and the second holds the value to print. Note that +//! first input holds the `usize` precision, and the second holds the value to print. Note that //! in this case, if one uses the format string `{:.*}`, then the `` part refers //! to the *value* to print, and the `precision` must come in the input preceding ``. //! @@ -497,6 +490,7 @@ //! [`write!`]: ../../std/macro.write.html //! [`Debug`]: trait.Debug.html //! [`format!`]: ../../std/macro.format.html +//! [`to_string`]: ../../std/string/trait.ToString.html //! 
[`writeln!`]: ../../std/macro.writeln.html //! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt //! [`std::io::Write`]: ../../std/io/trait.Write.html @@ -533,7 +527,7 @@ pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple}; #[stable(feature = "fmt_flags_align", since = "1.28.0")] pub use core::fmt::{Alignment}; -use string; +use crate::string; /// The `format` function takes an [`Arguments`] struct and returns the resulting /// formatted string. @@ -563,7 +557,7 @@ use string; /// [`format_args!`]: ../../std/macro.format_args.html /// [`format!`]: ../../std/macro.format.html #[stable(feature = "rust1", since = "1.0.0")] -pub fn format(args: Arguments) -> string::String { +pub fn format(args: Arguments<'_>) -> string::String { let capacity = args.estimated_capacity(); let mut output = string::String::with_capacity(capacity); output diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index abacc62c8562b..9064b4ccd6a88 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # The Rust core allocation and collections library //! //! This library provides smart pointers and collections for managing @@ -65,16 +55,19 @@ reason = "this library is unlikely to be stabilized in its current \ form or name", issue = "27783")] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/", test(no_crate_inject, attr(allow(unused_variables), deny(warnings))))] #![no_std] #![needs_allocator] -#![deny(missing_debug_implementations)] -#![cfg_attr(not(test), feature(fn_traits))] +#![deny(rust_2018_idioms)] +#![allow(explicit_outlives_requirements)] + +#![warn(deprecated_in_future)] +#![warn(intra_doc_link_resolution_failure)] +#![warn(missing_debug_implementations)] + #![cfg_attr(not(test), feature(generator_trait))] #![cfg_attr(test, feature(test))] @@ -92,6 +85,7 @@ #![feature(dropck_eyepatch)] #![feature(exact_size_is_empty)] #![feature(fmt_internals)] +#![feature(fn_traits)] #![feature(fundamental)] #![feature(futures_api)] #![feature(lang_items)] @@ -100,26 +94,29 @@ #![feature(nll)] #![feature(optin_builtin_traits)] #![feature(pattern)] -#![feature(pin)] #![feature(ptr_internals)] #![feature(ptr_offset_from)] #![feature(rustc_attrs)] +#![feature(receiver_trait)] #![feature(specialization)] -#![feature(split_ascii_whitespace)] #![feature(staged_api)] +#![feature(std_internals)] #![feature(str_internals)] #![feature(trusted_len)] #![feature(try_reserve)] #![feature(unboxed_closures)] #![feature(unicode_internals)] #![feature(unsize)] +#![feature(unsized_locals)] #![feature(allocator_internals)] #![feature(on_unimplemented)] #![feature(rustc_const_unstable)] #![feature(const_vec_new)] #![feature(slice_partition_dedup)] -#![feature(maybe_uninit)] +#![feature(maybe_uninit, maybe_uninit_slice, maybe_uninit_array)] #![feature(alloc_layout_extra)] +#![feature(try_trait)] +#![feature(iter_nth_back)] // Allow testing this 
library @@ -128,8 +125,6 @@ extern crate std; #[cfg(test)] extern crate test; -#[cfg(test)] -extern crate rand; // Module with internal macros used by other modules (needs to be included before other modules). #[macro_use] @@ -139,10 +134,6 @@ mod macros; pub mod alloc; -#[unstable(feature = "futures_api", - reason = "futures in libcore are unstable", - issue = "50547")] -pub mod task; // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid @@ -171,5 +162,5 @@ pub mod vec; #[cfg(not(test))] mod std { - pub use core::ops; // RangeFull + pub use core::ops; // RangeFull } diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs index 472eef77d7956..dd128e096f952 100644 --- a/src/liballoc/macros.rs +++ b/src/liballoc/macros.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// Creates a [`Vec`] containing the arguments. /// /// `vec!` allows `Vec`s to be defined with the same syntax as array expressions. @@ -44,7 +34,7 @@ #[cfg(not(test))] #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] -#[allow_internal_unstable] +#[allow_internal_unstable(box_syntax)] macro_rules! vec { ($elem:expr; $n:expr) => ( $crate::vec::from_elem($elem, $n) @@ -72,20 +62,25 @@ macro_rules! vec { /// Creates a `String` using interpolation of runtime expressions. /// -/// The first argument `format!` receives is a format string. This must be a string -/// literal. The power of the formatting string is in the `{}`s contained. +/// The first argument `format!` receives is a format string. This must be a string +/// literal. The power of the formatting string is in the `{}`s contained. /// /// Additional parameters passed to `format!` replace the `{}`s within the /// formatting string in the order given unless named or positional parameters -/// are used, see [`std::fmt`][fmt] for more information. +/// are used; see [`std::fmt`][fmt] for more information. /// /// A common use for `format!` is concatenation and interpolation of strings. /// The same convention is used with [`print!`] and [`write!`] macros, /// depending on the intended destination of the string. /// +/// To convert a single value to a string, use the [`to_string`] method. This +/// will use the [`Display`] formatting trait. +/// /// [fmt]: ../std/fmt/index.html /// [`print!`]: ../std/macro.print.html /// [`write!`]: ../std/macro.write.html +/// [`to_string`]: ../std/string/trait.ToString.html +/// [`Display`]: ../std/fmt/trait.Display.html /// /// # Panics /// diff --git a/src/liballoc/prelude.rs b/src/liballoc/prelude.rs deleted file mode 100644 index 53b5e93a66e26..0000000000000 --- a/src/liballoc/prelude.rs +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The alloc Prelude -//! -//! The purpose of this module is to alleviate imports of commonly-used -//! 
items of the `alloc` crate by adding a glob import to the top of modules: -//! -//! ``` -//! # #![allow(unused_imports)] -//! # #![feature(alloc)] -//! extern crate alloc; -//! use alloc::prelude::*; -//! ``` - -#![unstable(feature = "alloc", issue = "27783")] - -#[unstable(feature = "alloc", issue = "27783")] pub use borrow::ToOwned; -#[unstable(feature = "alloc", issue = "27783")] pub use boxed::Box; -#[unstable(feature = "alloc", issue = "27783")] pub use slice::SliceConcatExt; -#[unstable(feature = "alloc", issue = "27783")] pub use string::{String, ToString}; -#[unstable(feature = "alloc", issue = "27783")] pub use vec::Vec; diff --git a/src/liballoc/prelude/mod.rs b/src/liballoc/prelude/mod.rs new file mode 100644 index 0000000000000..33cc51d173203 --- /dev/null +++ b/src/liballoc/prelude/mod.rs @@ -0,0 +1,16 @@ +//! The alloc Prelude +//! +//! The purpose of this module is to alleviate imports of commonly-used +//! items of the `alloc` crate by adding a glob import to the top of modules: +//! +//! ``` +//! # #![allow(unused_imports)] +//! # #![feature(alloc)] +//! #![feature(alloc_prelude)] +//! extern crate alloc; +//! use alloc::prelude::v1::*; +//! ``` + +#![unstable(feature = "alloc_prelude", issue = "58935")] + +pub mod v1; diff --git a/src/liballoc/prelude/v1.rs b/src/liballoc/prelude/v1.rs new file mode 100644 index 0000000000000..b6b01395ad632 --- /dev/null +++ b/src/liballoc/prelude/v1.rs @@ -0,0 +1,11 @@ +//! The first version of the prelude of `alloc` crate. +//! +//! See the [module-level documentation](../index.html) for more. + +#![unstable(feature = "alloc_prelude", issue = "58935")] + +#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::borrow::ToOwned; +#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::boxed::Box; +#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::slice::SliceConcatExt; +#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::string::{String, ToString}; +#[unstable(feature = "alloc_prelude", issue = "58935")] pub use crate::vec::Vec; diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index f4674b327695e..fe28fe5095cce 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
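// A hedged usage sketch for the prelude move shown above: downstream nightly
// crates now glob-import `alloc::prelude::v1` (gated by the new `alloc_prelude`
// feature) instead of the old `alloc::prelude`. This mirrors the doc example in
// the new module and only compiles on a nightly toolchain of this era.
#![feature(alloc)]
#![feature(alloc_prelude)]
#![allow(unused_imports)]
extern crate alloc;
use alloc::prelude::v1::*;

fn main() {
    // `String`, `ToString`, `Vec`, `Box`, and `ToOwned` all come in via the glob.
    let labels: Vec<String> = vec![1, 2, 3].into_iter().map(|n| n.to_string()).collect();
    assert_eq!(labels.join(", "), "1, 2, 3");
}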
- #![unstable(feature = "raw_vec_internals", reason = "implementation detail", issue = "0")] #![doc(hidden)] @@ -17,10 +7,9 @@ use core::ops::Drop; use core::ptr::{self, NonNull, Unique}; use core::slice; -use alloc::{Alloc, Layout, Global, handle_alloc_error}; -use collections::CollectionAllocErr; -use collections::CollectionAllocErr::*; -use boxed::Box; +use crate::alloc::{Alloc, Layout, Global, handle_alloc_error}; +use crate::collections::CollectionAllocErr::{self, *}; +use crate::boxed::Box; /// A low-level utility for more ergonomically allocating, reallocating, and deallocating /// a buffer of memory on the heap without having to worry about all the corner cases @@ -32,7 +21,7 @@ use boxed::Box; /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics) /// * Guards against 32-bit systems allocating more than isize::MAX bytes /// * Guards against overflowing your length -/// * Aborts on OOM +/// * Aborts on OOM or calls handle_alloc_error as applicable /// * Avoids freeing Unique::empty() /// * Contains a ptr::Unique and thus endows the user with all related benefits /// @@ -346,7 +335,7 @@ impl RawVec { /// enough to want to do that it's easiest to just have a dedicated method. Slightly /// more efficient logic can be provided for this than the general case. /// - /// Returns true if the reallocation attempt has succeeded, or false otherwise. + /// Returns `true` if the reallocation attempt has succeeded. /// /// # Panics /// @@ -515,7 +504,7 @@ impl RawVec { /// the requested space. This is not really unsafe, but the unsafe /// code *you* write that relies on the behavior of this function may break. /// - /// Returns true if the reallocation attempt has succeeded, or false otherwise. + /// Returns `true` if the reallocation attempt has succeeded. /// /// # Panics /// @@ -631,14 +620,14 @@ enum Fallibility { Infallible, } -use self::Fallibility::*; +use Fallibility::*; enum ReserveStrategy { Exact, Amortized, } -use self::ReserveStrategy::*; +use ReserveStrategy::*; impl RawVec { fn reserve_internal( @@ -649,7 +638,7 @@ impl RawVec { strategy: ReserveStrategy, ) -> Result<(), CollectionAllocErr> { unsafe { - use alloc::AllocErr; + use crate::alloc::AllocErr; // NOTE: we don't early branch on ZSTs here because we want this // to actually catch "asking for more than usize::MAX" in that case. @@ -743,7 +732,7 @@ unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec { #[inline] fn alloc_guard(alloc_size: usize) -> Result<(), CollectionAllocErr> { - if mem::size_of::() < 8 && alloc_size > ::core::isize::MAX as usize { + if mem::size_of::() < 8 && alloc_size > core::isize::MAX as usize { Err(CapacityOverflow) } else { Ok(()) @@ -763,7 +752,7 @@ mod tests { #[test] fn allocator_param() { - use alloc::AllocErr; + use crate::alloc::AllocErr; // Writing a test of integration between third-party // allocators and RawVec is a little tricky because the RawVec diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 52ad30c411a10..68eecd97ea11a 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -1,15 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![allow(deprecated)] - //! Single-threaded reference-counting pointers. 
'Rc' stands for 'Reference //! Counted'. //! @@ -239,7 +227,7 @@ #![stable(feature = "rust1", since = "1.0.0")] #[cfg(not(test))] -use boxed::Box; +use crate::boxed::Box; #[cfg(test)] use std::boxed::Box; @@ -250,19 +238,18 @@ use core::cmp::Ordering; use core::fmt; use core::hash::{Hash, Hasher}; use core::intrinsics::abort; -use core::marker; -use core::marker::{Unpin, Unsize, PhantomData}; +use core::marker::{self, Unpin, Unsize, PhantomData}; use core::mem::{self, align_of_val, forget, size_of_val}; -use core::ops::Deref; -use core::ops::{CoerceUnsized, DispatchFromDyn}; +use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn}; use core::pin::Pin; use core::ptr::{self, NonNull}; +use core::slice::from_raw_parts_mut; use core::convert::From; use core::usize; -use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; -use string::String; -use vec::Vec; +use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; +use crate::string::String; +use crate::vec::Vec; struct RcBox { strong: Cell, @@ -325,8 +312,10 @@ impl Rc { } } - #[unstable(feature = "pin", issue = "49150")] - pub fn pinned(value: T) -> Pin> { + /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then + /// `value` will be pinned in memory and unable to be moved. + #[stable(feature = "pin", since = "1.33.0")] + pub fn pin(value: T) -> Pin> { unsafe { Pin::new_unchecked(Rc::new(value)) } } @@ -443,6 +432,27 @@ impl Rc { } } + /// Consumes the `Rc`, returning the wrapped pointer as `NonNull`. + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_into_raw_non_null)] + /// + /// use std::rc::Rc; + /// + /// let x = Rc::new(10); + /// let ptr = Rc::into_raw_non_null(x); + /// let deref = unsafe { *ptr.as_ref() }; + /// assert_eq!(deref, 10); + /// ``` + #[unstable(feature = "rc_into_raw_non_null", issue = "47336")] + #[inline] + pub fn into_raw_non_null(this: Self) -> NonNull { + // safe because Rc guarantees its pointer is non-null + unsafe { NonNull::new_unchecked(Rc::into_raw(this) as *mut _) } + } + /// Creates a new [`Weak`][weak] pointer to this value. /// /// [weak]: struct.Weak.html @@ -502,7 +512,7 @@ impl Rc { this.strong() } - /// Returns true if there are no other `Rc` or [`Weak`][weak] pointers to + /// Returns `true` if there are no other `Rc` or [`Weak`][weak] pointers to /// this inner value. /// /// [weak]: struct.Weak.html @@ -551,7 +561,7 @@ impl Rc { #[inline] #[stable(feature = "ptr_eq", since = "1.17.0")] - /// Returns true if the two `Rc`s point to the same value (not + /// Returns `true` if the two `Rc`s point to the same value (not /// just values that compare as equal). /// /// # Examples @@ -755,8 +765,6 @@ impl RcFromSlice for Rc<[T]> { impl Drop for Guard { fn drop(&mut self) { - use core::slice::from_raw_parts_mut; - unsafe { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); @@ -813,6 +821,9 @@ impl Deref for Rc { } } +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for Rc {} + #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc { /// Drops the `Rc`. @@ -840,6 +851,8 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc { /// drop(foo); // Doesn't print anything /// drop(foo2); // Prints "dropped!" 
/// ``` + /// + /// [`Weak`]: ../../std/rc/struct.Weak.html fn drop(&mut self) { unsafe { self.dec_strong(); @@ -900,12 +913,47 @@ impl Default for Rc { } } +#[stable(feature = "rust1", since = "1.0.0")] +trait RcEqIdent { + fn eq(&self, other: &Rc) -> bool; + fn ne(&self, other: &Rc) -> bool; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl RcEqIdent for Rc { + #[inline] + default fn eq(&self, other: &Rc) -> bool { + **self == **other + } + + #[inline] + default fn ne(&self, other: &Rc) -> bool { + **self != **other + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl RcEqIdent for Rc { + #[inline] + fn eq(&self, other: &Rc) -> bool { + Rc::ptr_eq(self, other) || **self == **other + } + + #[inline] + fn ne(&self, other: &Rc) -> bool { + !Rc::ptr_eq(self, other) && **self != **other + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Rc { /// Equality for two `Rc`s. /// /// Two `Rc`s are equal if their inner values are equal. /// + /// If `T` also implements `Eq`, two `Rc`s that point to the same value are + /// always equal. + /// /// # Examples /// /// ``` @@ -915,15 +963,18 @@ impl PartialEq for Rc { /// /// assert!(five == Rc::new(5)); /// ``` - #[inline(always)] + #[inline] fn eq(&self, other: &Rc) -> bool { - **self == **other + RcEqIdent::eq(self, other) } /// Inequality for two `Rc`s. /// /// Two `Rc`s are unequal if their inner values are unequal. /// + /// If `T` also implements `Eq`, two `Rc`s that point to the same value are + /// never unequal. + /// /// # Examples /// /// ``` @@ -933,9 +984,9 @@ impl PartialEq for Rc { /// /// assert!(five != Rc::new(6)); /// ``` - #[inline(always)] + #[inline] fn ne(&self, other: &Rc) -> bool { - **self != **other + RcEqIdent::ne(self, other) } } @@ -1067,21 +1118,21 @@ impl Hash for Rc { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Rc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Rc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Pointer for Rc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&(&**self as *const T), f) } } @@ -1094,7 +1145,7 @@ impl From for Rc { } #[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a, T: Clone> From<&'a [T]> for Rc<[T]> { +impl From<&[T]> for Rc<[T]> { #[inline] fn from(v: &[T]) -> Rc<[T]> { >::from_slice(v) @@ -1102,7 +1153,7 @@ impl<'a, T: Clone> From<&'a [T]> for Rc<[T]> { } #[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a> From<&'a str> for Rc { +impl From<&str> for Rc { #[inline] fn from(v: &str) -> Rc { let rc = Rc::<[u8]>::from(v.as_bytes()); @@ -1251,8 +1302,40 @@ impl Weak { } } - /// Return `None` when the pointer is dangling and there is no allocated `RcBox`, - /// i.e., this `Weak` was created by `Weak::new` + /// Gets the number of strong (`Rc`) pointers pointing to this value. + /// + /// If `self` was created using [`Weak::new`], this will return 0. 
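`Rc::into_raw_non_null`, added a few hunks up behind the `rc_into_raw_non_null` feature, is only a thin wrapper: as its comment notes, the pointer handed out by `Rc::into_raw` is never null, so it can be put into a `NonNull` directly. A sketch of the same round trip on stable Rust, without the nightly feature:

```
use std::ptr::NonNull;
use std::rc::Rc;

fn main() {
    let x = Rc::new(10);

    // Roughly what into_raw_non_null does: take the raw pointer, assert non-nullness.
    let raw: *const i32 = Rc::into_raw(x);
    let ptr: NonNull<i32> = NonNull::new(raw as *mut i32).expect("Rc pointers are never null");

    unsafe {
        assert_eq!(*ptr.as_ref(), 10);
        // Rebuild the Rc so the allocation is freed; forgetting this would leak it.
        drop(Rc::from_raw(ptr.as_ptr()));
    }
}
```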
+ /// + /// [`Weak::new`]: #method.new + #[unstable(feature = "weak_counts", issue = "57977")] + pub fn strong_count(&self) -> usize { + if let Some(inner) = self.inner() { + inner.strong() + } else { + 0 + } + } + + /// Gets the number of `Weak` pointers pointing to this value. + /// + /// If `self` was created using [`Weak::new`], this will return `None`. If + /// not, the returned value is at least 1, since `self` still points to the + /// value. + /// + /// [`Weak::new`]: #method.new + #[unstable(feature = "weak_counts", issue = "57977")] + pub fn weak_count(&self) -> Option { + self.inner().map(|inner| { + if inner.strong() > 0 { + inner.weak() - 1 // subtract the implicit weak ptr + } else { + inner.weak() + } + }) + } + + /// Returns `None` when the pointer is dangling and there is no allocated `RcBox` + /// (i.e., when this `Weak` was created by `Weak::new`). #[inline] fn inner(&self) -> Option<&RcBox> { if is_dangling(self.ptr) { @@ -1262,7 +1345,7 @@ impl Weak { } } - /// Returns true if the two `Weak`s point to the same value (not just values + /// Returns `true` if the two `Weak`s point to the same value (not just values /// that compare as equal). /// /// # Notes @@ -1373,7 +1456,7 @@ impl Clone for Weak { #[stable(feature = "rc_weak", since = "1.4.0")] impl fmt::Debug for Weak { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "(Weak)") } } @@ -1381,9 +1464,10 @@ impl fmt::Debug for Weak { #[stable(feature = "downgraded_weak", since = "1.10.0")] impl Default for Weak { /// Constructs a new `Weak`, allocating memory for `T` without initializing - /// it. Calling [`upgrade`][Weak::upgrade] on the return value always gives [`None`]. + /// it. Calling [`upgrade`] on the return value always gives [`None`]. /// /// [`None`]: ../../std/option/enum.Option.html + /// [`upgrade`]: ../../std/rc/struct.Weak.html#method.upgrade /// /// # Examples /// @@ -1477,8 +1561,7 @@ mod tests { use super::{Rc, Weak}; use std::boxed::Box; use std::cell::RefCell; - use std::option::Option; - use std::option::Option::{None, Some}; + use std::option::Option::{self, None, Some}; use std::result::Result::{Err, Ok}; use std::mem::drop; use std::clone::Clone; @@ -1588,6 +1671,33 @@ mod tests { drop(c); } + #[test] + fn weak_counts() { + assert_eq!(Weak::weak_count(&Weak::::new()), None); + assert_eq!(Weak::strong_count(&Weak::::new()), 0); + + let a = Rc::new(0); + let w = Rc::downgrade(&a); + assert_eq!(Weak::strong_count(&w), 1); + assert_eq!(Weak::weak_count(&w), Some(1)); + let w2 = w.clone(); + assert_eq!(Weak::strong_count(&w), 1); + assert_eq!(Weak::weak_count(&w), Some(2)); + assert_eq!(Weak::strong_count(&w2), 1); + assert_eq!(Weak::weak_count(&w2), Some(2)); + drop(w); + assert_eq!(Weak::strong_count(&w2), 1); + assert_eq!(Weak::weak_count(&w2), Some(1)); + let a2 = a.clone(); + assert_eq!(Weak::strong_count(&w2), 2); + assert_eq!(Weak::weak_count(&w2), Some(1)); + drop(a2); + drop(a); + assert_eq!(Weak::strong_count(&w2), 0); + assert_eq!(Weak::weak_count(&w2), Some(1)); + drop(w2); + } + #[test] fn try_unwrap() { let x = Rc::new(3); @@ -1890,5 +2000,5 @@ impl AsRef for Rc { } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl Unpin for Rc { } diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index 510b4b06e407c..f4b2d463778a9 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. 
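The new `Weak::strong_count`/`Weak::weak_count` (still unstable here, tracked in #57977) report from the weak side the same bookkeeping that the long-stable counters on `Rc` already expose. For reference, the stable side behaves like this; note that the "implicit" weak reference shared by the strong handles never shows up in `weak_count`:

```
use std::rc::Rc;

fn main() {
    let a = Rc::new(0);
    assert_eq!(Rc::strong_count(&a), 1);
    assert_eq!(Rc::weak_count(&a), 0);

    let w = Rc::downgrade(&a);
    assert_eq!(Rc::strong_count(&a), 1);
    assert_eq!(Rc::weak_count(&a), 1);

    drop(a);
    // With the last strong reference gone the value is dropped, so the
    // remaining Weak can no longer upgrade.
    assert!(w.upgrade().is_none());
}
```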
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A dynamically-sized view into a contiguous sequence, `[T]`. //! //! *[See also the slice primitive type](../../std/primitive.slice.html).* @@ -97,15 +87,15 @@ // It's cleaner to just turn off the unused_imports warning than to fix them. #![cfg_attr(test, allow(unused_imports, dead_code))] +use core::borrow::{Borrow, BorrowMut}; use core::cmp::Ordering::{self, Less}; -use core::mem::size_of; -use core::mem; +use core::mem::{self, size_of}; use core::ptr; use core::{u8, u16, u32}; -use borrow::{Borrow, BorrowMut, ToOwned}; -use boxed::Box; -use vec::Vec; +use crate::borrow::ToOwned; +use crate::boxed::Box; +use crate::vec::Vec; #[stable(feature = "rust1", since = "1.0.0")] pub use core::slice::{Chunks, Windows}; @@ -135,24 +125,24 @@ pub use core::slice::{RChunks, RChunksMut, RChunksExact, RChunksExactMut}; // HACK(japaric) needed for the implementation of `vec!` macro during testing // NB see the hack module in this file for more details #[cfg(test)] -pub use self::hack::into_vec; +pub use hack::into_vec; // HACK(japaric) needed for the implementation of `Vec::clone` during testing // NB see the hack module in this file for more details #[cfg(test)] -pub use self::hack::to_vec; +pub use hack::to_vec; // HACK(japaric): With cfg(test) `impl [T]` is not available, these three // functions are actually methods that are in `impl [T]` but not in // `core::slice::SliceExt` - we need to supply these functions for the // `test_permutations` test mod hack { - use boxed::Box; use core::mem; + use crate::boxed::Box; + use crate::vec::Vec; #[cfg(test)] - use string::ToString; - use vec::Vec; + use crate::string::ToString; pub fn into_vec(mut b: Box<[T]>) -> Vec { unsafe { @@ -215,10 +205,10 @@ impl [T] { /// /// The comparator function must define a total ordering for the elements in the slice. If /// the ordering is not total, the order of the elements is unspecified. An order is a - /// total order if it is (for all a, b and c): + /// total order if it is (for all `a`, `b` and `c`): /// - /// * total and antisymmetric: exactly one of a < b, a == b or a > b is true; and - /// * transitive, a < b and b < c implies a < c. The same must hold for both == and >. + /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and + /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`. /// /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`. @@ -267,6 +257,10 @@ impl [T] { /// This sort is stable (i.e., does not reorder equal elements) and `O(m n log(m n))` /// worst-case, where the key function is `O(m)`. /// + /// For expensive key functions (e.g. functions that are not simple property accesses or + /// basic operations), [`sort_by_cached_key`](#method.sort_by_cached_key) is likely to be + /// significantly faster, as it does not recompute element keys. + /// /// When applicable, unstable sorting is preferred because it is generally faster than stable /// sorting and it doesn't allocate auxiliary memory. /// See [`sort_unstable_by_key`](#method.sort_unstable_by_key). 
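The reworked `sort_by` documentation above leans on the `partial_cmp` idiom for types that are `PartialOrd` but not `Ord`, such as `f64`. A short, self-contained version of that idiom; the `unwrap` cannot panic as long as no `NaN` is present:

```
fn main() {
    let mut v = [2.4_f64, 0.5, -3.1, 1.0];
    // f64 has no total order, so `sort` is unavailable; with no NaNs in the
    // slice, partial_cmp always returns Some and this comparator is total.
    v.sort_by(|a, b| a.partial_cmp(b).unwrap());
    assert_eq!(v, [-3.1, 0.5, 1.0, 2.4]);
}
```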
@@ -322,7 +316,6 @@ impl [T] { /// # Examples /// /// ``` - /// #![feature(slice_sort_by_cached_key)] /// let mut v = [-5i32, 4, 32, -3, 2]; /// /// v.sort_by_cached_key(|k| k.to_string()); @@ -330,7 +323,7 @@ impl [T] { /// ``` /// /// [pdqsort]: https://github.com/orlp/pdqsort - #[unstable(feature = "slice_sort_by_cached_key", issue = "34447")] + #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")] #[inline] pub fn sort_by_cached_key(&mut self, f: F) where F: FnMut(&T) -> K, K: Ord diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index 9916fa639e1d4..a36804bddff32 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Unicode string slices. //! //! *[See also the `str` primitive type](../../std/primitive.str.html).* @@ -38,20 +28,18 @@ // It's cleaner to just turn off the unused_imports warning than to fix them. #![allow(unused_imports)] -use core::fmt; -use core::str as core_str; -use core::str::pattern::Pattern; -use core::str::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; +use core::borrow::Borrow; +use core::str::pattern::{Pattern, Searcher, ReverseSearcher, DoubleEndedSearcher}; use core::mem; use core::ptr; use core::iter::FusedIterator; use core::unicode::conversions; -use borrow::{Borrow, ToOwned}; -use boxed::Box; -use slice::{SliceConcatExt, SliceIndex}; -use string::String; -use vec::Vec; +use crate::borrow::ToOwned; +use crate::boxed::Box; +use crate::slice::{SliceConcatExt, SliceIndex}; +use crate::string::String; +use crate::vec::Vec; #[stable(feature = "rust1", since = "1.0.0")] pub use core::str::{FromStr, Utf8Error}; @@ -78,7 +66,7 @@ pub use core::str::SplitWhitespace; pub use core::str::pattern; #[stable(feature = "encode_utf16", since = "1.8.0")] pub use core::str::EncodeUtf16; -#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[stable(feature = "split_ascii_whitespace", since = "1.34.0")] pub use core::str::SplitAsciiWhitespace; #[unstable(feature = "slice_concat_ext", @@ -453,45 +441,6 @@ impl str { return s; } - /// Escapes each char in `s` with [`char::escape_debug`]. - /// - /// Note: only extended grapheme codepoints that begin the string will be - /// escaped. - /// - /// [`char::escape_debug`]: primitive.char.html#method.escape_debug - #[unstable(feature = "str_escape", - reason = "return type may change to be an iterator", - issue = "27791")] - pub fn escape_debug(&self) -> String { - let mut string = String::with_capacity(self.len()); - let mut chars = self.chars(); - if let Some(first) = chars.next() { - string.extend(first.escape_debug_ext(true)) - } - string.extend(chars.flat_map(|c| c.escape_debug_ext(false))); - string - } - - /// Escapes each char in `s` with [`char::escape_default`]. - /// - /// [`char::escape_default`]: primitive.char.html#method.escape_default - #[unstable(feature = "str_escape", - reason = "return type may change to be an iterator", - issue = "27791")] - pub fn escape_default(&self) -> String { - self.chars().flat_map(|c| c.escape_default()).collect() - } - - /// Escapes each char in `s` with [`char::escape_unicode`]. 
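`sort_by_cached_key`, stabilized above for 1.34.0, computes each key only once and caches it in temporary storage, which is what makes it the better choice for expensive keys such as freshly allocated `String`s. The doc example from the hunk, completed with the resulting order:

```
fn main() {
    let mut v = [-5_i32, 4, 32, -3, 2];
    // Each key (a heap-allocated String) is computed once per element and
    // cached, rather than once per comparison as with sort_by_key.
    v.sort_by_cached_key(|k| k.to_string());
    // Lexicographic order of the string forms.
    assert_eq!(v, [-3, -5, 2, 32, 4]);
}
```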
- /// - /// [`char::escape_unicode`]: primitive.char.html#method.escape_unicode - #[unstable(feature = "str_escape", - reason = "return type may change to be an iterator", - issue = "27791")] - pub fn escape_unicode(&self) -> String { - self.chars().flat_map(|c| c.escape_unicode()).collect() - } - /// Converts a [`Box`] into a [`String`] without copying or allocating. /// /// [`String`]: string/struct.String.html @@ -622,3 +571,4 @@ impl str { pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box { Box::from_raw(Box::into_raw(v) as *mut str) } + diff --git a/src/liballoc/string.rs b/src/liballoc/string.rs index 4652c0e7efa70..a3e2098695f70 100644 --- a/src/liballoc/string.rs +++ b/src/liballoc/string.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A UTF-8 encoded, growable string. //! //! This module contains the [`String`] type, a trait for converting @@ -60,17 +50,16 @@ use core::char::{decode_utf16, REPLACEMENT_CHARACTER}; use core::fmt; use core::hash; use core::iter::{FromIterator, FusedIterator}; -use core::ops::Bound::{Excluded, Included, Unbounded}; use core::ops::{self, Add, AddAssign, Index, IndexMut, RangeBounds}; +use core::ops::Bound::{Excluded, Included, Unbounded}; use core::ptr; -use core::str::pattern::Pattern; -use core::str::lossy; +use core::str::{pattern::Pattern, lossy}; -use collections::CollectionAllocErr; -use borrow::{Cow, ToOwned}; -use boxed::Box; -use str::{self, from_boxed_utf8_unchecked, FromStr, Utf8Error, Chars}; -use vec::Vec; +use crate::borrow::{Cow, ToOwned}; +use crate::collections::CollectionAllocErr; +use crate::boxed::Box; +use crate::str::{self, from_boxed_utf8_unchecked, FromStr, Utf8Error, Chars}; +use crate::vec::Vec; /// A UTF-8 encoded, growable string. /// @@ -497,7 +486,7 @@ impl String { /// [`str::from_utf8`]: ../../std/str/fn.from_utf8.html /// [`as_bytes`]: struct.String.html#method.as_bytes /// [`FromUtf8Error`]: struct.FromUtf8Error.html - /// [`Err`]: ../../stdresult/enum.Result.html#variant.Err + /// [`Err`]: ../../std/result/enum.Result.html#variant.Err #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn from_utf8(vec: Vec) -> Result { @@ -974,7 +963,7 @@ impl String { /// Does nothing if the capacity is already sufficient. /// /// Note that the allocator may give the collection more space than it - /// requests. Therefore capacity can not be relied upon to be precisely + /// requests. Therefore, capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Errors @@ -1388,9 +1377,7 @@ impl String { self.vec.len() } - /// Returns `true` if this `String` has a length of zero. - /// - /// Returns `false` otherwise. + /// Returns `true` if this `String` has a length of zero, and `false` otherwise. 
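The `from_utf8` hunk above only repairs a broken intra-doc link, but the API it documents is worth a concrete round trip: the conversion takes ownership of the `Vec<u8>` without copying it, and on failure the original bytes can be recovered from the error. A minimal sketch (the byte values are the UTF-8 encoding of U+1F496):

```
fn main() {
    // Valid UTF-8 becomes a String without copying the buffer.
    let heart = vec![240, 159, 146, 150];
    assert_eq!(String::from_utf8(heart).unwrap(), "💖");

    // Invalid bytes are rejected; the error hands the Vec<u8> back.
    let bad = vec![0, 159, 146, 150];
    let err = String::from_utf8(bad).unwrap_err();
    assert_eq!(err.into_bytes(), vec![0, 159, 146, 150]);
}
```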
/// /// # Examples /// @@ -1495,7 +1482,7 @@ impl String { /// assert_eq!(s, ""); /// ``` #[stable(feature = "drain", since = "1.6.0")] - pub fn drain(&mut self, range: R) -> Drain + pub fn drain(&mut self, range: R) -> Drain<'_> where R: RangeBounds { // Memory safety @@ -1679,14 +1666,14 @@ impl FromUtf8Error { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for FromUtf8Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&self.error, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for FromUtf16Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt("invalid utf-16: lone surrogate found", f) } } @@ -1877,7 +1864,7 @@ impl Default for String { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for String { #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } @@ -1885,7 +1872,7 @@ impl fmt::Display for String { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for String { #[inline] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } @@ -1936,7 +1923,7 @@ impl hash::Hash for String { /// let c = a.to_string() + b; /// ``` #[stable(feature = "rust1", since = "1.0.0")] -impl<'a> Add<&'a str> for String { +impl Add<&str> for String { type Output = String; #[inline] @@ -1950,7 +1937,7 @@ impl<'a> Add<&'a str> for String { /// /// This has the same behavior as the [`push_str`][String::push_str] method. #[stable(feature = "stringaddassign", since = "1.12.0")] -impl<'a> AddAssign<&'a str> for String { +impl AddAssign<&str> for String { #[inline] fn add_assign(&mut self, other: &str) { self.push_str(other); @@ -2086,48 +2073,17 @@ impl ops::DerefMut for String { /// [`String`]: struct.String.html /// [`from_str`]: ../../std/str/trait.FromStr.html#tymethod.from_str #[stable(feature = "str_parse_error", since = "1.5.0")] -#[derive(Copy)] -pub enum ParseError {} +pub type ParseError = core::convert::Infallible; #[stable(feature = "rust1", since = "1.0.0")] impl FromStr for String { - type Err = ParseError; + type Err = core::convert::Infallible; #[inline] fn from_str(s: &str) -> Result { Ok(String::from(s)) } } -#[stable(feature = "str_parse_error", since = "1.5.0")] -impl Clone for ParseError { - fn clone(&self) -> ParseError { - match *self {} - } -} - -#[stable(feature = "str_parse_error", since = "1.5.0")] -impl fmt::Debug for ParseError { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - match *self {} - } -} - -#[stable(feature = "str_parse_error2", since = "1.8.0")] -impl fmt::Display for ParseError { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - match *self {} - } -} - -#[stable(feature = "str_parse_error", since = "1.5.0")] -impl PartialEq for ParseError { - fn eq(&self, _: &ParseError) -> bool { - match *self {} - } -} - -#[stable(feature = "str_parse_error", since = "1.5.0")] -impl Eq for ParseError {} /// A trait for converting a value to a `String`. 
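Replacing the hand-written empty `ParseError` enum with a type alias for `core::convert::Infallible`, as the hunk above does, keeps the old name compiling while dropping the duplicated trait impls. For callers the practical upshot is that the error arm of `String::from_str` is statically uninhabited; a small sketch:

```
use std::str::FromStr;

fn main() {
    // String::from_str can never fail, so unwrap() can never panic here.
    let s = String::from_str("hello world").unwrap();
    assert_eq!(s, "hello world");

    // The Err payload is uninhabited, so it can be matched away entirely.
    match String::from_str("always ok") {
        Ok(s) => assert_eq!(s, "always ok"),
        Err(never) => match never {},
    }
}
```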
/// @@ -2166,7 +2122,7 @@ pub trait ToString { impl ToString for T { #[inline] default fn to_string(&self) -> String { - use core::fmt::Write; + use fmt::Write; let mut buf = String::new(); buf.write_fmt(format_args!("{}", self)) .expect("a Display implementation returned an error unexpectedly"); @@ -2184,7 +2140,7 @@ impl ToString for str { } #[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")] -impl<'a> ToString for Cow<'a, str> { +impl ToString for Cow<'_, str> { #[inline] fn to_string(&self) -> String { self[..].to_owned() @@ -2216,9 +2172,9 @@ impl AsRef<[u8]> for String { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a> From<&'a str> for String { +impl From<&str> for String { #[inline] - fn from(s: &'a str) -> String { + fn from(s: &str) -> String { s.to_owned() } } @@ -2374,19 +2330,19 @@ pub struct Drain<'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a> fmt::Debug for Drain<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Drain<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad("Drain { .. }") } } #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a> Sync for Drain<'a> {} +unsafe impl Sync for Drain<'_> {} #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a> Send for Drain<'a> {} +unsafe impl Send for Drain<'_> {} #[stable(feature = "drain", since = "1.6.0")] -impl<'a> Drop for Drain<'a> { +impl Drop for Drain<'_> { fn drop(&mut self) { unsafe { // Use Vec::drain. "Reaffirm" the bounds checks to avoid @@ -2400,7 +2356,7 @@ impl<'a> Drop for Drain<'a> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a> Iterator for Drain<'a> { +impl Iterator for Drain<'_> { type Item = char; #[inline] @@ -2414,7 +2370,7 @@ impl<'a> Iterator for Drain<'a> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a> DoubleEndedIterator for Drain<'a> { +impl DoubleEndedIterator for Drain<'_> { #[inline] fn next_back(&mut self) -> Option { self.iter.next_back() @@ -2422,4 +2378,4 @@ impl<'a> DoubleEndedIterator for Drain<'a> { } #[stable(feature = "fused", since = "1.26.0")] -impl<'a> FusedIterator for Drain<'a> {} +impl FusedIterator for Drain<'_> {} diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index 111459d12a4df..b7d7995b540ba 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![stable(feature = "rust1", since = "1.0.0")] //! Thread-safe reference-counting pointers. 
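The `ToString` hunk above only adjusts an import, but it sits on the blanket impl over `fmt::Display` that gives every `Display` type a `to_string` method for free. A minimal illustration with a made-up `Celsius` newtype:

```
use std::fmt;

struct Celsius(f64);

// Implementing Display is enough; the blanket ToString impl supplies to_string().
impl fmt::Display for Celsius {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}°C", self.0)
    }
}

fn main() {
    assert_eq!(Celsius(21.5).to_string(), "21.5°C");
}
```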
@@ -21,23 +11,23 @@ use core::sync::atomic; use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use core::borrow; use core::fmt; -use core::cmp::Ordering; +use core::cmp::{self, Ordering}; use core::intrinsics::abort; use core::mem::{self, align_of_val, size_of_val}; -use core::ops::Deref; -use core::ops::{CoerceUnsized, DispatchFromDyn}; +use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn}; use core::pin::Pin; use core::ptr::{self, NonNull}; use core::marker::{Unpin, Unsize, PhantomData}; use core::hash::{Hash, Hasher}; use core::{isize, usize}; use core::convert::From; +use core::slice::from_raw_parts_mut; -use alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; -use boxed::Box; -use rc::is_dangling; -use string::String; -use vec::Vec; +use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error}; +use crate::boxed::Box; +use crate::rc::is_dangling; +use crate::string::String; +use crate::vec::Vec; /// A soft limit on the amount of references that may be made to an `Arc`. /// @@ -261,7 +251,7 @@ impl, U: ?Sized> DispatchFromDyn> for Weak {} #[stable(feature = "arc_weak", since = "1.4.0")] impl fmt::Debug for Weak { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "(Weak)") } } @@ -303,8 +293,10 @@ impl Arc { Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData } } - #[unstable(feature = "pin", issue = "49150")] - pub fn pinned(data: T) -> Pin> { + /// Constructs a new `Pin>`. If `T` does not implement `Unpin`, then + /// `data` will be pinned in memory and unable to be moved. + #[stable(feature = "pin", since = "1.33.0")] + pub fn pin(data: T) -> Pin> { unsafe { Pin::new_unchecked(Arc::new(data)) } } @@ -421,6 +413,27 @@ impl Arc { } } + /// Consumes the `Arc`, returning the wrapped pointer as `NonNull`. + /// + /// # Examples + /// + /// ``` + /// #![feature(rc_into_raw_non_null)] + /// + /// use std::sync::Arc; + /// + /// let x = Arc::new(10); + /// let ptr = Arc::into_raw_non_null(x); + /// let deref = unsafe { *ptr.as_ref() }; + /// assert_eq!(deref, 10); + /// ``` + #[unstable(feature = "rc_into_raw_non_null", issue = "47336")] + #[inline] + pub fn into_raw_non_null(this: Self) -> NonNull { + // safe because Arc guarantees its pointer is non-null + unsafe { NonNull::new_unchecked(Arc::into_raw(this) as *mut _) } + } + /// Creates a new [`Weak`][weak] pointer to this value. /// /// [weak]: struct.Weak.html @@ -547,7 +560,7 @@ impl Arc { #[inline] #[stable(feature = "ptr_eq", since = "1.17.0")] - /// Returns true if the two `Arc`s point to the same value (not + /// Returns `true` if the two `Arc`s point to the same value (not /// just values that compare as equal). /// /// # Examples @@ -659,8 +672,6 @@ impl ArcFromSlice for Arc<[T]> { impl Drop for Guard { fn drop(&mut self) { - use core::slice::from_raw_parts_mut; - unsafe { let slice = from_raw_parts_mut(self.elems, self.n_elems); ptr::drop_in_place(slice); @@ -767,6 +778,9 @@ impl Deref for Arc { } } +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for Arc {} + impl Arc { /// Makes a mutable reference into the given `Arc`. /// @@ -952,6 +966,8 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc { /// drop(foo); // Doesn't print anything /// drop(foo2); // Prints "dropped!" 
/// ``` + /// + /// [`Weak`]: ../../std/sync/struct.Weak.html #[inline] fn drop(&mut self) { // Because `fetch_sub` is already atomic, we do not need to synchronize @@ -1120,8 +1136,63 @@ impl Weak { } } - /// Return `None` when the pointer is dangling and there is no allocated `ArcInner`, - /// i.e., this `Weak` was created by `Weak::new` + /// Gets the number of strong (`Arc`) pointers pointing to this value. + /// + /// If `self` was created using [`Weak::new`], this will return 0. + /// + /// [`Weak::new`]: #method.new + #[unstable(feature = "weak_counts", issue = "57977")] + pub fn strong_count(&self) -> usize { + if let Some(inner) = self.inner() { + inner.strong.load(SeqCst) + } else { + 0 + } + } + + /// Gets an approximation of the number of `Weak` pointers pointing to this + /// value. + /// + /// If `self` was created using [`Weak::new`], this will return 0. If not, + /// the returned value is at least 1, since `self` still points to the + /// value. + /// + /// # Accuracy + /// + /// Due to implementation details, the returned value can be off by 1 in + /// either direction when other threads are manipulating any `Arc`s or + /// `Weak`s pointing to the same value. + /// + /// [`Weak::new`]: #method.new + #[unstable(feature = "weak_counts", issue = "57977")] + pub fn weak_count(&self) -> Option { + // Due to the implicit weak pointer added when any strong pointers are + // around, we cannot implement `weak_count` correctly since it + // necessarily requires accessing the strong count and weak count in an + // unsynchronized fashion. So this version is a bit racy. + self.inner().map(|inner| { + let strong = inner.strong.load(SeqCst); + let weak = inner.weak.load(SeqCst); + if strong == 0 { + // If the last `Arc` has *just* been dropped, it might not yet + // have removed the implicit weak count, so the value we get + // here might be 1 too high. + weak + } else { + // As long as there's still at least 1 `Arc` around, subtract + // the implicit weak pointer. + // Note that the last `Arc` might get dropped between the 2 + // loads we do above, removing the implicit weak pointer. This + // means that the value might be 1 too low here. In order to not + // return 0 here (which would happen if we're the only weak + // pointer), we guard against that specifically. + cmp::max(1, weak - 1) + } + }) + } + + /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`, + /// (i.e., when this `Weak` was created by `Weak::new`). #[inline] fn inner(&self) -> Option<&ArcInner> { if is_dangling(self.ptr) { @@ -1131,7 +1202,7 @@ impl Weak { } } - /// Returns true if the two `Weak`s point to the same value (not just values + /// Returns `true` if the two `Weak`s point to the same value (not just values /// that compare as equal). /// /// # Notes @@ -1219,10 +1290,11 @@ impl Clone for Weak { #[stable(feature = "downgraded_weak", since = "1.10.0")] impl Default for Weak { /// Constructs a new `Weak`, without allocating memory. - /// Calling [`upgrade`][Weak::upgrade] on the return value always + /// Calling [`upgrade`] on the return value always /// gives [`None`]. 
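`Arc::pin`, stabilized above for 1.33.0, is just `Pin::new_unchecked(Arc::new(data))` behind a safe constructor; per its new doc comment, when `T` does not implement `Unpin` the value is pinned in memory and cannot be moved. A short usage sketch:

```
use std::pin::Pin;
use std::sync::Arc;

fn main() {
    let pinned: Pin<Arc<String>> = Arc::pin(String::from("stays put"));

    // Shared, read-only access still works through Deref.
    assert_eq!(pinned.as_str(), "stays put");
    assert_eq!(pinned.len(), 9);
}
```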
/// /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// [`upgrade`]: ../../std/sync/struct.Weak.html#method.upgrade /// /// # Examples /// @@ -1287,12 +1359,46 @@ impl Drop for Weak { } } +#[stable(feature = "rust1", since = "1.0.0")] +trait ArcEqIdent { + fn eq(&self, other: &Arc) -> bool; + fn ne(&self, other: &Arc) -> bool; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ArcEqIdent for Arc { + #[inline] + default fn eq(&self, other: &Arc) -> bool { + **self == **other + } + #[inline] + default fn ne(&self, other: &Arc) -> bool { + **self != **other + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ArcEqIdent for Arc { + #[inline] + fn eq(&self, other: &Arc) -> bool { + Arc::ptr_eq(self, other) || **self == **other + } + + #[inline] + fn ne(&self, other: &Arc) -> bool { + !Arc::ptr_eq(self, other) && **self != **other + } +} + #[stable(feature = "rust1", since = "1.0.0")] impl PartialEq for Arc { /// Equality for two `Arc`s. /// /// Two `Arc`s are equal if their inner values are equal. /// + /// If `T` also implements `Eq`, two `Arc`s that point to the same value are + /// always equal. + /// /// # Examples /// /// ``` @@ -1302,14 +1408,18 @@ impl PartialEq for Arc { /// /// assert!(five == Arc::new(5)); /// ``` + #[inline] fn eq(&self, other: &Arc) -> bool { - *(*self) == *(*other) + ArcEqIdent::eq(self, other) } /// Inequality for two `Arc`s. /// /// Two `Arc`s are unequal if their inner values are unequal. /// + /// If `T` also implements `Eq`, two `Arc`s that point to the same value are + /// never unequal. + /// /// # Examples /// /// ``` @@ -1319,10 +1429,12 @@ impl PartialEq for Arc { /// /// assert!(five != Arc::new(6)); /// ``` + #[inline] fn ne(&self, other: &Arc) -> bool { - *(*self) != *(*other) + ArcEqIdent::ne(self, other) } } + #[stable(feature = "rust1", since = "1.0.0")] impl PartialOrd for Arc { /// Partial comparison for two `Arc`s. 
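`ArcEqIdent` mirrors the `RcEqIdent` specialization earlier in this diff: when `T: Eq`, two `Arc`s to the same allocation may be declared equal from the pointer alone, while for merely-`PartialEq` types the inner comparison must still run (the new `float_nan_ne` test later in the diff depends on that). An observable consequence:

```
use std::sync::Arc;

fn main() {
    // f32 is PartialEq but not Eq, so the ptr_eq fast path must not fire:
    // NaN != NaN holds even though both sides are the same allocation.
    let nan = Arc::new(std::f32::NAN);
    assert!(nan != nan);

    // i32 is Eq, so clones of one Arc can compare equal via the pointer check.
    let n = Arc::new(5);
    assert!(n == n.clone());
}
```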
@@ -1436,21 +1548,21 @@ impl Eq for Arc {} #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Display for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Pointer for Arc { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Pointer::fmt(&(&**self as *const T), f) } } @@ -1487,7 +1599,7 @@ impl From for Arc { } #[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> { +impl From<&[T]> for Arc<[T]> { #[inline] fn from(v: &[T]) -> Arc<[T]> { >::from_slice(v) @@ -1495,7 +1607,7 @@ impl<'a, T: Clone> From<&'a [T]> for Arc<[T]> { } #[stable(feature = "shared_from_slice", since = "1.21.0")] -impl<'a> From<&'a str> for Arc { +impl From<&str> for Arc { #[inline] fn from(v: &str) -> Arc { let arc = Arc::<[u8]>::from(v.as_bytes()); @@ -1541,16 +1653,14 @@ mod tests { use std::sync::mpsc::channel; use std::mem::drop; use std::ops::Drop; - use std::option::Option; - use std::option::Option::{None, Some}; - use std::sync::atomic; - use std::sync::atomic::Ordering::{Acquire, SeqCst}; + use std::option::Option::{self, None, Some}; + use std::sync::atomic::{self, Ordering::{Acquire, SeqCst}}; use std::thread; use std::sync::Mutex; use std::convert::From; use super::{Arc, Weak}; - use vec::Vec; + use crate::vec::Vec; struct Canary(*mut atomic::AtomicUsize); @@ -1598,6 +1708,33 @@ mod tests { assert!(Arc::get_mut(&mut x).is_none()); } + #[test] + fn weak_counts() { + assert_eq!(Weak::weak_count(&Weak::::new()), None); + assert_eq!(Weak::strong_count(&Weak::::new()), 0); + + let a = Arc::new(0); + let w = Arc::downgrade(&a); + assert_eq!(Weak::strong_count(&w), 1); + assert_eq!(Weak::weak_count(&w), Some(1)); + let w2 = w.clone(); + assert_eq!(Weak::strong_count(&w), 1); + assert_eq!(Weak::weak_count(&w), Some(2)); + assert_eq!(Weak::strong_count(&w2), 1); + assert_eq!(Weak::weak_count(&w2), Some(2)); + drop(w); + assert_eq!(Weak::strong_count(&w2), 1); + assert_eq!(Weak::weak_count(&w2), Some(1)); + let a2 = a.clone(); + assert_eq!(Weak::strong_count(&w2), 2); + assert_eq!(Weak::weak_count(&w2), Some(1)); + drop(a2); + drop(a); + assert_eq!(Weak::strong_count(&w2), 0); + assert_eq!(Weak::weak_count(&w2), Some(1)); + drop(w2); + } + #[test] fn try_unwrap() { let x = Arc::new(3); @@ -2004,5 +2141,5 @@ impl AsRef for Arc { } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl Unpin for Arc { } diff --git a/src/liballoc/task.rs b/src/liballoc/task.rs deleted file mode 100644 index 7a4eda21a601a..0000000000000 --- a/src/liballoc/task.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Types and Traits for working with asynchronous tasks. 
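The `From<&[T]>`/`From<&str>` impls touched above (only their explicit lifetime parameters are elided now) copy the borrowed data into a fresh reference-counted allocation, producing the unsized `Arc<[T]>` and `Arc<str>`. A quick usage sketch:

```
use std::sync::Arc;

fn main() {
    let s: Arc<str> = Arc::from("shared text");
    let v: Arc<[i32]> = Arc::from(&[1, 2, 3][..]);

    assert_eq!(&*s, "shared text");
    assert_eq!(&*v, &[1, 2, 3][..]);
    // Each From call produced exactly one new allocation with one owner.
    assert_eq!(Arc::strong_count(&v), 1);
}
```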
- -pub use core::task::*; - -#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] -pub use self::if_arc::*; - -#[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] -mod if_arc { - use super::*; - use core::marker::PhantomData; - use core::mem; - use core::ptr::{self, NonNull}; - use sync::Arc; - - /// A way of waking up a specific task. - /// - /// Any task executor must provide a way of signaling that a task it owns - /// is ready to be `poll`ed again. Executors do so by implementing this trait. - pub trait Wake: Send + Sync { - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake` should place - /// the associated task onto this queue. - fn wake(arc_self: &Arc); - - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. This function is like `wake`, but can only be called from the - /// thread on which this `Wake` was created. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake_local` should place - /// the associated task onto this queue. - #[inline] - unsafe fn wake_local(arc_self: &Arc) { - Self::wake(arc_self); - } - } - - #[cfg(all(target_has_atomic = "ptr", target_has_atomic = "cas"))] - struct ArcWrapped(PhantomData); - - unsafe impl UnsafeWake for ArcWrapped { - #[inline] - unsafe fn clone_raw(&self) -> Waker { - let me: *const ArcWrapped = self; - let arc = (*(&me as *const *const ArcWrapped as *const Arc)).clone(); - Waker::from(arc) - } - - #[inline] - unsafe fn drop_raw(&self) { - let mut me: *const ArcWrapped = self; - let me = &mut me as *mut *const ArcWrapped as *mut Arc; - ptr::drop_in_place(me); - } - - #[inline] - unsafe fn wake(&self) { - let me: *const ArcWrapped = self; - T::wake(&*(&me as *const *const ArcWrapped as *const Arc)) - } - - #[inline] - unsafe fn wake_local(&self) { - let me: *const ArcWrapped = self; - T::wake_local(&*(&me as *const *const ArcWrapped as *const Arc)) - } - } - - impl From> for Waker - where T: Wake + 'static, - { - fn from(rc: Arc) -> Self { - unsafe { - let ptr = mem::transmute::, NonNull>>(rc); - Waker::new(ptr) - } - } - } - - /// Creates a `LocalWaker` from a local `wake`. - /// - /// This function requires that `wake` is "local" (created on the current thread). - /// The resulting `LocalWaker` will call `wake.wake_local()` when awoken, and - /// will call `wake.wake()` if awoken after being converted to a `Waker`. - #[inline] - pub unsafe fn local_waker(wake: Arc) -> LocalWaker { - let ptr = mem::transmute::, NonNull>>(wake); - LocalWaker::new(ptr) - } - - struct NonLocalAsLocal(ArcWrapped); - - unsafe impl UnsafeWake for NonLocalAsLocal { - #[inline] - unsafe fn clone_raw(&self) -> Waker { - self.0.clone_raw() - } - - #[inline] - unsafe fn drop_raw(&self) { - self.0.drop_raw() - } - - #[inline] - unsafe fn wake(&self) { - self.0.wake() - } - - #[inline] - unsafe fn wake_local(&self) { - // Since we're nonlocal, we can't call wake_local - self.0.wake() - } - } - - /// Creates a `LocalWaker` from a non-local `wake`. - /// - /// This function is similar to `local_waker`, but does not require that `wake` - /// is local to the current thread. The resulting `LocalWaker` will call - /// `wake.wake()` when awoken. 
- #[inline] - pub fn local_waker_from_nonlocal(wake: Arc) -> LocalWaker { - unsafe { - let ptr = mem::transmute::, NonNull>>(wake); - LocalWaker::new(ptr) - } - } -} diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs index d90c22a3b1892..2759b1b1cac27 100644 --- a/src/liballoc/tests/arc.rs +++ b/src/liballoc/tests/arc.rs @@ -1,15 +1,7 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::any::Any; use std::sync::{Arc, Weak}; +use std::cell::RefCell; +use std::cmp::PartialEq; #[test] fn uninhabited() { @@ -53,3 +45,43 @@ fn trait_object() { b = b.clone(); assert!(b.upgrade().is_none()); } + +#[test] +fn float_nan_ne() { + let x = Arc::new(std::f32::NAN); + assert!(x != x); + assert!(!(x == x)); +} + +#[test] +fn partial_eq() { + struct TestPEq (RefCell); + impl PartialEq for TestPEq { + fn eq(&self, other: &TestPEq) -> bool { + *self.0.borrow_mut() += 1; + *other.0.borrow_mut() += 1; + true + } + } + let x = Arc::new(TestPEq(RefCell::new(0))); + assert!(x == x); + assert!(!(x != x)); + assert_eq!(*x.0.borrow(), 4); +} + +#[test] +fn eq() { + #[derive(Eq)] + struct TestEq (RefCell); + impl PartialEq for TestEq { + fn eq(&self, other: &TestEq) -> bool { + *self.0.borrow_mut() += 1; + *other.0.borrow_mut() += 1; + true + } + } + let x = Arc::new(TestEq(RefCell::new(0))); + assert!(x == x); + assert!(!(x != x)); + assert_eq!(*x.0.borrow(), 0); +} diff --git a/src/liballoc/tests/binary_heap.rs b/src/liballoc/tests/binary_heap.rs index 536291de8f006..0930f8dacd494 100644 --- a/src/liballoc/tests/binary_heap.rs +++ b/src/liballoc/tests/binary_heap.rs @@ -1,18 +1,8 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::cmp; use std::collections::BinaryHeap; use std::collections::binary_heap::{Drain, PeekMut}; use std::panic::{self, AssertUnwindSafe}; -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering}; use rand::{thread_rng, seq::SliceRandom}; @@ -292,8 +282,9 @@ fn assert_covariance() { // // Destructors must be called exactly once per element. #[test] +#[cfg(not(miri))] // Miri does not support panics nor entropy fn panic_safe() { - static DROP_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; + static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0); #[derive(Eq, PartialEq, Ord, Clone, Debug)] struct PanicOrd(T, bool); diff --git a/src/liballoc/tests/btree/map.rs b/src/liballoc/tests/btree/map.rs index 33ef13ab811ce..844afe870766b 100644 --- a/src/liballoc/tests/btree/map.rs +++ b/src/liballoc/tests/btree/map.rs @@ -1,25 +1,18 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
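The binary-heap and slice test hunks in this diff swap the old `ATOMIC_USIZE_INIT` initializer for `AtomicUsize::new(0)`, which is a `const fn` and therefore usable directly in a `static`. A tiny standalone version of that pattern (the `DROPS` static is purely illustrative):

```
use std::sync::atomic::{AtomicUsize, Ordering};

// const-fn construction: no special initializer constant needed.
static DROPS: AtomicUsize = AtomicUsize::new(0);

fn main() {
    DROPS.fetch_add(1, Ordering::SeqCst);
    assert_eq!(DROPS.load(Ordering::SeqCst), 1);
}
```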
- use std::collections::BTreeMap; use std::collections::btree_map::Entry::{Occupied, Vacant}; use std::ops::Bound::{self, Excluded, Included, Unbounded}; use std::rc::Rc; - use std::iter::FromIterator; + use super::DeterministicRng; #[test] fn test_basic_large() { let mut map = BTreeMap::new(); + #[cfg(not(miri))] // Miri is too slow let size = 10000; + #[cfg(miri)] + let size = 200; assert_eq!(map.len(), 0); for i in 0..size { @@ -79,7 +72,10 @@ fn test_basic_small() { #[test] fn test_iter() { + #[cfg(not(miri))] // Miri is too slow let size = 10000; + #[cfg(miri)] + let size = 200; // Forwards let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); @@ -101,7 +97,10 @@ fn test_iter() { #[test] fn test_iter_rev() { + #[cfg(not(miri))] // Miri is too slow let size = 10000; + #[cfg(miri)] + let size = 200; // Forwards let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); @@ -137,7 +136,10 @@ fn test_values_mut() { #[test] fn test_iter_mixed() { + #[cfg(not(miri))] // Miri is too slow let size = 10000; + #[cfg(miri)] + let size = 200; // Forwards let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); @@ -209,7 +211,7 @@ fn test_range_inclusive() { #[test] fn test_range_inclusive_max_value() { - let max = ::std::usize::MAX; + let max = std::usize::MAX; let map: BTreeMap<_, _> = vec![(max, 0)].into_iter().collect(); assert_eq!(map.range(max..=max).collect::>(), &[(&max, &0)]); @@ -259,7 +261,10 @@ fn test_range_backwards_4() { #[test] fn test_range_1000() { + #[cfg(not(miri))] // Miri is too slow let size = 1000; + #[cfg(miri)] + let size = 200; let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); fn test(map: &BTreeMap, size: u32, min: Bound<&u32>, max: Bound<&u32>) { @@ -296,7 +301,10 @@ fn test_range_borrowed_key() { #[test] fn test_range() { + #[cfg(not(miri))] // Miri is too slow let size = 200; + #[cfg(miri)] + let size = 30; let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); for i in 0..size { @@ -315,7 +323,10 @@ fn test_range() { #[test] fn test_range_mut() { + #[cfg(not(miri))] // Miri is too slow let size = 200; + #[cfg(miri)] + let size = 30; let mut map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect(); for i in 0..size { @@ -489,7 +500,10 @@ fn test_bad_zst() { #[test] fn test_clone() { let mut map = BTreeMap::new(); + #[cfg(not(miri))] // Miri is too slow let size = 100; + #[cfg(miri)] + let size = 30; assert_eq!(map.len(), 0); for i in 0..size { @@ -641,6 +655,7 @@ create_append_test!(test_append_145, 145); create_append_test!(test_append_170, 170); create_append_test!(test_append_181, 181); create_append_test!(test_append_239, 239); +#[cfg(not(miri))] // Miri is too slow create_append_test!(test_append_1700, 1700); fn rand_data(len: usize) -> Vec<(u32, u32)> { diff --git a/src/liballoc/tests/btree/mod.rs b/src/liballoc/tests/btree/mod.rs index ae8b18d0c9fd9..4c704d0f8c28f 100644 --- a/src/liballoc/tests/btree/mod.rs +++ b/src/liballoc/tests/btree/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
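The btree test hunks above repeatedly pair `#[cfg(not(miri))]` with `#[cfg(miri)]` to shrink workloads under the Miri interpreter, which runs far more slowly than compiled code. The pattern in isolation, with an illustrative `SIZE` constant:

```
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;

    // Full-size workload normally, a much smaller one when interpreted by Miri.
    #[cfg(not(miri))]
    const SIZE: u32 = 10_000;
    #[cfg(miri)]
    const SIZE: u32 = 200;

    #[test]
    fn insert_many() {
        let map: BTreeMap<u32, u32> = (0..SIZE).map(|i| (i, i)).collect();
        assert_eq!(map.len(), SIZE as usize);
    }
}
```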
- mod map; mod set; diff --git a/src/liballoc/tests/btree/set.rs b/src/liballoc/tests/btree/set.rs index 0330bda5e3238..d52814118b3c7 100644 --- a/src/liballoc/tests/btree/set.rs +++ b/src/liballoc/tests/btree/set.rs @@ -1,16 +1,6 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::BTreeSet; - use std::iter::FromIterator; + use super::DeterministicRng; #[test] @@ -25,6 +15,8 @@ fn test_clone_eq() { #[test] fn test_hash() { + use crate::hash; + let mut x = BTreeSet::new(); let mut y = BTreeSet::new(); @@ -36,7 +28,7 @@ fn test_hash() { y.insert(2); y.insert(1); - assert!(::hash(&x) == ::hash(&y)); + assert!(hash(&x) == hash(&y)); } fn check(a: &[i32], b: &[i32], expected: &[i32], f: F) @@ -77,6 +69,20 @@ fn test_intersection() { check_intersection(&[11, 1, 3, 77, 103, 5, -5], &[2, 11, 77, -9, -42, 5, 3], &[3, 5, 11, 77]); + let large = (0..1000).collect::>(); + check_intersection(&[], &large, &[]); + check_intersection(&large, &[], &[]); + check_intersection(&[-1], &large, &[]); + check_intersection(&large, &[-1], &[]); + check_intersection(&[0], &large, &[0]); + check_intersection(&large, &[0], &[0]); + check_intersection(&[999], &large, &[999]); + check_intersection(&large, &[999], &[999]); + check_intersection(&[1000], &large, &[]); + check_intersection(&large, &[1000], &[]); + check_intersection(&[11, 5000, 1, 3, 77, 8924, 103], + &large, + &[1, 3, 11, 77, 103]); } #[test] @@ -92,6 +98,18 @@ fn test_difference() { check_difference(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 14, 23, 34, 38, 39, 50], &[11, 22, 33, 40, 42]); + let large = (0..1000).collect::>(); + check_difference(&[], &large, &[]); + check_difference(&[-1], &large, &[-1]); + check_difference(&[0], &large, &[]); + check_difference(&[999], &large, &[]); + check_difference(&[1000], &large, &[1000]); + check_difference(&[11, 5000, 1, 3, 77, 8924, 103], + &large, + &[5000, 8924]); + check_difference(&large, &[], &large); + check_difference(&large, &[-1], &large); + check_difference(&large, &[1000], &large); } #[test] @@ -122,6 +140,41 @@ fn test_union() { &[-2, 1, 3, 5, 9, 11, 13, 16, 19, 24]); } +#[test] +// Only tests the simple function definition with respect to intersection +fn test_is_disjoint() { + let one = [1].into_iter().collect::>(); + let two = [2].into_iter().collect::>(); + assert!(one.is_disjoint(&two)); +} + +#[test] +// Also tests the trivial function definition of is_superset +fn test_is_subset() { + fn is_subset(a: &[i32], b: &[i32]) -> bool { + let set_a = a.iter().collect::>(); + let set_b = b.iter().collect::>(); + set_a.is_subset(&set_b) + } + + assert_eq!(is_subset(&[], &[]), true); + assert_eq!(is_subset(&[], &[1, 2]), true); + assert_eq!(is_subset(&[0], &[1, 2]), false); + assert_eq!(is_subset(&[1], &[1, 2]), true); + assert_eq!(is_subset(&[2], &[1, 2]), true); + assert_eq!(is_subset(&[3], &[1, 2]), false); + assert_eq!(is_subset(&[1, 2], &[1]), false); + assert_eq!(is_subset(&[1, 2], &[1, 2]), true); + assert_eq!(is_subset(&[1, 2], &[2, 3]), false); + let large = (0..1000).collect::>(); + assert_eq!(is_subset(&[], &large), true); + assert_eq!(is_subset(&large, &[]), false); + assert_eq!(is_subset(&[-1], &large), false); + assert_eq!(is_subset(&[0], &large), true); + 
assert_eq!(is_subset(&[1, 2], &large), true); + assert_eq!(is_subset(&[999, 1000], &large), false); +} + #[test] fn test_zip() { let mut x = BTreeSet::new(); diff --git a/src/liballoc/tests/cow_str.rs b/src/liballoc/tests/cow_str.rs index 63939686ab7c2..eb6adb159b0fd 100644 --- a/src/liballoc/tests/cow_str.rs +++ b/src/liballoc/tests/cow_str.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::borrow::Cow; // check that Cow<'a, str> implements addition diff --git a/src/liballoc/tests/fmt.rs b/src/liballoc/tests/fmt.rs index 70e21c65a1806..0ad092b4997f4 100644 --- a/src/liballoc/tests/fmt.rs +++ b/src/liballoc/tests/fmt.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fmt; #[test] diff --git a/src/liballoc/tests/heap.rs b/src/liballoc/tests/heap.rs index bf256b23f9ac0..c225ebfa96b91 100644 --- a/src/liballoc/tests/heap.rs +++ b/src/liballoc/tests/heap.rs @@ -1,16 +1,6 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::alloc::{Global, Alloc, Layout, System}; -/// https://github.com/rust-lang/rust/issues/45955 +/// Issue #45955. #[test] fn alloc_system_overaligned_request() { check_overalign_requests(System) diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index e514a8a69c020..90921b6af9f34 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -1,26 +1,12 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![feature(allocator_api)] #![feature(box_syntax)] #![feature(drain_filter)] #![feature(exact_size_is_empty)] #![feature(pattern)] -#![feature(slice_sort_by_cached_key)] -#![feature(str_escape)] +#![feature(repeat_generic_slice)] #![feature(try_reserve)] #![feature(unboxed_closures)] -#![feature(repeat_generic_slice)] - -extern crate core; -extern crate rand; +#![feature(vecdeque_rotate)] use std::hash::{Hash, Hasher}; use std::collections::hash_map::DefaultHasher; diff --git a/src/liballoc/tests/linked_list.rs b/src/liballoc/tests/linked_list.rs index 4e3e855105eb8..0fbfbdccd4537 100644 --- a/src/liballoc/tests/linked_list.rs +++ b/src/liballoc/tests/linked_list.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
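`alloc_system_overaligned_request` above hands the `System` allocator to `check_overalign_requests`, exercising allocation requests whose alignment is much larger than usual (issue #45955). What such an over-aligned request looks like with the stable `std::alloc` API; this is a sketch, not the test's actual body:

```
use std::alloc::{alloc, dealloc, Layout};

fn main() {
    // 64 bytes, but aligned to 256: an "overaligned" request.
    let layout = Layout::from_size_align(64, 256).unwrap();
    unsafe {
        let p = alloc(layout);
        assert!(!p.is_null());
        // The allocator must honour the requested alignment.
        assert_eq!(p as usize % layout.align(), 0);
        dealloc(p, layout);
    }
}
```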
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::LinkedList; #[test] @@ -251,10 +241,12 @@ fn test_eq() { #[test] fn test_hash() { + use crate::hash; + let mut x = LinkedList::new(); let mut y = LinkedList::new(); - assert!(::hash(&x) == ::hash(&y)); + assert!(hash(&x) == hash(&y)); x.push_back(1); x.push_back(2); @@ -264,7 +256,7 @@ fn test_hash() { y.push_front(2); y.push_front(1); - assert!(::hash(&x) == ::hash(&y)); + assert!(hash(&x) == hash(&y)); } #[test] diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs index 9ec7c831444d1..18f82e8041008 100644 --- a/src/liballoc/tests/rc.rs +++ b/src/liballoc/tests/rc.rs @@ -1,15 +1,7 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::any::Any; use std::rc::{Rc, Weak}; +use std::cell::RefCell; +use std::cmp::PartialEq; #[test] fn uninhabited() { @@ -53,3 +45,43 @@ fn trait_object() { b = b.clone(); assert!(b.upgrade().is_none()); } + +#[test] +fn float_nan_ne() { + let x = Rc::new(std::f32::NAN); + assert!(x != x); + assert!(!(x == x)); +} + +#[test] +fn partial_eq() { + struct TestPEq (RefCell); + impl PartialEq for TestPEq { + fn eq(&self, other: &TestPEq) -> bool { + *self.0.borrow_mut() += 1; + *other.0.borrow_mut() += 1; + true + } + } + let x = Rc::new(TestPEq(RefCell::new(0))); + assert!(x == x); + assert!(!(x != x)); + assert_eq!(*x.0.borrow(), 4); +} + +#[test] +fn eq() { + #[derive(Eq)] + struct TestEq (RefCell); + impl PartialEq for TestEq { + fn eq(&self, other: &TestEq) -> bool { + *self.0.borrow_mut() += 1; + *other.0.borrow_mut() += 1; + true + } + } + let x = Rc::new(TestEq(RefCell::new(0))); + assert!(x == x); + assert!(!(x != x)); + assert_eq!(*x.0.borrow(), 0); +} diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs index 6f31e6ca1a1bd..b54c128a0249a 100644 --- a/src/liballoc/tests/slice.rs +++ b/src/liballoc/tests/slice.rs @@ -1,24 +1,13 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
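Several of these test files (`linked_list.rs` above, `btree/set.rs` earlier) now reach the shared `hash` helper as `crate::hash` instead of `::hash`, the 2018-edition spelling. The helper's body is not shown in the diff; it is presumably something along these lines, built on `DefaultHasher`:

```
use std::collections::hash_map::DefaultHasher;
use std::collections::LinkedList;
use std::hash::{Hash, Hasher};

// Assumed shape of the shared test helper: hash a value with the default hasher.
fn hash<T: Hash>(t: &T) -> u64 {
    let mut s = DefaultHasher::new();
    t.hash(&mut s);
    s.finish()
}

fn main() {
    let x: LinkedList<i32> = [1, 2, 3].iter().cloned().collect();
    let y: LinkedList<i32> = [1, 2, 3].iter().cloned().collect();
    // Equal lists must hash equally; this is what test_hash asserts.
    assert_eq!(hash(&x), hash(&y));
}
```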
- use std::cell::Cell; -use std::cmp::Ordering::{Equal, Greater, Less}; -use std::cmp::Ordering; +use std::cmp::Ordering::{self, Equal, Greater, Less}; use std::mem; use std::panic; use std::rc::Rc; -use std::sync::atomic::Ordering::Relaxed; -use std::sync::atomic::{ATOMIC_USIZE_INIT, AtomicUsize}; +use std::sync::atomic::{Ordering::Relaxed, AtomicUsize}; use std::thread; -use rand::{Rng, RngCore, thread_rng, seq::SliceRandom}; +use rand::{Rng, RngCore, thread_rng}; +use rand::seq::SliceRandom; use rand::distributions::Standard; fn square(n: usize) -> usize { @@ -400,6 +389,7 @@ fn test_reverse() { } #[test] +#[cfg(not(miri))] // Miri does not support entropy fn test_sort() { let mut rng = thread_rng(); @@ -476,6 +466,7 @@ fn test_sort() { } #[test] +#[cfg(not(miri))] // Miri does not support entropy fn test_sort_stability() { for len in (2..25).chain(500..510) { for _ in 0..10 { @@ -486,7 +477,7 @@ fn test_sort_stability() { // the second item represents which occurrence of that // number this element is, i.e., the second elements // will occur in sorted order. - let mut orig: Vec<_> = (0..len) + let orig: Vec<_> = (0..len) .map(|_| { let n = thread_rng().gen::() % 10; counts[n] += 1; @@ -1406,6 +1397,7 @@ fn test_box_slice_clone() { #[test] #[allow(unused_must_use)] // here, we care about the side effects of `.clone()` #[cfg_attr(target_os = "emscripten", ignore)] +#[cfg(not(miri))] // Miri does not support threads nor entropy fn test_box_slice_clone_panics() { use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -1510,7 +1502,7 @@ static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [ AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), AtomicUsize::new(0), ]; -static VERSIONS: AtomicUsize = ATOMIC_USIZE_INIT; +static VERSIONS: AtomicUsize = AtomicUsize::new(0); #[derive(Clone, Eq)] struct DropCounter { @@ -1597,6 +1589,7 @@ thread_local!(static SILENCE_PANIC: Cell = Cell::new(false)); #[test] #[cfg_attr(target_os = "emscripten", ignore)] // no threads +#[cfg(not(miri))] // Miri does not support threads nor entropy fn panic_safe() { let prev = panic::take_hook(); panic::set_hook(Box::new(move |info| { diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs index 9ad8ad1fc0775..b197516403f78 100644 --- a/src/liballoc/tests/str.rs +++ b/src/liballoc/tests/str.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
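`test_sort_stability` above checks the property the standard stable sorts promise: elements that compare equal keep their original relative order. The check in miniature, tagging each element with its insertion index:

```
fn main() {
    let mut v = [(1, 0), (0, 1), (1, 2), (0, 3), (1, 4)];
    // Sort only by the first field; the second field records the original position.
    v.sort_by_key(|&(key, _)| key);
    // Within each key, the original order (1 before 3, 0 before 2 before 4) survives.
    assert_eq!(v, [(0, 1), (0, 3), (1, 0), (1, 2), (1, 4)]);
}
```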
- use std::borrow::Cow; use std::cmp::Ordering::{Equal, Greater, Less}; use std::str::from_utf8; @@ -17,7 +7,7 @@ fn test_le() { assert!("" <= ""); assert!("" <= "foo"); assert!("foo" <= "foo"); - assert!("foo" != "bar"); + assert_ne!("foo", "bar"); } #[test] @@ -176,6 +166,7 @@ fn test_join_for_different_lengths_with_long_separator() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn test_unsafe_slice() { assert_eq!("ab", unsafe {"abc".get_unchecked(0..2)}); assert_eq!("bc", unsafe {"abc".get_unchecked(1..3)}); @@ -493,6 +484,7 @@ mod slice_index { #[test] #[cfg(not(target_arch = "asmjs"))] // hits an OOM + #[cfg(not(miri))] // Miri is too slow fn simple_big() { fn a_million_letter_x() -> String { let mut i = 0; @@ -594,7 +586,7 @@ mod slice_index { } mod boundary { - const DATA: &'static str = "abcαβγ"; + const DATA: &str = "abcαβγ"; const BAD_START: usize = 4; const GOOD_START: usize = 3; @@ -658,7 +650,7 @@ mod slice_index { } } - const LOREM_PARAGRAPH: &'static str = "\ + const LOREM_PARAGRAPH: &str = "\ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem \ sit amet dolor ultricies condimentum. Praesent iaculis purus elit, ac malesuada \ quam malesuada in. Duis sed orci eros. Suspendisse sit amet magna mollis, mollis \ @@ -989,15 +981,15 @@ fn test_split_at_boundscheck() { #[test] fn test_escape_unicode() { - assert_eq!("abc".escape_unicode(), "\\u{61}\\u{62}\\u{63}"); - assert_eq!("a c".escape_unicode(), "\\u{61}\\u{20}\\u{63}"); - assert_eq!("\r\n\t".escape_unicode(), "\\u{d}\\u{a}\\u{9}"); - assert_eq!("'\"\\".escape_unicode(), "\\u{27}\\u{22}\\u{5c}"); - assert_eq!("\x00\x01\u{fe}\u{ff}".escape_unicode(), "\\u{0}\\u{1}\\u{fe}\\u{ff}"); - assert_eq!("\u{100}\u{ffff}".escape_unicode(), "\\u{100}\\u{ffff}"); - assert_eq!("\u{10000}\u{10ffff}".escape_unicode(), "\\u{10000}\\u{10ffff}"); - assert_eq!("ab\u{fb00}".escape_unicode(), "\\u{61}\\u{62}\\u{fb00}"); - assert_eq!("\u{1d4ea}\r".escape_unicode(), "\\u{1d4ea}\\u{d}"); + assert_eq!("abc".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{63}"); + assert_eq!("a c".escape_unicode().to_string(), "\\u{61}\\u{20}\\u{63}"); + assert_eq!("\r\n\t".escape_unicode().to_string(), "\\u{d}\\u{a}\\u{9}"); + assert_eq!("'\"\\".escape_unicode().to_string(), "\\u{27}\\u{22}\\u{5c}"); + assert_eq!("\x00\x01\u{fe}\u{ff}".escape_unicode().to_string(), "\\u{0}\\u{1}\\u{fe}\\u{ff}"); + assert_eq!("\u{100}\u{ffff}".escape_unicode().to_string(), "\\u{100}\\u{ffff}"); + assert_eq!("\u{10000}\u{10ffff}".escape_unicode().to_string(), "\\u{10000}\\u{10ffff}"); + assert_eq!("ab\u{fb00}".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{fb00}"); + assert_eq!("\u{1d4ea}\r".escape_unicode().to_string(), "\\u{1d4ea}\\u{d}"); } #[test] @@ -1008,31 +1000,32 @@ fn test_escape_debug() { // they are escaped. However, when the character is unescaped (e.g., for // printable characters), only a single backslash appears (as the character // itself appears in the debug string). 
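// A minimal sketch of why the escape-test asserts around here gain `.to_string()`
// (assuming the iterator-returning API these tests target): `str::escape_unicode`,
// `escape_debug`, and `escape_default` return lazy iterators that implement
// `Display`, so a comparison against a `&str` has to materialize them first:
//
//     assert_eq!("\r\n\t".escape_default().to_string(), "\\r\\n\\t");
//     // or, without allocating, compare char-by-char:
//     assert!("\r\n\t".escape_default().eq("\\r\\n\\t".chars()));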
- assert_eq!("abc".escape_debug(), "abc"); - assert_eq!("a c".escape_debug(), "a c"); - assert_eq!("éèê".escape_debug(), "éèê"); - assert_eq!("\r\n\t".escape_debug(), "\\r\\n\\t"); - assert_eq!("'\"\\".escape_debug(), "\\'\\\"\\\\"); - assert_eq!("\u{7f}\u{ff}".escape_debug(), "\\u{7f}\u{ff}"); - assert_eq!("\u{100}\u{ffff}".escape_debug(), "\u{100}\\u{ffff}"); - assert_eq!("\u{10000}\u{10ffff}".escape_debug(), "\u{10000}\\u{10ffff}"); - assert_eq!("ab\u{200b}".escape_debug(), "ab\\u{200b}"); - assert_eq!("\u{10d4ea}\r".escape_debug(), "\\u{10d4ea}\\r"); - assert_eq!("\u{301}a\u{301}bé\u{e000}".escape_debug(), "\\u{301}a\u{301}bé\\u{e000}"); + assert_eq!("abc".escape_debug().to_string(), "abc"); + assert_eq!("a c".escape_debug().to_string(), "a c"); + assert_eq!("éèê".escape_debug().to_string(), "éèê"); + assert_eq!("\r\n\t".escape_debug().to_string(), "\\r\\n\\t"); + assert_eq!("'\"\\".escape_debug().to_string(), "\\'\\\"\\\\"); + assert_eq!("\u{7f}\u{ff}".escape_debug().to_string(), "\\u{7f}\u{ff}"); + assert_eq!("\u{100}\u{ffff}".escape_debug().to_string(), "\u{100}\\u{ffff}"); + assert_eq!("\u{10000}\u{10ffff}".escape_debug().to_string(), "\u{10000}\\u{10ffff}"); + assert_eq!("ab\u{200b}".escape_debug().to_string(), "ab\\u{200b}"); + assert_eq!("\u{10d4ea}\r".escape_debug().to_string(), "\\u{10d4ea}\\r"); + assert_eq!("\u{301}a\u{301}bé\u{e000}".escape_debug().to_string(), + "\\u{301}a\u{301}bé\\u{e000}"); } #[test] fn test_escape_default() { - assert_eq!("abc".escape_default(), "abc"); - assert_eq!("a c".escape_default(), "a c"); - assert_eq!("éèê".escape_default(), "\\u{e9}\\u{e8}\\u{ea}"); - assert_eq!("\r\n\t".escape_default(), "\\r\\n\\t"); - assert_eq!("'\"\\".escape_default(), "\\'\\\"\\\\"); - assert_eq!("\u{7f}\u{ff}".escape_default(), "\\u{7f}\\u{ff}"); - assert_eq!("\u{100}\u{ffff}".escape_default(), "\\u{100}\\u{ffff}"); - assert_eq!("\u{10000}\u{10ffff}".escape_default(), "\\u{10000}\\u{10ffff}"); - assert_eq!("ab\u{200b}".escape_default(), "ab\\u{200b}"); - assert_eq!("\u{10d4ea}\r".escape_default(), "\\u{10d4ea}\\r"); + assert_eq!("abc".escape_default().to_string(), "abc"); + assert_eq!("a c".escape_default().to_string(), "a c"); + assert_eq!("éèê".escape_default().to_string(), "\\u{e9}\\u{e8}\\u{ea}"); + assert_eq!("\r\n\t".escape_default().to_string(), "\\r\\n\\t"); + assert_eq!("'\"\\".escape_default().to_string(), "\\'\\\"\\\\"); + assert_eq!("\u{7f}\u{ff}".escape_default().to_string(), "\\u{7f}\\u{ff}"); + assert_eq!("\u{100}\u{ffff}".escape_default().to_string(), "\\u{100}\\u{ffff}"); + assert_eq!("\u{10000}\u{10ffff}".escape_default().to_string(), "\\u{10000}\\u{10ffff}"); + assert_eq!("ab\u{200b}".escape_default().to_string(), "ab\\u{200b}"); + assert_eq!("\u{10d4ea}\r".escape_default().to_string(), "\\u{10d4ea}\\r"); } #[test] @@ -1076,9 +1069,10 @@ fn test_rev_iterator() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn test_chars_decoding() { let mut bytes = [0; 4]; - for c in (0..0x110000).filter_map(::std::char::from_u32) { + for c in (0..0x110000).filter_map(std::char::from_u32) { let s = c.encode_utf8(&mut bytes); if Some(c) != s.chars().next() { panic!("character {:x}={} does not decode correctly", c as u32, c); @@ -1087,9 +1081,10 @@ fn test_chars_decoding() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn test_chars_rev_decoding() { let mut bytes = [0; 4]; - for c in (0..0x110000).filter_map(::std::char::from_u32) { + for c in (0..0x110000).filter_map(std::char::from_u32) { let s = c.encode_utf8(&mut bytes); if Some(c) != s.chars().rev().next() 
{ panic!("character {:x}={} does not decode correctly", c as u32, c); @@ -1375,6 +1370,7 @@ fn test_bool_from_str() { assert_eq!("not even a boolean".parse::().ok(), None); } +#[cfg(not(miri))] // Miri is too slow fn check_contains_all_substrings(s: &str) { assert!(s.contains("")); for i in 0..s.len() { @@ -1385,6 +1381,7 @@ fn check_contains_all_substrings(s: &str) { } #[test] +#[cfg(not(miri))] // Miri is too slow fn strslice_issue_16589() { assert!("bananas".contains("nana")); @@ -1401,6 +1398,7 @@ fn strslice_issue_16878() { #[test] +#[cfg(not(miri))] // Miri is too slow fn test_strslice_contains() { let x = "There are moments, Jeeves, when one asks oneself, 'Do trousers matter?'"; check_contains_all_substrings(x); @@ -1609,8 +1607,7 @@ fn test_repeat() { } mod pattern { - use std::str::pattern::Pattern; - use std::str::pattern::{Searcher, ReverseSearcher}; + use std::str::pattern::{Pattern, Searcher, ReverseSearcher}; use std::str::pattern::SearchStep::{self, Match, Reject, Done}; macro_rules! make_test { diff --git a/src/liballoc/tests/string.rs b/src/liballoc/tests/string.rs index befb36baeef1e..7e75b8c4f28c8 100644 --- a/src/liballoc/tests/string.rs +++ b/src/liballoc/tests/string.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::borrow::Cow; use std::collections::CollectionAllocErr::*; use std::mem::size_of; @@ -31,7 +21,7 @@ impl<'a> IntoCow<'a, str> for &'a str { #[test] fn test_from_str() { - let owned: Option<::std::string::String> = "string".parse().ok(); + let owned: Option = "string".parse().ok(); assert_eq!(owned.as_ref().map(|s| &**s), Some("string")); } @@ -132,7 +122,7 @@ fn test_from_utf16() { let s_as_utf16 = s.encode_utf16().collect::>(); let u_as_string = String::from_utf16(&u).unwrap(); - assert!(::core::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); + assert!(core::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok())); assert_eq!(s_as_utf16, u); assert_eq!(u_as_string, s); @@ -533,6 +523,7 @@ fn test_reserve_exact() { } #[test] +#[cfg(not(miri))] // Miri does not support signalling OOM fn test_try_reserve() { // These are the interesting cases: @@ -610,6 +601,7 @@ fn test_try_reserve() { } #[test] +#[cfg(not(miri))] // Miri does not support signalling OOM fn test_try_reserve_exact() { // This is exactly the same as test_try_reserve with the method changed. diff --git a/src/liballoc/tests/vec.rs b/src/liballoc/tests/vec.rs index 509195cd047d4..545332bcd6a2f 100644 --- a/src/liballoc/tests/vec.rs +++ b/src/liballoc/tests/vec.rs @@ -1,12 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
+#![cfg(not(miri))] use std::borrow::Cow; use std::mem::size_of; @@ -18,7 +10,7 @@ struct DropCounter<'a> { count: &'a mut u32, } -impl<'a> Drop for DropCounter<'a> { +impl Drop for DropCounter<'_> { fn drop(&mut self) { *self.count += 1; } @@ -648,7 +640,7 @@ fn test_splice_unbounded() { fn test_splice_forget() { let mut v = vec![1, 2, 3, 4, 5]; let a = [10, 11, 12]; - ::std::mem::forget(v.splice(2..4, a.iter().cloned())); + std::mem::forget(v.splice(2..4, a.iter().cloned())); assert_eq!(v, &[1, 2]); } diff --git a/src/liballoc/tests/vec_deque.rs b/src/liballoc/tests/vec_deque.rs index 1f2a7211c657b..e0fe10a55f55c 100644 --- a/src/liballoc/tests/vec_deque.rs +++ b/src/liballoc/tests/vec_deque.rs @@ -1,22 +1,13 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use std::collections::VecDeque; use std::fmt::Debug; -use std::collections::vec_deque::{Drain}; +use std::collections::{VecDeque, vec_deque::Drain}; use std::collections::CollectionAllocErr::*; use std::mem::size_of; use std::{usize, isize}; -use self::Taggy::*; -use self::Taggypar::*; +use crate::hash; + +use Taggy::*; +use Taggypar::*; #[test] fn test_simple() { @@ -593,7 +584,7 @@ fn test_hash() { y.push_back(2); y.push_back(3); - assert!(::hash(&x) == ::hash(&y)); + assert!(hash(&x) == hash(&y)); } #[test] @@ -609,7 +600,7 @@ fn test_hash_after_rotation() { *elt -= 1; } ring.push_back(len - 1); - assert_eq!(::hash(&orig), ::hash(&ring)); + assert_eq!(hash(&orig), hash(&ring)); assert_eq!(orig, ring); assert_eq!(ring, orig); } @@ -952,7 +943,10 @@ fn test_append_permutations() { out } + #[cfg(not(miri))] // Miri is too slow const MAX: usize = 5; + #[cfg(miri)] + const MAX: usize = 3; // Many different permutations of both the `VecDeque` getting appended to // and the one getting appended are generated to check `append`. @@ -1008,7 +1002,7 @@ struct DropCounter<'a> { count: &'a mut u32, } -impl<'a> Drop for DropCounter<'a> { +impl Drop for DropCounter<'_> { fn drop(&mut self) { *self.count += 1; } @@ -1129,6 +1123,7 @@ fn test_reserve_exact_2() { } #[test] +#[cfg(not(miri))] // Miri does not support signalling OOM fn test_try_reserve() { // These are the interesting cases: @@ -1230,6 +1225,7 @@ fn test_try_reserve() { } #[test] +#[cfg(not(miri))] // Miri does not support signalling OOM fn test_try_reserve_exact() { // This is exactly the same as test_try_reserve with the method changed. 
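// The hunk below adds rotate and try_fold coverage for `VecDeque`. A minimal
// sketch of the property those tests lean on: after a rotation the contents may
// wrap around the ring buffer, so iteration (and `try_fold`) has to walk both
// halves returned by `as_slices`. Illustrative only, using the same public API
// the new tests exercise:
fn rotated_try_fold_sketch() {
    use std::collections::VecDeque;
    let mut v: VecDeque<u32> = (0..12).collect();
    v.rotate_left(5); // logical order is preserved; the storage may now wrap
    let (front, back) = v.as_slices();
    let by_halves: u32 = front.iter().chain(back).sum();
    assert_eq!(Ok::<_, ()>(by_halves), v.iter().try_fold(0, |a, b| Ok(a + b)));
}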
@@ -1309,3 +1305,238 @@ fn test_try_reserve_exact() { } } + +#[test] +fn test_rotate_nop() { + let mut v: VecDeque<_> = (0..10).collect(); + assert_unchanged(&v); + + v.rotate_left(0); + assert_unchanged(&v); + + v.rotate_left(10); + assert_unchanged(&v); + + v.rotate_right(0); + assert_unchanged(&v); + + v.rotate_right(10); + assert_unchanged(&v); + + v.rotate_left(3); + v.rotate_right(3); + assert_unchanged(&v); + + v.rotate_right(3); + v.rotate_left(3); + assert_unchanged(&v); + + v.rotate_left(6); + v.rotate_right(6); + assert_unchanged(&v); + + v.rotate_right(6); + v.rotate_left(6); + assert_unchanged(&v); + + v.rotate_left(3); + v.rotate_left(7); + assert_unchanged(&v); + + v.rotate_right(4); + v.rotate_right(6); + assert_unchanged(&v); + + v.rotate_left(1); + v.rotate_left(2); + v.rotate_left(3); + v.rotate_left(4); + assert_unchanged(&v); + + v.rotate_right(1); + v.rotate_right(2); + v.rotate_right(3); + v.rotate_right(4); + assert_unchanged(&v); + + fn assert_unchanged(v: &VecDeque) { + assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]); + } +} + +#[test] +fn test_rotate_left_parts() { + let mut v: VecDeque<_> = (1..=7).collect(); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[5, 6, 7, 1][..], &[2, 3, 4][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[7, 1][..], &[2, 3, 4, 5, 6][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7, 1][..], &[][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[4, 5, 6, 7, 1, 2][..], &[3][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[6, 7, 1, 2][..], &[3, 4, 5][..])); + v.rotate_left(2); + assert_eq!(v.as_slices(), (&[1, 2][..], &[3, 4, 5, 6, 7][..])); +} + +#[test] +fn test_rotate_right_parts() { + let mut v: VecDeque<_> = (1..=7).collect(); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[4, 5, 6, 7][..], &[1, 2, 3][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7][..], &[1][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[7, 1, 2, 3, 4, 5, 6][..], &[][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[5, 6][..], &[7, 1, 2, 3, 4][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[3, 4, 5, 6][..], &[7, 1, 2][..])); + v.rotate_right(2); + assert_eq!(v.as_slices(), (&[1, 2, 3, 4, 5, 6][..], &[7][..])); +} + +#[test] +fn test_rotate_left_random() { + let shifts = [ + 6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1, + 4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11, + 9, 4, 12, 3, 12, 9, 11, 1, 7, 9, 7, 2, + ]; + let n = 12; + let mut v: VecDeque<_> = (0..n).collect(); + let mut total_shift = 0; + for shift in shifts.iter().cloned() { + v.rotate_left(shift); + total_shift += shift; + for i in 0..n { + assert_eq!(v[i], (i + total_shift) % n); + } + } +} + +#[test] +fn test_rotate_right_random() { + let shifts = [ + 6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1, + 4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11, + 9, 4, 12, 3, 12, 9, 11, 1, 7, 9, 7, 2, + ]; + let n = 12; + let mut v: VecDeque<_> = (0..n).collect(); + let mut total_shift = 0; + for shift in shifts.iter().cloned() { + v.rotate_right(shift); + total_shift += shift; + for i in 0..n { + assert_eq!(v[(i + total_shift) % n], i); + } + } +} + +#[test] +fn test_try_fold_empty() { + assert_eq!(Some(0), VecDeque::::new().iter().try_fold(0, |_, _| None)); +} + +#[test] +fn test_try_fold_none() { + let v: VecDeque = (0..12).collect(); + assert_eq!(None, 
v.into_iter().try_fold(0, |a, b| + if b < 11 { Some(a + b) } else { None })); +} + +#[test] +fn test_try_fold_ok() { + let v: VecDeque = (0..12).collect(); + assert_eq!(Ok::<_, ()>(66), v.into_iter().try_fold(0, |a, b| Ok(a + b))); +} + +#[test] +fn test_try_fold_unit() { + let v: VecDeque<()> = std::iter::repeat(()).take(42).collect(); + assert_eq!(Some(()), v.into_iter().try_fold((), |(), ()| Some(()))); +} + + +#[test] +fn test_try_fold_unit_none() { + let v: std::collections::VecDeque<()> = [(); 10].iter().cloned().collect(); + let mut iter = v.into_iter(); + assert!(iter.try_fold((), |_, _| None).is_none()); + assert_eq!(iter.len(), 9); +} + +#[test] +fn test_try_fold_rotated() { + let mut v: VecDeque<_> = (0..12).collect(); + for n in 0..10 { + if n & 1 == 0 { + v.rotate_left(n); + } else { + v.rotate_right(n); + } + assert_eq!(Ok::<_, ()>(66), v.iter().try_fold(0, |a, b| Ok(a + b))); + } +} + +#[test] +fn test_try_fold_moves_iter() { + let v: VecDeque<_> = [10, 20, 30, 40, 100, 60, 70, 80, 90].iter().collect(); + let mut iter = v.into_iter(); + assert_eq!(iter.try_fold(0_i8, |acc, &x| acc.checked_add(x)), None); + assert_eq!(iter.next(), Some(&60)); +} + +#[test] +fn test_try_fold_exhaust_wrap() { + let mut v = VecDeque::with_capacity(7); + v.push_back(1); + v.push_back(1); + v.push_back(1); + v.pop_front(); + v.pop_front(); + let mut iter = v.iter(); + let _ = iter.try_fold(0, |_, _| Some(1)); + assert!(iter.is_empty()); +} + +#[test] +fn test_try_fold_wraparound() { + let mut v = VecDeque::with_capacity(8); + v.push_back(7); + v.push_back(8); + v.push_back(9); + v.push_front(2); + v.push_front(1); + let mut iter = v.iter(); + let _ = iter.find(|&&x| x == 2); + assert_eq!(Some(&7), iter.next()); +} + +#[test] +fn test_try_rfold_rotated() { + let mut v: VecDeque<_> = (0..12).collect(); + for n in 0..10 { + if n & 1 == 0 { + v.rotate_left(n); + } else { + v.rotate_right(n); + } + assert_eq!(Ok::<_, ()>(66), v.iter().try_rfold(0, |a, b| Ok(a + b))); + } +} + +#[test] +fn test_try_rfold_moves_iter() { + let v : VecDeque<_> = [10, 20, 30, 40, 100, 60, 70, 80, 90].iter().collect(); + let mut iter = v.into_iter(); + assert_eq!(iter.try_rfold(0_i8, |acc, &x| acc.checked_add(x)), None); + assert_eq!(iter.next_back(), Some(&70)); +} diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 63af69dda1dce..cd62c3e05244c 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A contiguous growable array type with heap-allocated contents, written //! `Vec`. //! 
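// Further down in this file's diff, the `Vec::set_len` docs are rewritten around
// two safety conditions (`new_len <= capacity`, and every element below `new_len`
// initialized). A minimal sketch of that initialize-then-set_len pattern, without
// the FFI framing the new doc example uses (names here are illustrative only):
fn set_len_sketch() {
    let mut v: Vec<u8> = Vec::with_capacity(4);
    unsafe {
        let p = v.as_mut_ptr();
        for i in 0..4 {
            // Initialize every element below the length we are about to claim.
            std::ptr::write(p.add(i), i as u8);
        }
        // Both conditions hold: 4 <= capacity and elements 0..4 are initialized.
        v.set_len(4);
    }
    assert_eq!(v, [0, 1, 2, 3]);
}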
@@ -73,18 +63,15 @@ use core::intrinsics::{arith_offset, assume}; use core::iter::{FromIterator, FusedIterator, TrustedLen}; use core::marker::PhantomData; use core::mem; +use core::ops::{self, Index, IndexMut, RangeBounds}; use core::ops::Bound::{Excluded, Included, Unbounded}; -use core::ops::{Index, IndexMut, RangeBounds}; -use core::ops; -use core::ptr; -use core::ptr::NonNull; -use core::slice; - -use collections::CollectionAllocErr; -use borrow::ToOwned; -use borrow::Cow; -use boxed::Box; -use raw_vec::RawVec; +use core::ptr::{self, NonNull}; +use core::slice::{self, SliceIndex}; + +use crate::borrow::{ToOwned, Cow}; +use crate::collections::CollectionAllocErr; +use crate::boxed::Box; +use crate::raw_vec::RawVec; /// A contiguous growable array type, written `Vec` but pronounced 'vector'. /// @@ -476,7 +463,7 @@ impl Vec { /// Does nothing if the capacity is already sufficient. /// /// Note that the allocator may give the collection more space than it - /// requests. Therefore capacity can not be relied upon to be precisely + /// requests. Therefore, capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Panics @@ -538,7 +525,7 @@ impl Vec { /// Does nothing if the capacity is already sufficient. /// /// Note that the allocator may give the collection more space than it - /// requests. Therefore capacity can not be relied upon to be precisely + /// requests. Therefore, capacity can not be relied upon to be precisely /// minimal. Prefer `reserve` if future insertions are expected. /// /// # Errors @@ -748,53 +735,90 @@ impl Vec { self } - /// Sets the length of a vector. + /// Forces the length of the vector to `new_len`. /// - /// This will explicitly set the size of the vector, without actually - /// modifying its buffers, so it is up to the caller to ensure that the - /// vector is actually the specified size. + /// This is a low-level operation that maintains none of the normal + /// invariants of the type. Normally changing the length of a vector + /// is done using one of the safe operations instead, such as + /// [`truncate`], [`resize`], [`extend`], or [`clear`]. /// - /// # Examples + /// [`truncate`]: #method.truncate + /// [`resize`]: #method.resize + /// [`extend`]: #method.extend-1 + /// [`clear`]: #method.clear /// - /// ``` - /// use std::ptr; + /// # Safety /// - /// let mut vec = vec!['r', 'u', 's', 't']; + /// - `new_len` must be less than or equal to [`capacity()`]. + /// - The elements at `old_len..new_len` must be initialized. /// - /// unsafe { - /// ptr::drop_in_place(&mut vec[3]); - /// vec.set_len(3); + /// [`capacity()`]: #method.capacity + /// + /// # Examples + /// + /// This method can be useful for situations in which the vector + /// is serving as a buffer for other code, particularly over FFI: + /// + /// ```no_run + /// # #![allow(dead_code)] + /// # // This is just a minimal skeleton for the doc example; + /// # // don't use this as a starting point for a real library. + /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void } + /// # const Z_OK: i32 = 0; + /// # extern "C" { + /// # fn deflateGetDictionary( + /// # strm: *mut std::ffi::c_void, + /// # dictionary: *mut u8, + /// # dictLength: *mut usize, + /// # ) -> i32; + /// # } + /// # impl StreamWrapper { + /// pub fn get_dictionary(&self) -> Option> { + /// // Per the FFI method's docs, "32768 bytes is always enough". 
+ /// let mut dict = Vec::with_capacity(32_768); + /// let mut dict_length = 0; + /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that: + /// // 1. `dict_length` elements were initialized. + /// // 2. `dict_length` <= the capacity (32_768) + /// // which makes `set_len` safe to call. + /// unsafe { + /// // Make the FFI call... + /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length); + /// if r == Z_OK { + /// // ...and update the length to what was initialized. + /// dict.set_len(dict_length); + /// Some(dict) + /// } else { + /// None + /// } + /// } /// } - /// assert_eq!(vec, ['r', 'u', 's']); + /// # } /// ``` /// - /// In this example, there is a memory leak since the memory locations - /// owned by the inner vectors were not freed prior to the `set_len` call: + /// While the following example is sound, there is a memory leak since + /// the inner vectors were not freed prior to the `set_len` call: /// /// ``` /// let mut vec = vec![vec![1, 0, 0], /// vec![0, 1, 0], /// vec![0, 0, 1]]; + /// // SAFETY: + /// // 1. `old_len..0` is empty so no elements need to be initialized. + /// // 2. `0 <= capacity` always holds whatever `capacity` is. /// unsafe { /// vec.set_len(0); /// } /// ``` /// - /// In this example, the vector gets expanded from zero to four items - /// without any memory allocations occurring, resulting in vector - /// values of unallocated memory: - /// - /// ``` - /// let mut vec: Vec = Vec::new(); - /// - /// unsafe { - /// vec.set_len(4); - /// } - /// ``` + /// Normally, here, one would use [`clear`] instead to correctly drop + /// the contents and thus not leak memory. #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub unsafe fn set_len(&mut self, len: usize) { - self.len = len; + pub unsafe fn set_len(&mut self, new_len: usize) { + debug_assert!(new_len <= self.capacity()); + + self.len = new_len; } /// Removes an element from the vector and returns it. @@ -1091,7 +1115,7 @@ impl Vec { /// assert_eq!(v, &[]); /// ``` #[stable(feature = "drain", since = "1.6.0")] - pub fn drain(&mut self, range: R) -> Drain + pub fn drain(&mut self, range: R) -> Drain<'_, T> where R: RangeBounds { // Memory safety @@ -1236,13 +1260,11 @@ impl Vec { /// This method uses a closure to create new values on every push. If /// you'd rather [`Clone`] a given value, use [`resize`]. If you want /// to use the [`Default`] trait to generate values, you can pass - /// [`Default::default()`] as the second argument.. + /// [`Default::default()`] as the second argument. /// /// # Examples /// /// ``` - /// #![feature(vec_resize_with)] - /// /// let mut vec = vec![1, 2, 3]; /// vec.resize_with(5, Default::default); /// assert_eq!(vec, [1, 2, 3, 0, 0]); @@ -1255,7 +1277,7 @@ impl Vec { /// /// [`resize`]: #method.resize /// [`Clone`]: ../../std/clone/trait.Clone.html - #[unstable(feature = "vec_resize_with", issue = "41758")] + #[stable(feature = "vec_resize_with", since = "1.33.0")] pub fn resize_with(&mut self, new_len: usize, f: F) where F: FnMut() -> T { @@ -1343,6 +1365,7 @@ impl Vec { /// # Examples /// /// ``` + /// # #![allow(deprecated)] /// #![feature(vec_resize_default)] /// /// let mut vec = vec![1, 2, 3]; @@ -1359,6 +1382,9 @@ impl Vec { /// [`Default`]: ../../std/default/trait.Default.html /// [`Clone`]: ../../std/clone/trait.Clone.html #[unstable(feature = "vec_resize_default", issue = "41758")] + #[rustc_deprecated(reason = "This is moving towards being removed in favor \ + of `.resize_with(Default::default)`. 
If you disagree, please comment \ + in the tracking issue.", since = "1.33.0")] pub fn resize_default(&mut self, new_len: usize) { let len = self.len(); @@ -1452,7 +1478,7 @@ impl<'a> SetLenOnDrop<'a> { } } -impl<'a> Drop for SetLenOnDrop<'a> { +impl Drop for SetLenOnDrop<'_> { #[inline] fn drop(&mut self) { *self.len = self.local_len; @@ -1584,6 +1610,7 @@ impl_is_zero!(u64, |x| x == 0); impl_is_zero!(u128, |x| x == 0); impl_is_zero!(usize, |x| x == 0); +impl_is_zero!(bool, |x| x == false); impl_is_zero!(char, |x| x == '\0'); impl_is_zero!(f32, |x: f32| x.to_bits() == 0); @@ -1621,7 +1648,7 @@ impl Clone for Vec { // NB see the slice::hack module in slice.rs for more information #[cfg(test)] fn clone(&self) -> Vec { - ::slice::to_vec(&**self) + crate::slice::to_vec(&**self) } fn clone_from(&mut self, other: &Vec) { @@ -1642,10 +1669,7 @@ impl Hash for Vec { message="vector indices are of type `usize` or ranges of `usize`", label="vector indices are of type `usize` or ranges of `usize`", )] -impl Index for Vec -where - I: ::core::slice::SliceIndex<[T]>, -{ +impl> Index for Vec { type Output = I::Output; #[inline] @@ -1659,10 +1683,7 @@ where message="vector indices are of type `usize` or ranges of `usize`", label="vector indices are of type `usize` or ranges of `usize`", )] -impl IndexMut for Vec -where - I: ::core::slice::SliceIndex<[T]>, -{ +impl> IndexMut for Vec { #[inline] fn index_mut(&mut self, index: I) -> &mut Self::Output { IndexMut::index_mut(&mut **self, index) @@ -1956,7 +1977,7 @@ impl Vec { /// ``` #[inline] #[stable(feature = "vec_splice", since = "1.21.0")] - pub fn splice(&mut self, range: R, replace_with: I) -> Splice + pub fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter> where R: RangeBounds, I: IntoIterator { Splice { @@ -2011,7 +2032,7 @@ impl Vec { /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]); /// ``` #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] - pub fn drain_filter(&mut self, filter: F) -> DrainFilter + pub fn drain_filter(&mut self, filter: F) -> DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool, { let old_len = self.len(); @@ -2127,7 +2148,7 @@ impl Default for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Vec { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&**self, f) } } @@ -2161,26 +2182,26 @@ impl AsMut<[T]> for Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a, T: Clone> From<&'a [T]> for Vec { +impl From<&[T]> for Vec { #[cfg(not(test))] - fn from(s: &'a [T]) -> Vec { + fn from(s: &[T]) -> Vec { s.to_vec() } #[cfg(test)] - fn from(s: &'a [T]) -> Vec { - ::slice::to_vec(s) + fn from(s: &[T]) -> Vec { + crate::slice::to_vec(s) } } #[stable(feature = "vec_from_mut", since = "1.19.0")] -impl<'a, T: Clone> From<&'a mut [T]> for Vec { +impl From<&mut [T]> for Vec { #[cfg(not(test))] - fn from(s: &'a mut [T]) -> Vec { + fn from(s: &mut [T]) -> Vec { s.to_vec() } #[cfg(test)] - fn from(s: &'a mut [T]) -> Vec { - ::slice::to_vec(s) + fn from(s: &mut [T]) -> Vec { + crate::slice::to_vec(s) } } @@ -2210,8 +2231,8 @@ impl From> for Box<[T]> { } #[stable(feature = "rust1", since = "1.0.0")] -impl<'a> From<&'a str> for Vec { - fn from(s: &'a str) -> Vec { +impl From<&str> for Vec { + fn from(s: &str) -> Vec { From::from(s.as_bytes()) } } @@ -2270,7 +2291,7 @@ pub struct IntoIter { #[stable(feature = "vec_intoiter_debug", since = "1.13.0")] impl fmt::Debug for IntoIter { - fn 
fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("IntoIter") .field(&self.as_slice()) .finish() @@ -2439,21 +2460,40 @@ pub struct Drain<'a, T: 'a> { } #[stable(feature = "collection_debug", since = "1.17.0")] -impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl fmt::Debug for Drain<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Drain") .field(&self.iter.as_slice()) .finish() } } +impl<'a, T> Drain<'a, T> { + /// Returns the remaining items of this iterator as a slice. + /// + /// # Examples + /// + /// ``` + /// # #![feature(vec_drain_as_slice)] + /// let mut vec = vec!['a', 'b', 'c']; + /// let mut drain = vec.drain(..); + /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']); + /// let _ = drain.next().unwrap(); + /// assert_eq!(drain.as_slice(), &['b', 'c']); + /// ``` + #[unstable(feature = "vec_drain_as_slice", reason = "recently added", issue = "58957")] + pub fn as_slice(&self) -> &[T] { + self.iter.as_slice() + } +} + #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {} +unsafe impl Sync for Drain<'_, T> {} #[stable(feature = "drain", since = "1.6.0")] -unsafe impl<'a, T: Send> Send for Drain<'a, T> {} +unsafe impl Send for Drain<'_, T> {} #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T> Iterator for Drain<'a, T> { +impl Iterator for Drain<'_, T> { type Item = T; #[inline] @@ -2467,7 +2507,7 @@ impl<'a, T> Iterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T> DoubleEndedIterator for Drain<'a, T> { +impl DoubleEndedIterator for Drain<'_, T> { #[inline] fn next_back(&mut self) -> Option { self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) }) @@ -2475,7 +2515,7 @@ impl<'a, T> DoubleEndedIterator for Drain<'a, T> { } #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T> Drop for Drain<'a, T> { +impl Drop for Drain<'_, T> { fn drop(&mut self) { // exhaust self first self.for_each(drop); @@ -2499,14 +2539,14 @@ impl<'a, T> Drop for Drain<'a, T> { #[stable(feature = "drain", since = "1.6.0")] -impl<'a, T> ExactSizeIterator for Drain<'a, T> { +impl ExactSizeIterator for Drain<'_, T> { fn is_empty(&self) -> bool { self.iter.is_empty() } } #[stable(feature = "fused", since = "1.26.0")] -impl<'a, T> FusedIterator for Drain<'a, T> {} +impl FusedIterator for Drain<'_, T> {} /// A splicing iterator for `Vec`. 
/// @@ -2523,7 +2563,7 @@ pub struct Splice<'a, I: Iterator + 'a> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl<'a, I: Iterator> Iterator for Splice<'a, I> { +impl Iterator for Splice<'_, I> { type Item = I::Item; fn next(&mut self) -> Option { @@ -2536,18 +2576,18 @@ impl<'a, I: Iterator> Iterator for Splice<'a, I> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl<'a, I: Iterator> DoubleEndedIterator for Splice<'a, I> { +impl DoubleEndedIterator for Splice<'_, I> { fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl<'a, I: Iterator> ExactSizeIterator for Splice<'a, I> {} +impl ExactSizeIterator for Splice<'_, I> {} #[stable(feature = "vec_splice", since = "1.21.0")] -impl<'a, I: Iterator> Drop for Splice<'a, I> { +impl Drop for Splice<'_, I> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); @@ -2588,11 +2628,11 @@ impl<'a, I: Iterator> Drop for Splice<'a, I> { } /// Private helper methods for `Splice::drop` -impl<'a, T> Drain<'a, T> { +impl Drain<'_, T> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. - /// Return whether we filled the entire range. (`replace_with.next()` didn’t return `None`.) + /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.) unsafe fn fill>(&mut self, replace_with: &mut I) -> bool { let vec = self.vec.as_mut(); let range_start = vec.len; @@ -2612,7 +2652,7 @@ impl<'a, T> Drain<'a, T> { true } - /// Make room for inserting more elements before the tail. + /// Makes room for inserting more elements before the tail. unsafe fn move_tail(&mut self, extra_capacity: usize) { let vec = self.vec.as_mut(); let used_capacity = self.tail_start + self.tail_len; @@ -2629,7 +2669,7 @@ impl<'a, T> Drain<'a, T> { /// An iterator produced by calling `drain_filter` on Vec. 
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] #[derive(Debug)] -pub struct DrainFilter<'a, T: 'a, F> +pub struct DrainFilter<'a, T, F> where F: FnMut(&mut T) -> bool, { vec: &'a mut Vec, @@ -2640,7 +2680,7 @@ pub struct DrainFilter<'a, T: 'a, F> } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl<'a, T, F> Iterator for DrainFilter<'a, T, F> +impl Iterator for DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool, { type Item = T; @@ -2674,7 +2714,7 @@ impl<'a, T, F> Iterator for DrainFilter<'a, T, F> } #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] -impl<'a, T, F> Drop for DrainFilter<'a, T, F> +impl Drop for DrainFilter<'_, T, F> where F: FnMut(&mut T) -> bool, { fn drop(&mut self) { diff --git a/src/libarena/Cargo.toml b/src/libarena/Cargo.toml index e2af67dd92861..aa1bf38b99597 100644 --- a/src/libarena/Cargo.toml +++ b/src/libarena/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "arena" version = "0.0.0" +edition = "2018" [lib] name = "arena" @@ -9,4 +10,5 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] -rustc_data_structures = { path = "../librustc_data_structures" } \ No newline at end of file +rustc_data_structures = { path = "../librustc_data_structures" } +smallvec = { version = "0.6.7", features = ["union", "may_dangle"] } diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index aef3edd9eb64a..6fa15884f5abb 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The arena, a fast but limited type of allocator. //! //! Arenas are a type of allocator that destroy the objects within, all at @@ -18,24 +8,25 @@ //! This crate implements `TypedArena`, a simple arena that can only hold //! objects of a single type. -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(no_crate_inject, attr(deny(warnings))))] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] + #![feature(alloc)] #![feature(core_intrinsics)] #![feature(dropck_eyepatch)] -#![feature(nll)] #![feature(raw_vec_internals)] #![cfg_attr(test, feature(test))] #![allow(deprecated)] extern crate alloc; -extern crate rustc_data_structures; +use rustc_data_structures::cold_path; use rustc_data_structures::sync::MTLock; +use smallvec::SmallVec; use std::cell::{Cell, RefCell}; use std::cmp; @@ -67,6 +58,8 @@ pub struct TypedArena { struct TypedArenaChunk { /// The raw storage for the arena chunk. storage: RawVec, + /// The number of valid entries in the chunk. 
+ entries: usize, } impl TypedArenaChunk { @@ -74,6 +67,7 @@ impl TypedArenaChunk { unsafe fn new(capacity: usize) -> TypedArenaChunk { TypedArenaChunk { storage: RawVec::with_capacity(capacity), + entries: 0, } } @@ -129,6 +123,11 @@ impl Default for TypedArena { } impl TypedArena { + pub fn in_arena(&self, ptr: *const T) -> bool { + let ptr = ptr as *const T as *mut T; + + self.chunks.borrow().iter().any(|chunk| chunk.start() <= ptr && ptr < chunk.end()) + } /// Allocates an object in the `TypedArena`, returning a reference to it. #[inline] pub fn alloc(&self, object: T) -> &mut T { @@ -156,6 +155,34 @@ impl TypedArena { } } + #[inline] + fn can_allocate(&self, len: usize) -> bool { + let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize; + let at_least_bytes = len.checked_mul(mem::size_of::()).unwrap(); + available_capacity_bytes >= at_least_bytes + } + + /// Ensures there's enough space in the current chunk to fit `len` objects. + #[inline] + fn ensure_capacity(&self, len: usize) { + if !self.can_allocate(len) { + self.grow(len); + debug_assert!(self.can_allocate(len)); + } + } + + #[inline] + unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T { + assert!(mem::size_of::() != 0); + assert!(len != 0); + + self.ensure_capacity(len); + + let start_ptr = self.ptr.get(); + self.ptr.set(start_ptr.add(len)); + start_ptr + } + /// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable /// reference to it. Will panic if passed a zero-sized types. /// @@ -168,21 +195,64 @@ impl TypedArena { where T: Copy, { + unsafe { + let len = slice.len(); + let start_ptr = self.alloc_raw_slice(len); + slice.as_ptr().copy_to_nonoverlapping(start_ptr, len); + slice::from_raw_parts_mut(start_ptr, len) + } + } + + #[inline] + pub fn alloc_from_iter>(&self, iter: I) -> &mut [T] { assert!(mem::size_of::() != 0); - assert!(slice.len() != 0); + let mut iter = iter.into_iter(); + let size_hint = iter.size_hint(); - let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize; - let at_least_bytes = slice.len() * mem::size_of::(); - if available_capacity_bytes < at_least_bytes { - self.grow(slice.len()); - } + match size_hint { + (min, Some(max)) if min == max => { + // We know the exact number of elements the iterator will produce here + let len = min; - unsafe { - let start_ptr = self.ptr.get(); - let arena_slice = slice::from_raw_parts_mut(start_ptr, slice.len()); - self.ptr.set(start_ptr.add(arena_slice.len())); - arena_slice.copy_from_slice(slice); - arena_slice + if len == 0 { + return &mut []; + } + + self.ensure_capacity(len); + + let slice = self.ptr.get(); + + unsafe { + let mut ptr = self.ptr.get(); + for _ in 0..len { + // Write into uninitialized memory. + ptr::write(ptr, iter.next().unwrap()); + // Advance the pointer. 
+ ptr = ptr.offset(1); + // Update the pointer per iteration so if `iter.next()` panics + // we destroy the correct amount + self.ptr.set(ptr); + } + slice::from_raw_parts_mut(slice, len) + } + } + _ => { + cold_path(move || -> &mut [T] { + let mut vec: SmallVec<[_; 8]> = iter.collect(); + if vec.is_empty() { + return &mut []; + } + // Move the content to the arena by copying it and then forgetting + // the content of the SmallVec + unsafe { + let len = vec.len(); + let start_ptr = self.alloc_raw_slice(len); + vec.as_ptr().copy_to_nonoverlapping(start_ptr, len); + vec.set_len(0); + slice::from_raw_parts_mut(start_ptr, len) + } + }) + } } } @@ -196,6 +266,7 @@ impl TypedArena { if let Some(last_chunk) = chunks.last_mut() { let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize; let currently_used_cap = used_bytes / mem::size_of::(); + last_chunk.entries = currently_used_cap; if last_chunk.storage.reserve_in_place(currently_used_cap, n) { self.end.set(last_chunk.end()); return; @@ -229,8 +300,7 @@ impl TypedArena { let len = chunks_borrow.len(); // If `T` is ZST, code below has no effect. for mut chunk in chunks_borrow.drain(..len-1) { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + chunk.destroy(chunk.entries); } } } @@ -272,8 +342,7 @@ unsafe impl<#[may_dangle] T> Drop for TypedArena { self.clear_last_chunk(&mut last_chunk); // The last chunk will be dropped. Destroy all other chunks. for chunk in chunks_borrow.iter_mut() { - let cap = chunk.storage.cap(); - chunk.destroy(cap); + chunk.destroy(chunk.entries); } } // RawVec handles deallocation of `last_chunk` and `self.chunks`. @@ -417,6 +486,54 @@ impl DroplessArena { arena_slice } } + + #[inline] + pub fn alloc_from_iter>(&self, iter: I) -> &mut [T] { + let mut iter = iter.into_iter(); + assert!(mem::size_of::() != 0); + assert!(!mem::needs_drop::()); + + let size_hint = iter.size_hint(); + + match size_hint { + (min, Some(max)) if min == max => { + // We know the exact number of elements the iterator will produce here + let len = min; + + if len == 0 { + return &mut [] + } + let size = len.checked_mul(mem::size_of::()).unwrap(); + let mem = self.alloc_raw(size, mem::align_of::()) as *mut _ as *mut T; + unsafe { + for i in 0..len { + ptr::write(mem.offset(i as isize), iter.next().unwrap()) + } + slice::from_raw_parts_mut(mem, len) + } + } + (_, _) => { + cold_path(move || -> &mut [T] { + let mut vec: SmallVec<[_; 8]> = iter.collect(); + if vec.is_empty() { + return &mut []; + } + // Move the content to the arena by copying it and then forgetting + // the content of the SmallVec + unsafe { + let len = vec.len(); + let start_ptr = self.alloc_raw( + len * mem::size_of::(), + mem::align_of::() + ) as *mut _ as *mut T; + vec.as_ptr().copy_to_nonoverlapping(start_ptr, len); + vec.set_len(0); + slice::from_raw_parts_mut(start_ptr, len) + } + }) + } + } + } } #[derive(Default)] @@ -483,7 +600,7 @@ impl SyncDroplessArena { #[cfg(test)] mod tests { extern crate test; - use self::test::Bencher; + use test::Bencher; use super::TypedArena; use std::cell::Cell; @@ -518,15 +635,15 @@ mod tests { impl<'a> Wrap<'a> { fn alloc_inner Inner>(&self, f: F) -> &Inner { - let r: &EI = self.0.alloc(EI::I(f())); + let r: &EI<'_> = self.0.alloc(EI::I(f())); if let &EI::I(ref i) = r { i } else { panic!("mismatch"); } } - fn alloc_outer Outer<'a>>(&self, f: F) -> &Outer { - let r: &EI = self.0.alloc(EI::O(f())); + fn alloc_outer Outer<'a>>(&self, f: F) -> &Outer<'_> { + let r: &EI<'_> = self.0.alloc(EI::O(f())); if let &EI::O(ref o) = r { o 
} else { @@ -616,7 +733,7 @@ mod tests { count: &'a Cell, } - impl<'a> Drop for DropCounter<'a> { + impl Drop for DropCounter<'_> { fn drop(&mut self) { self.count.set(self.count.get() + 1); } @@ -626,7 +743,7 @@ mod tests { fn test_typed_arena_drop_count() { let counter = Cell::new(0); { - let arena: TypedArena = TypedArena::default(); + let arena: TypedArena> = TypedArena::default(); for _ in 0..100 { // Allocate something with drop glue to make sure it doesn't leak. arena.alloc(DropCounter { count: &counter }); @@ -638,7 +755,7 @@ mod tests { #[test] fn test_typed_arena_drop_on_clear() { let counter = Cell::new(0); - let mut arena: TypedArena = TypedArena::default(); + let mut arena: TypedArena> = TypedArena::default(); for i in 0..10 { for _ in 0..100 { // Allocate something with drop glue to make sure it doesn't leak. diff --git a/src/libbacktrace b/src/libbacktrace deleted file mode 160000 index f4d02bbdbf8a2..0000000000000 --- a/src/libbacktrace +++ /dev/null @@ -1 +0,0 @@ -Subproject commit f4d02bbdbf8a2c5a31f0801dfef597a86caad9e3 diff --git a/src/libcore/alloc.rs b/src/libcore/alloc.rs index 8db7d33bdecaa..e842020561d35 100644 --- a/src/libcore/alloc.rs +++ b/src/libcore/alloc.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Memory allocation APIs #![stable(feature = "alloc_module", since = "1.28.0")] @@ -430,12 +420,12 @@ impl fmt::Display for CannotReallocInPlace { /// } /// ``` /// -/// # Unsafety +/// # Safety /// /// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and /// implementors must ensure that they adhere to these contracts: /// -/// * It's undefined behavior if global allocators unwind. This restriction may +/// * It's undefined behavior if global allocators unwind. This restriction may /// be lifted in the future, but currently a panic from any of these /// functions may lead to memory unsafety. /// @@ -653,7 +643,7 @@ pub unsafe trait GlobalAlloc { /// currently allocated via an allocator `a`, then it is legal to /// use that layout to deallocate it, i.e., `a.dealloc(ptr, k);`. /// -/// # Unsafety +/// # Safety /// /// The `Alloc` trait is an `unsafe` trait for a number of reasons, and /// implementors must ensure that they adhere to these contracts: diff --git a/src/libcore/any.rs b/src/libcore/any.rs index f521ab994cd9f..01ab523a4c3f6 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module implements the `Any` trait, which enables dynamic typing //! of any `'static` type through runtime reflection. //! @@ -28,7 +18,7 @@ //! //! Consider a situation where we want to log out a value passed to a function. //! We know the value we're working on implements Debug, but we don't know its -//! concrete type. We want to give special treatment to certain types: in this +//! concrete type. 
We want to give special treatment to certain types: in this //! case printing out the length of String values prior to their value. //! We don't know the concrete type of our value at compile time, so we need to //! use runtime reflection instead. @@ -41,8 +31,8 @@ //! fn log(value: &T) { //! let value_any = value as &dyn Any; //! -//! // try to convert our value to a String. If successful, we want to -//! // output the String's length as well as its value. If not, it's a +//! // Try to convert our value to a `String`. If successful, we want to +//! // output the String`'s length as well as its value. If not, it's a //! // different type: just print it out unadorned. //! match value_any.downcast_ref::() { //! Some(as_string) => { @@ -91,12 +81,10 @@ pub trait Any: 'static { /// # Examples /// /// ``` - /// #![feature(get_type_id)] - /// /// use std::any::{Any, TypeId}; /// /// fn is_string(s: &dyn Any) -> bool { - /// TypeId::of::() == s.get_type_id() + /// TypeId::of::() == s.type_id() /// } /// /// fn main() { @@ -104,15 +92,13 @@ pub trait Any: 'static { /// assert_eq!(is_string(&"cookie monster".to_string()), true); /// } /// ``` - #[unstable(feature = "get_type_id", - reason = "this method will likely be replaced by an associated static", - issue = "27745")] - fn get_type_id(&self) -> TypeId; + #[stable(feature = "get_type_id", since = "1.34.0")] + fn type_id(&self) -> TypeId; } #[stable(feature = "rust1", since = "1.0.0")] impl Any for T { - fn get_type_id(&self) -> TypeId { TypeId::of::() } + fn type_id(&self) -> TypeId { TypeId::of::() } } /////////////////////////////////////////////////////////////////////////////// @@ -171,10 +157,10 @@ impl dyn Any { let t = TypeId::of::(); // Get TypeId of the type in the trait object - let boxed = self.get_type_id(); + let concrete = self.type_id(); // Compare both TypeIds on equality - t == boxed + t == concrete } /// Returns some reference to the boxed value if it is of type `T`, or diff --git a/src/libcore/array.rs b/src/libcore/array.rs index 26e7a79d35df6..dcd9ce6dad756 100644 --- a/src/libcore/array.rs +++ b/src/libcore/array.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementations of things like `Eq` for fixed-length arrays //! up to a certain length. Eventually we should able to generalize //! to all lengths. @@ -59,7 +49,7 @@ unsafe impl> FixedSizeArray for A { } /// The error type returned when a conversion from a slice to an array fails. -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] #[derive(Debug, Copy, Clone)] pub struct TryFromSliceError(()); @@ -148,8 +138,8 @@ macro_rules! array_impls { } } - #[unstable(feature = "try_from", issue = "33417")] - impl<'a, T> TryFrom<&'a [T]> for [T; $N] where T: Copy { + #[stable(feature = "try_from", since = "1.34.0")] + impl TryFrom<&[T]> for [T; $N] where T: Copy { type Error = TryFromSliceError; fn try_from(slice: &[T]) -> Result<[T; $N], TryFromSliceError> { @@ -157,7 +147,7 @@ macro_rules! 
array_impls { } } - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl<'a, T> TryFrom<&'a [T]> for &'a [T; $N] { type Error = TryFromSliceError; @@ -171,7 +161,7 @@ macro_rules! array_impls { } } - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl<'a, T> TryFrom<&'a mut [T]> for &'a mut [T; $N] { type Error = TryFromSliceError; diff --git a/src/libcore/ascii.rs b/src/libcore/ascii.rs index 6ee91e0b22ff0..7a06aa2b0d397 100644 --- a/src/libcore/ascii.rs +++ b/src/libcore/ascii.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Operations on ASCII strings and characters. //! //! Most string operations in Rust act on UTF-8 strings. However, at times it diff --git a/src/libcore/benches/any.rs b/src/libcore/benches/any.rs index f4f01eb1cf5d2..ceb507aad38f4 100644 --- a/src/libcore/benches/any.rs +++ b/src/libcore/benches/any.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::any::*; use test::{Bencher, black_box}; diff --git a/src/libcore/benches/ascii.rs b/src/libcore/benches/ascii.rs new file mode 100644 index 0000000000000..10b6cc61d996a --- /dev/null +++ b/src/libcore/benches/ascii.rs @@ -0,0 +1,349 @@ +// Lower-case ASCII 'a' is the first byte that has its highest bit set +// after wrap-adding 0x1F: +// +// b'a' + 0x1F == 0x80 == 0b1000_0000 +// b'z' + 0x1F == 0x98 == 0b1001_1000 +// +// Lower-case ASCII 'z' is the last byte that has its highest bit unset +// after wrap-adding 0x05: +// +// b'a' + 0x05 == 0x66 == 0b0110_0110 +// b'z' + 0x05 == 0x7F == 0b0111_1111 +// +// … except for 0xFB to 0xFF, but those are in the range of bytes +// that have the highest bit unset again after adding 0x1F. +// +// So `(byte + 0x1f) & !(byte + 5)` has its highest bit set +// iff `byte` is a lower-case ASCII letter. +// +// Lower-case ASCII letters all have the 0x20 bit set. +// (Two positions right of 0x80, the highest bit.) +// Unsetting that bit produces the same letter, in upper-case. +// +// Therefore: +fn branchless_to_ascii_upper_case(byte: u8) -> u8 { + byte & + !( + ( + byte.wrapping_add(0x1f) & + !byte.wrapping_add(0x05) & + 0x80 + ) >> 2 + ) +} + + +macro_rules! benches { + ($( fn $name: ident($arg: ident: &mut [u8]) $body: block )+ @iter $( $is_: ident, )+) => { + benches! 
{@ + $( fn $name($arg: &mut [u8]) $body )+ + $( fn $is_(bytes: &mut [u8]) { bytes.iter().all(u8::$is_) } )+ + } + }; + + (@$( fn $name: ident($arg: ident: &mut [u8]) $body: block )+) => { + benches!(mod short SHORT $($name $arg $body)+); + benches!(mod medium MEDIUM $($name $arg $body)+); + benches!(mod long LONG $($name $arg $body)+); + }; + + (mod $mod_name: ident $input: ident $($name: ident $arg: ident $body: block)+) => { + mod $mod_name { + use super::*; + + $( + #[bench] + fn $name(bencher: &mut Bencher) { + bencher.bytes = $input.len() as u64; + bencher.iter(|| { + let mut vec = $input.as_bytes().to_vec(); + { + let $arg = &mut vec[..]; + black_box($body); + } + vec + }) + } + )+ + } + } +} + +use test::black_box; +use test::Bencher; + +benches! { + fn case00_alloc_only(_bytes: &mut [u8]) {} + + fn case01_black_box_read_each_byte(bytes: &mut [u8]) { + for byte in bytes { + black_box(*byte); + } + } + + fn case02_lookup_table(bytes: &mut [u8]) { + for byte in bytes { + *byte = ASCII_UPPERCASE_MAP[*byte as usize] + } + } + + fn case03_branch_and_subtract(bytes: &mut [u8]) { + for byte in bytes { + *byte = if b'a' <= *byte && *byte <= b'z' { + *byte - b'a' + b'A' + } else { + *byte + } + } + } + + fn case04_branch_and_mask(bytes: &mut [u8]) { + for byte in bytes { + *byte = if b'a' <= *byte && *byte <= b'z' { + *byte & !0x20 + } else { + *byte + } + } + } + + fn case05_branchless(bytes: &mut [u8]) { + for byte in bytes { + *byte = branchless_to_ascii_upper_case(*byte) + } + } + + fn case06_libcore(bytes: &mut [u8]) { + bytes.make_ascii_uppercase() + } + + fn case07_fake_simd_u32(bytes: &mut [u8]) { + let (before, aligned, after) = unsafe { + bytes.align_to_mut::() + }; + for byte in before { + *byte = branchless_to_ascii_upper_case(*byte) + } + for word in aligned { + // FIXME: this is incorrect for some byte values: + // addition within a byte can carry/overflow into the next byte. + // Test case: b"\xFFz " + *word &= !( + ( + word.wrapping_add(0x1f1f1f1f) & + !word.wrapping_add(0x05050505) & + 0x80808080 + ) >> 2 + ) + } + for byte in after { + *byte = branchless_to_ascii_upper_case(*byte) + } + } + + fn case08_fake_simd_u64(bytes: &mut [u8]) { + let (before, aligned, after) = unsafe { + bytes.align_to_mut::() + }; + for byte in before { + *byte = branchless_to_ascii_upper_case(*byte) + } + for word in aligned { + // FIXME: like above, this is incorrect for some byte values. 
+ *word &= !( + ( + word.wrapping_add(0x1f1f1f1f_1f1f1f1f) & + !word.wrapping_add(0x05050505_05050505) & + 0x80808080_80808080 + ) >> 2 + ) + } + for byte in after { + *byte = branchless_to_ascii_upper_case(*byte) + } + } + + fn case09_mask_mult_bool_branchy_lookup_table(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + if b >= 0x80 { return false } + match ASCII_CHARACTER_CLASS[b as usize] { + L | Lx => true, + _ => false, + } + } + for byte in bytes { + *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) + } + } + + fn case10_mask_mult_bool_lookup_table(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + match ASCII_CHARACTER_CLASS[b as usize] { + L | Lx => true, + _ => false + } + } + for byte in bytes { + *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) + } + } + + fn case11_mask_mult_bool_match_range(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + match b { + b'a'...b'z' => true, + _ => false + } + } + for byte in bytes { + *byte &= !(0x20 * (is_ascii_lowercase(*byte) as u8)) + } + } + + fn case12_mask_shifted_bool_match_range(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + match b { + b'a'...b'z' => true, + _ => false + } + } + for byte in bytes { + *byte &= !((is_ascii_lowercase(*byte) as u8) << 5) + } + } + + fn case13_subtract_shifted_bool_match_range(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + match b { + b'a'...b'z' => true, + _ => false + } + } + for byte in bytes { + *byte -= (is_ascii_lowercase(*byte) as u8) << 5 + } + } + + fn case14_subtract_multiplied_bool_match_range(bytes: &mut [u8]) { + fn is_ascii_lowercase(b: u8) -> bool { + match b { + b'a'...b'z' => true, + _ => false + } + } + for byte in bytes { + *byte -= (b'a' - b'A') * is_ascii_lowercase(*byte) as u8 + } + } + + @iter + + is_ascii, + is_ascii_alphabetic, + is_ascii_uppercase, + is_ascii_lowercase, + is_ascii_alphanumeric, + is_ascii_digit, + is_ascii_hexdigit, + is_ascii_punctuation, + is_ascii_graphic, + is_ascii_whitespace, + is_ascii_control, +} + +macro_rules! repeat { + ($s: expr) => { concat!($s, $s, $s, $s, $s, $s, $s, $s, $s, $s) } +} + +const SHORT: &'static str = "Alice's"; +const MEDIUM: &'static str = "Alice's Adventures in Wonderland"; +const LONG: &'static str = repeat!(r#" + La Guida di Bragia, a Ballad Opera for the Marionette Theatre (around 1850) + Alice's Adventures in Wonderland (1865) + Phantasmagoria and Other Poems (1869) + Through the Looking-Glass, and What Alice Found There + (includes "Jabberwocky" and "The Walrus and the Carpenter") (1871) + The Hunting of the Snark (1876) + Rhyme? And Reason? 
(1883) – shares some contents with the 1869 collection, + including the long poem "Phantasmagoria" + A Tangled Tale (1885) + Sylvie and Bruno (1889) + Sylvie and Bruno Concluded (1893) + Pillow Problems (1893) + What the Tortoise Said to Achilles (1895) + Three Sunsets and Other Poems (1898) + The Manlet (1903)[106] +"#); + +const ASCII_UPPERCASE_MAP: [u8; 256] = [ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', + b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', + b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', + b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', + b'@', b'A', b'B', b'C', b'D', b'E', b'F', b'G', + b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', + b'X', b'Y', b'Z', b'[', b'\\', b']', b'^', b'_', + b'`', + + b'A', b'B', b'C', b'D', b'E', b'F', b'G', + b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', + b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', + b'X', b'Y', b'Z', + + b'{', b'|', b'}', b'~', 0x7f, + 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, + 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, + 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, + 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, + 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, + 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, + 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, + 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, + 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, + 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, + 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, + 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, + 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, + 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, +]; + +enum AsciiCharacterClass { + C, // control + Cw, // control whitespace + W, // whitespace + D, // digit + L, // lowercase + Lx, // lowercase hex digit + U, // uppercase + Ux, // uppercase hex digit + P, // punctuation + N, // Non-ASCII +} +use self::AsciiCharacterClass::*; + +static ASCII_CHARACTER_CLASS: [AsciiCharacterClass; 256] = [ +// _0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _a _b _c _d _e _f + C, C, C, C, C, C, C, C, C, Cw,Cw,C, Cw,Cw,C, C, // 0_ + C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, // 1_ + W, P, P, P, P, P, P, P, P, P, P, P, P, P, P, P, // 2_ + D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, P, // 3_ + P, Ux,Ux,Ux,Ux,Ux,Ux,U, U, U, U, U, U, U, U, U, // 4_ + U, U, U, U, U, U, U, U, U, U, U, P, P, P, P, P, // 5_ + P, Lx,Lx,Lx,Lx,Lx,Lx,L, L, L, L, L, L, L, L, L, // 6_ + L, L, L, L, L, L, L, L, L, L, L, P, P, P, P, C, // 7_ + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, + N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, +]; diff --git a/src/libcore/benches/char/methods.rs b/src/libcore/benches/char/methods.rs index faf820d871cfa..af934c1171577 100644 --- a/src/libcore/benches/char/methods.rs +++ b/src/libcore/benches/char/methods.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use test::Bencher; const CHARS: [char; 9] = ['0', 'x', '2', '5', 'A', 'f', '7', '8', '9']; diff --git a/src/libcore/benches/char/mod.rs b/src/libcore/benches/char/mod.rs index a656e82cb61e6..9ca51a7684753 100644 --- a/src/libcore/benches/char/mod.rs +++ b/src/libcore/benches/char/mod.rs @@ -1,11 +1 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod methods; diff --git a/src/libcore/benches/fmt.rs b/src/libcore/benches/fmt.rs new file mode 100644 index 0000000000000..92f10c760c6d2 --- /dev/null +++ b/src/libcore/benches/fmt.rs @@ -0,0 +1,110 @@ +use std::io::{self, Write as IoWrite}; +use std::fmt::{self, Write as FmtWrite}; +use test::Bencher; + +#[bench] +fn write_vec_value(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = Vec::new(); + for _ in 0..1000 { + mem.write_all("abc".as_bytes()).unwrap(); + } + }); +} + +#[bench] +fn write_vec_ref(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = Vec::new(); + let wr = &mut mem as &mut dyn io::Write; + for _ in 0..1000 { + wr.write_all("abc".as_bytes()).unwrap(); + } + }); +} + +#[bench] +fn write_vec_macro1(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = Vec::new(); + let wr = &mut mem as &mut dyn io::Write; + for _ in 0..1000 { + write!(wr, "abc").unwrap(); + } + }); +} + +#[bench] +fn write_vec_macro2(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = Vec::new(); + let wr = &mut mem as &mut dyn io::Write; + for _ in 0..1000 { + write!(wr, "{}", "abc").unwrap(); + } + }); +} + +#[bench] +fn write_vec_macro_debug(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = Vec::new(); + let wr = &mut mem as &mut dyn io::Write; + for _ in 0..1000 { + write!(wr, "{:?}", "☃").unwrap(); + } + }); +} + +#[bench] +fn write_str_value(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = String::new(); + for _ in 0..1000 { + mem.write_str("abc").unwrap(); + } + }); +} + +#[bench] +fn write_str_ref(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = String::new(); + let wr = &mut mem as &mut dyn fmt::Write; + for _ in 0..1000 { + wr.write_str("abc").unwrap(); + } + }); +} + +#[bench] +fn write_str_macro1(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = String::new(); + for _ in 0..1000 { + write!(mem, "abc").unwrap(); + } + }); +} + +#[bench] +fn write_str_macro2(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = String::new(); + let wr = &mut mem as &mut dyn fmt::Write; + for _ in 0..1000 { + write!(wr, "{}", "abc").unwrap(); + } + }); +} + +#[bench] +fn write_str_macro_debug(bh: &mut Bencher) { + bh.iter(|| { + let mut mem = String::new(); + let wr = &mut mem as &mut dyn fmt::Write; + for _ in 0..1000 { + write!(wr, "{:?}", "☃").unwrap(); + } + }); +} diff --git a/src/libcore/benches/hash/mod.rs b/src/libcore/benches/hash/mod.rs index 55d9e3e091380..4f2e152b69526 100644 --- a/src/libcore/benches/hash/mod.rs +++ b/src/libcore/benches/hash/mod.rs @@ -1,11 +1 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod sip; diff --git a/src/libcore/benches/hash/sip.rs b/src/libcore/benches/hash/sip.rs index 3379c85bbec7d..5baba42763e10 100644 --- a/src/libcore/benches/hash/sip.rs +++ b/src/libcore/benches/hash/sip.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(deprecated)] use core::hash::*; diff --git a/src/libcore/benches/iter.rs b/src/libcore/benches/iter.rs index b0aca65834370..7dcfad8306fce 100644 --- a/src/libcore/benches/iter.rs +++ b/src/libcore/benches/iter.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::iter::*; use test::{Bencher, black_box}; @@ -45,7 +35,7 @@ fn scatter(x: i32) -> i32 { (x * 31) % 127 } fn bench_max_by_key(b: &mut Bencher) { b.iter(|| { let it = 0..100; - it.max_by_key(|&x| scatter(x)) + it.map(black_box).max_by_key(|&x| scatter(x)) }) } @@ -66,7 +56,7 @@ fn bench_max_by_key2(b: &mut Bencher) { fn bench_max(b: &mut Bencher) { b.iter(|| { let it = 0..100; - it.map(scatter).max() + it.map(black_box).map(scatter).max() }) } @@ -195,13 +185,13 @@ bench_sums! { bench_sums! { bench_filter_sum, bench_filter_ref_sum, - (0i64..1000000).filter(|x| x % 2 == 0) + (0i64..1000000).filter(|x| x % 3 == 0) } bench_sums! { bench_filter_chain_sum, bench_filter_chain_ref_sum, - (0i64..1000000).chain(0..1000000).filter(|x| x % 2 == 0) + (0i64..1000000).chain(0..1000000).filter(|x| x % 3 == 0) } bench_sums! { @@ -316,3 +306,41 @@ fn bench_skip_then_zip(b: &mut Bencher) { assert_eq!(s, 2009900); }); } + +#[bench] +fn bench_filter_count(b: &mut Bencher) { + b.iter(|| { + (0i64..1000000).map(black_box).filter(|x| x % 3 == 0).count() + }) +} + +#[bench] +fn bench_filter_ref_count(b: &mut Bencher) { + b.iter(|| { + (0i64..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count() + }) +} + +#[bench] +fn bench_filter_chain_count(b: &mut Bencher) { + b.iter(|| { + (0i64..1000000).chain(0..1000000).map(black_box).filter(|x| x % 3 == 0).count() + }) +} + +#[bench] +fn bench_filter_chain_ref_count(b: &mut Bencher) { + b.iter(|| { + (0i64..1000000).chain(0..1000000).map(black_box).by_ref().filter(|x| x % 3 == 0).count() + }) +} + +#[bench] +fn bench_partial_cmp(b: &mut Bencher) { + b.iter(|| (0..100000).map(black_box).partial_cmp((0..100000).map(black_box))) +} + +#[bench] +fn bench_lt(b: &mut Bencher) { + b.iter(|| (0..100000).map(black_box).lt((0..100000).map(black_box))) +} diff --git a/src/libcore/benches/lib.rs b/src/libcore/benches/lib.rs index d44f1577d56b0..707cdd5f450ea 100644 --- a/src/libcore/benches/lib.rs +++ b/src/libcore/benches/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![feature(flt2dec)] #![feature(test)] @@ -15,9 +5,11 @@ extern crate core; extern crate test; mod any; +mod ascii; mod char; mod hash; mod iter; mod num; mod ops; mod slice; +mod fmt; diff --git a/src/libcore/benches/num/dec2flt/mod.rs b/src/libcore/benches/num/dec2flt/mod.rs index 562866e11777c..561a4bee87ad9 100644 --- a/src/libcore/benches/num/dec2flt/mod.rs +++ b/src/libcore/benches/num/dec2flt/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::f64; use test::Bencher; diff --git a/src/libcore/benches/num/flt2dec/mod.rs b/src/libcore/benches/num/flt2dec/mod.rs index 7f3b98a1c7614..4153745d0424a 100644 --- a/src/libcore/benches/num/flt2dec/mod.rs +++ b/src/libcore/benches/num/flt2dec/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod strategy { mod dragon; mod grisu; diff --git a/src/libcore/benches/num/flt2dec/strategy/dragon.rs b/src/libcore/benches/num/flt2dec/strategy/dragon.rs index 6824cf40ed2ae..60660b1da1118 100644 --- a/src/libcore/benches/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/benches/num/flt2dec/strategy/dragon.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::{i16, f64}; use super::super::*; use core::num::flt2dec::strategy::dragon::*; diff --git a/src/libcore/benches/num/flt2dec/strategy/grisu.rs b/src/libcore/benches/num/flt2dec/strategy/grisu.rs index 82e1a858fca9f..841feba50dd5b 100644 --- a/src/libcore/benches/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/benches/num/flt2dec/strategy/grisu.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::{i16, f64}; use super::super::*; use core::num::flt2dec::strategy::grisu::*; diff --git a/src/libcore/benches/num/mod.rs b/src/libcore/benches/num/mod.rs index b57e167b05d9e..f5c49ea5bf0d5 100644 --- a/src/libcore/benches/num/mod.rs +++ b/src/libcore/benches/num/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod flt2dec; mod dec2flt; diff --git a/src/libcore/benches/ops.rs b/src/libcore/benches/ops.rs index 7f36a4b0771aa..80649f33562f2 100644 --- a/src/libcore/benches/ops.rs +++ b/src/libcore/benches/ops.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::ops::*; use test::Bencher; diff --git a/src/libcore/benches/slice.rs b/src/libcore/benches/slice.rs index b2fc74544f1df..484753c1a045e 100644 --- a/src/libcore/benches/slice.rs +++ b/src/libcore/benches/slice.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use test::black_box; use test::Bencher; diff --git a/src/libcore/borrow.rs b/src/libcore/borrow.rs index 84d4b21784180..4d58aaca94183 100644 --- a/src/libcore/borrow.rs +++ b/src/libcore/borrow.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A module for working with borrowed data. #![stable(feature = "rust1", since = "1.0.0")] @@ -42,6 +32,10 @@ /// on the identical behavior of these additional trait implementations. /// These traits will likely appear as additional trait bounds. /// +/// In particular `Eq`, `Ord` and `Hash` must be equivalent for +/// borrowed and owned values: `x.borrow() == y.borrow()` should give the +/// same result as `x == y`. +/// /// If generic code merely needs to work for all types that can /// provide a reference to related type `T`, it is often better to use /// [`AsRef`] as more types can safely implement it. diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 0a16c92928d44..5325b339151dc 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Shareable mutable containers. //! //! Rust memory safety is based on this rule: Given an object `T`, it is only possible to @@ -140,7 +130,7 @@ //! //! This is simply a special - but common - case of the previous: hiding mutability for operations //! that appear to be immutable. 
The `clone` method is expected to not change the source value, and -//! is declared to take `&self`, not `&mut self`. Therefore any mutation that happens in the +//! is declared to take `&self`, not `&mut self`. Therefore, any mutation that happens in the //! `clone` method must use cell types. For example, `Rc` maintains its reference counts within a //! `Cell`. //! @@ -712,8 +702,6 @@ impl RefCell { /// Replaces the wrapped value with a new one computed from `f`, returning /// the old value, without deinitializing either one. /// - /// This function corresponds to [`std::mem::replace`](../mem/fn.replace.html). - /// /// # Panics /// /// Panics if the value is currently borrowed. @@ -721,7 +709,6 @@ impl RefCell { /// # Examples /// /// ``` - /// #![feature(refcell_replace_swap)] /// use std::cell::RefCell; /// let cell = RefCell::new(5); /// let old_value = cell.replace_with(|&mut old| old + 1); @@ -729,7 +716,7 @@ impl RefCell { /// assert_eq!(cell, RefCell::new(6)); /// ``` #[inline] - #[unstable(feature = "refcell_replace_swap", issue="43570")] + #[stable(feature = "refcell_replace_swap", since="1.35.0")] pub fn replace_with T>(&self, f: F) -> T { let mut_borrow = &mut *self.borrow_mut(); let replacement = f(mut_borrow); @@ -968,6 +955,44 @@ impl RefCell { &mut *self.value.get() } } + + /// Immutably borrows the wrapped value, returning an error if the value is + /// currently mutably borrowed. + /// + /// # Safety + /// + /// Unlike `RefCell::borrow`, this method is unsafe because it does not + /// return a `Ref`, thus leaving the borrow flag untouched. Mutably + /// borrowing the `RefCell` while the reference returned by this method + /// is alive is undefined behaviour. + /// + /// # Examples + /// + /// ``` + /// #![feature(borrow_state)] + /// use std::cell::RefCell; + /// + /// let c = RefCell::new(5); + /// + /// { + /// let m = c.borrow_mut(); + /// assert!(unsafe { c.try_borrow_unguarded() }.is_err()); + /// } + /// + /// { + /// let m = c.borrow(); + /// assert!(unsafe { c.try_borrow_unguarded() }.is_ok()); + /// } + /// ``` + #[unstable(feature = "borrow_state", issue = "27733")] + #[inline] + pub unsafe fn try_borrow_unguarded(&self) -> Result<&T, BorrowError> { + if !is_writing(self.borrow.get()) { + Ok(&*self.value.get()) + } else { + Err(BorrowError { _private: () }) + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -1143,7 +1168,7 @@ impl<'b, T: ?Sized> Ref<'b, T> { /// The `RefCell` is already immutably borrowed, so this cannot fail. /// /// This is an associated function that needs to be used as - /// `Ref::clone(...)`. A `Clone` implementation or a method would interfere + /// `Ref::clone(...)`. A `Clone` implementation or a method would interfere /// with the widespread use of `r.borrow().clone()` to clone the contents of /// a `RefCell`. #[stable(feature = "cell_extras", since = "1.15.0")] @@ -1155,7 +1180,7 @@ impl<'b, T: ?Sized> Ref<'b, T> { } } - /// Make a new `Ref` for a component of the borrowed data. + /// Makes a new `Ref` for a component of the borrowed data. /// /// The `RefCell` is already immutably borrowed, so this cannot fail. /// @@ -1184,7 +1209,7 @@ impl<'b, T: ?Sized> Ref<'b, T> { } } - /// Split a `Ref` into multiple `Ref`s for different components of the + /// Splits a `Ref` into multiple `Ref`s for different components of the /// borrowed data. /// /// The `RefCell` is already immutably borrowed, so this cannot fail. 
@@ -1196,7 +1221,6 @@ impl<'b, T: ?Sized> Ref<'b, T> { /// # Examples /// /// ``` - /// #![feature(refcell_map_split)] /// use std::cell::{Ref, RefCell}; /// /// let cell = RefCell::new([1, 2, 3, 4]); @@ -1205,7 +1229,7 @@ impl<'b, T: ?Sized> Ref<'b, T> { /// assert_eq!(*begin, [1, 2]); /// assert_eq!(*end, [3, 4]); /// ``` - #[unstable(feature = "refcell_map_split", issue = "51476")] + #[stable(feature = "refcell_map_split", since = "1.35.0")] #[inline] pub fn map_split(orig: Ref<'b, T>, f: F) -> (Ref<'b, U>, Ref<'b, V>) where F: FnOnce(&T) -> (&U, &V) @@ -1227,13 +1251,13 @@ impl fmt::Display for Ref<'_, T> { } impl<'b, T: ?Sized> RefMut<'b, T> { - /// Make a new `RefMut` for a component of the borrowed data, e.g., an enum + /// Makes a new `RefMut` for a component of the borrowed data, e.g., an enum /// variant. /// /// The `RefCell` is already mutably borrowed, so this cannot fail. /// /// This is an associated function that needs to be used as - /// `RefMut::map(...)`. A method would interfere with methods of the same + /// `RefMut::map(...)`. A method would interfere with methods of the same /// name on the contents of a `RefCell` used through `Deref`. /// /// # Examples @@ -1263,7 +1287,7 @@ impl<'b, T: ?Sized> RefMut<'b, T> { } } - /// Split a `RefMut` into multiple `RefMut`s for different components of the + /// Splits a `RefMut` into multiple `RefMut`s for different components of the /// borrowed data. /// /// The underlying `RefCell` will remain mutably borrowed until both @@ -1278,7 +1302,6 @@ impl<'b, T: ?Sized> RefMut<'b, T> { /// # Examples /// /// ``` - /// #![feature(refcell_map_split)] /// use std::cell::{RefCell, RefMut}; /// /// let cell = RefCell::new([1, 2, 3, 4]); @@ -1289,7 +1312,7 @@ impl<'b, T: ?Sized> RefMut<'b, T> { /// begin.copy_from_slice(&[4, 3]); /// end.copy_from_slice(&[2, 1]); /// ``` - #[unstable(feature = "refcell_map_split", issue = "51476")] + #[stable(feature = "refcell_map_split", since = "1.35.0")] #[inline] pub fn map_split( orig: RefMut<'b, T>, f: F @@ -1426,14 +1449,13 @@ impl fmt::Display for RefMut<'_, T> { /// co-exist with it. A `&mut T` must always be unique. /// /// Note that while mutating or mutably aliasing the contents of an `&UnsafeCell` is -/// okay (provided you enforce the invariants some other way), it is still undefined behavior +/// ok (provided you enforce the invariants some other way), it is still undefined behavior /// to have multiple `&mut UnsafeCell` aliases. /// /// # Examples /// /// ``` /// use std::cell::UnsafeCell; -/// use std::marker::Sync; /// /// # #[allow(dead_code)] /// struct NotThreadSafe { diff --git a/src/libcore/char/convert.rs b/src/libcore/char/convert.rs index 160728f923dbc..6a5abfb408f5b 100644 --- a/src/libcore/char/convert.rs +++ b/src/libcore/char/convert.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Character conversions. use convert::TryFrom; @@ -228,7 +218,7 @@ impl FromStr for char { } -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] impl TryFrom for char { type Error = CharTryFromError; @@ -243,11 +233,11 @@ impl TryFrom for char { } /// The error type returned when a conversion from u32 to char fails. 
-#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct CharTryFromError(()); -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] impl fmt::Display for CharTryFromError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { "converted integer out of range for `char`".fmt(f) diff --git a/src/libcore/char/decode.rs b/src/libcore/char/decode.rs index cc52f048b891b..133c9169df858 100644 --- a/src/libcore/char/decode.rs +++ b/src/libcore/char/decode.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! UTF-8 and UTF-16 decoding iterators use fmt; @@ -30,7 +20,7 @@ pub struct DecodeUtf16Error { code: u16, } -/// Create an iterator over the UTF-16 encoded code points in `iter`, +/// Creates an iterator over the UTF-16 encoded code points in `iter`, /// returning unpaired surrogates as `Err`s. /// /// # Examples diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs index d6fcff644acf6..122e5f3affdc2 100644 --- a/src/libcore/char/methods.rs +++ b/src/libcore/char/methods.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! impl char {} use slice; @@ -199,10 +189,8 @@ impl char { /// An extended version of `escape_debug` that optionally permits escaping /// Extended Grapheme codepoints. This allows us to format characters like /// nonspacing marks better when they're at the start of a string. - #[doc(hidden)] - #[unstable(feature = "str_internals", issue = "0")] #[inline] - pub fn escape_debug_ext(self, escape_grapheme_extended: bool) -> EscapeDebug { + pub(crate) fn escape_debug_ext(self, escape_grapheme_extended: bool) -> EscapeDebug { let init_state = match self { '\t' => EscapeDefaultState::Backslash('t'), '\r' => EscapeDefaultState::Backslash('r'), @@ -534,7 +522,7 @@ impl char { } } - /// Returns true if this `char` is an alphabetic code point, and false if not. + /// Returns `true` if this `char` is an alphabetic code point, and false if not. /// /// # Examples /// @@ -558,7 +546,7 @@ impl char { } } - /// Returns true if this `char` satisfies the 'XID_Start' Unicode property, and false + /// Returns `true` if this `char` satisfies the 'XID_Start' Unicode property, and false /// otherwise. /// /// 'XID_Start' is a Unicode Derived Property specified in @@ -572,7 +560,7 @@ impl char { derived_property::XID_Start(self) } - /// Returns true if this `char` satisfies the 'XID_Continue' Unicode property, and false + /// Returns `true` if this `char` satisfies the 'XID_Continue' Unicode property, and false /// otherwise. /// /// 'XID_Continue' is a Unicode Derived Property specified in @@ -586,7 +574,7 @@ impl char { derived_property::XID_Continue(self) } - /// Returns true if this `char` is lowercase, and false otherwise. + /// Returns `true` if this `char` is lowercase. 
/// /// 'Lowercase' is defined according to the terms of the Unicode Derived Core /// Property `Lowercase`. @@ -614,7 +602,7 @@ impl char { } } - /// Returns true if this `char` is uppercase, and false otherwise. + /// Returns `true` if this `char` is uppercase. /// /// 'Uppercase' is defined according to the terms of the Unicode Derived Core /// Property `Uppercase`. @@ -642,7 +630,7 @@ impl char { } } - /// Returns true if this `char` is whitespace, and false otherwise. + /// Returns `true` if this `char` is whitespace. /// /// 'Whitespace' is defined according to the terms of the Unicode Derived Core /// Property `White_Space`. @@ -669,7 +657,7 @@ impl char { } } - /// Returns true if this `char` is alphanumeric, and false otherwise. + /// Returns `true` if this `char` is alphanumeric. /// /// 'Alphanumeric'-ness is defined in terms of the Unicode General Categories /// 'Nd', 'Nl', 'No' and the Derived Core Property 'Alphabetic'. @@ -694,7 +682,7 @@ impl char { self.is_alphabetic() || self.is_numeric() } - /// Returns true if this `char` is a control code point, and false otherwise. + /// Returns `true` if this `char` is a control code point. /// /// 'Control code point' is defined in terms of the Unicode General /// Category `Cc`. @@ -714,7 +702,7 @@ impl char { general_category::Cc(self) } - /// Returns true if this `char` is an extended grapheme character, and false otherwise. + /// Returns `true` if this `char` is an extended grapheme character. /// /// 'Extended grapheme character' is defined in terms of the Unicode Shaping and Rendering /// Category `Grapheme_Extend`. @@ -723,7 +711,7 @@ impl char { derived_property::Grapheme_Extend(self) } - /// Returns true if this `char` is numeric, and false otherwise. + /// Returns `true` if this `char` is numeric. /// /// 'Numeric'-ness is defined in terms of the Unicode General Categories /// 'Nd', 'Nl', 'No'. diff --git a/src/libcore/char/mod.rs b/src/libcore/char/mod.rs index e07a0f5d712b0..6683976642d40 100644 --- a/src/libcore/char/mod.rs +++ b/src/libcore/char/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A character type. //! //! The `char` type represents a single character. More specifically, since @@ -40,7 +30,7 @@ pub use self::convert::{from_u32, from_digit}; pub use self::convert::from_u32_unchecked; #[stable(feature = "char_from_str", since = "1.20.0")] pub use self::convert::ParseCharError; -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] pub use self::convert::CharTryFromError; #[stable(feature = "decode_utf16", since = "1.9.0")] pub use self::decode::{decode_utf16, DecodeUtf16, DecodeUtf16Error}; @@ -399,11 +389,17 @@ impl Iterator for ToLowercase { fn next(&mut self) -> Option { self.0.next() } + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } } #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ToLowercase {} +#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")] +impl ExactSizeIterator for ToLowercase {} + /// Returns an iterator that yields the uppercase equivalent of a `char`. /// /// This `struct` is created by the [`to_uppercase`] method on [`char`]. 
See @@ -421,11 +417,17 @@ impl Iterator for ToUppercase { fn next(&mut self) -> Option { self.0.next() } + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } } #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for ToUppercase {} +#[stable(feature = "exact_size_case_mapping_iter", since = "1.35.0")] +impl ExactSizeIterator for ToUppercase {} + #[derive(Debug, Clone)] enum CaseMappingIter { Three(char, char, char), @@ -467,6 +469,16 @@ impl Iterator for CaseMappingIter { CaseMappingIter::Zero => None, } } + + fn size_hint(&self) -> (usize, Option) { + let size = match self { + CaseMappingIter::Three(..) => 3, + CaseMappingIter::Two(..) => 2, + CaseMappingIter::One(_) => 1, + CaseMappingIter::Zero => 0, + }; + (size, Some(size)) + } } impl fmt::Display for CaseMappingIter { diff --git a/src/libcore/clone.rs b/src/libcore/clone.rs index 225ea3de9cd68..ed90b7de26417 100644 --- a/src/libcore/clone.rs +++ b/src/libcore/clone.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The `Clone` trait for types that cannot be 'implicitly copied'. //! //! In Rust, some simple types are "implicitly copyable" and when you @@ -63,6 +53,17 @@ /// This trait can be used with `#[derive]` if all fields are `Clone`. The `derive`d /// implementation of [`clone`] calls [`clone`] on each field. /// +/// For a generic struct, `#[derive]` implements `Clone` conditionally by adding bound `Clone` on +/// generic parameters. +/// +/// ``` +/// // `derive` implements Clone for Reading when T is Clone. +/// #[derive(Clone)] +/// struct Reading { +/// frequency: T, +/// } +/// ``` +/// /// ## How can I implement `Clone`? /// /// Types that are [`Copy`] should have a trivial implementation of `Clone`. More formally: @@ -70,21 +71,21 @@ /// Manual implementations should be careful to uphold this invariant; however, unsafe code /// must not rely on it to ensure memory safety. /// -/// An example is an array holding more than 32 elements of a type that is `Clone`; the standard -/// library only implements `Clone` up until arrays of size 32. In this case, the implementation of -/// `Clone` cannot be `derive`d, but can be implemented as: +/// An example is a generic struct holding a function pointer. In this case, the +/// implementation of `Clone` cannot be `derive`d, but can be implemented as: /// /// [`Copy`]: ../../std/marker/trait.Copy.html /// [`clone`]: trait.Clone.html#tymethod.clone /// /// ``` -/// #[derive(Copy)] -/// struct Stats { -/// frequencies: [i32; 100], -/// } +/// struct Generate(fn() -> T); +/// +/// impl Copy for Generate {} /// -/// impl Clone for Stats { -/// fn clone(&self) -> Stats { *self } +/// impl Clone for Generate { +/// fn clone(&self) -> Self { +/// *self +/// } /// } /// ``` /// diff --git a/src/libcore/cmp.rs b/src/libcore/cmp.rs index 33881de30527e..14908108fc5a6 100644 --- a/src/libcore/cmp.rs +++ b/src/libcore/cmp.rs @@ -1,37 +1,22 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! Functionality for ordering and comparison. //! -//! This module defines both [`PartialOrd`] and [`PartialEq`] traits which are used -//! by the compiler to implement comparison operators. Rust programs may -//! implement [`PartialOrd`] to overload the `<`, `<=`, `>`, and `>=` operators, -//! and may implement [`PartialEq`] to overload the `==` and `!=` operators. -//! -//! [`PartialOrd`]: trait.PartialOrd.html -//! [`PartialEq`]: trait.PartialEq.html -//! -//! # Examples -//! -//! ``` -//! let x: u32 = 0; -//! let y: u32 = 1; +//! This module contains various tools for ordering and comparing values. In +//! summary: //! -//! // these two lines are equivalent -//! assert_eq!(x < y, true); -//! assert_eq!(x.lt(&y), true); +//! * [`Eq`] and [`PartialEq`] are traits that allow you to define total and +//! partial equality between values, respectively. Implementing them overloads +//! the `==` and `!=` operators. +//! * [`Ord`] and [`PartialOrd`] are traits that allow you to define total and +//! partial orderings between values, respectively. Implementing them overloads +//! the `<`, `<=`, `>`, and `>=` operators. +//! * [`Ordering`][cmp::Ordering] is an enum returned by the +//! main functions of [`Ord`] and [`PartialOrd`], and describes an ordering. +//! * [`Reverse`][cmp::Reverse] is a struct that allows you to easily reverse +//! an ordering. +//! * [`max`][cmp::max] and [`min`][cmp::min] are functions that build off of +//! [`Ord`] and allow you to find the maximum or minimum of two values. //! -//! // these two lines are also equivalent -//! assert_eq!(x == y, false); -//! assert_eq!(x.eq(&y), false); -//! ``` +//! For more details, see the respective documentation of each item in the list. #![stable(feature = "rust1", since = "1.0.0")] @@ -41,7 +26,7 @@ use self::Ordering::*; /// relations](http://en.wikipedia.org/wiki/Partial_equivalence_relation). /// /// This trait allows for partial equality, for types that do not have a full -/// equivalence relation. For example, in floating point numbers `NaN != NaN`, +/// equivalence relation. For example, in floating point numbers `NaN != NaN`, /// so floating point types implement `PartialEq` but not `Eq`. 
 ///
 /// Formally, the equality must be (for all `a`, `b` and `c`):
@@ -87,7 +72,7 @@ use self::Ordering::*;
 /// }
 ///
 /// impl PartialEq for Book {
-///     fn eq(&self, other: &Book) -> bool {
+///     fn eq(&self, other: &Self) -> bool {
 ///         self.isbn == other.isbn
 ///     }
 /// }
@@ -106,6 +91,8 @@ use self::Ordering::*;
 /// For example, let's tweak our previous code a bit:
 ///
 /// ```
+/// // The derive implements <BookFormat> == <BookFormat> comparisons
+/// #[derive(PartialEq)]
 /// enum BookFormat {
 ///     Paperback,
 ///     Hardback,
@@ -117,31 +104,34 @@ use self::Ordering::*;
 ///     format: BookFormat,
 /// }
 ///
+/// // Implement <Book> == <BookFormat> comparisons
 /// impl PartialEq<BookFormat> for Book {
 ///     fn eq(&self, other: &BookFormat) -> bool {
-///         match (&self.format, other) {
-///            (BookFormat::Paperback, BookFormat::Paperback) => true,
-///            (BookFormat::Hardback, BookFormat::Hardback) => true,
-///            (BookFormat::Ebook, BookFormat::Ebook) => true,
-///            (_, _) => false,
-///         }
+///         self.format == *other
+///     }
+/// }
+///
+/// // Implement <BookFormat> == <Book> comparisons
+/// impl PartialEq<Book> for BookFormat {
+///     fn eq(&self, other: &Book) -> bool {
+///         *self == other.format
 ///     }
 /// }
 ///
 /// let b1 = Book { isbn: 3, format: BookFormat::Paperback };
 ///
 /// assert!(b1 == BookFormat::Paperback);
-/// assert!(b1 != BookFormat::Ebook);
+/// assert!(BookFormat::Ebook != b1);
 /// ```
 ///
 /// By changing `impl PartialEq for Book` to `impl PartialEq<BookFormat> for Book`,
-/// we've changed what type we can use on the right side of the `==` operator.
-/// This lets us use it in the `assert!` statements at the bottom.
+/// we allow `BookFormat`s to be compared with `Book`s.
 ///
 /// You can also combine these implementations to let the `==` operator work with
 /// two different types:
 ///
 /// ```
+/// #[derive(PartialEq)]
 /// enum BookFormat {
 ///     Paperback,
 ///     Hardback,
@@ -155,12 +145,13 @@ use self::Ordering::*;
 ///
 /// impl PartialEq<BookFormat> for Book {
 ///     fn eq(&self, other: &BookFormat) -> bool {
-///         match (&self.format, other) {
-///            (&BookFormat::Paperback, &BookFormat::Paperback) => true,
-///            (&BookFormat::Hardback, &BookFormat::Hardback) => true,
-///            (&BookFormat::Ebook, &BookFormat::Ebook) => true,
-///            (_, _) => false,
-///         }
+///         self.format == *other
+///     }
+/// }
+///
+/// impl PartialEq<Book> for BookFormat {
+///     fn eq(&self, other: &Book) -> bool {
+///         *self == other.format
 ///     }
 /// }
 ///
@@ -174,7 +165,7 @@ use self::Ordering::*;
 /// let b2 = Book { isbn: 3, format: BookFormat::Ebook };
 ///
 /// assert!(b1 == BookFormat::Paperback);
-/// assert!(b1 != BookFormat::Ebook);
+/// assert!(BookFormat::Ebook != b1);
 /// assert!(b1 == b2);
 /// ```
 ///
@@ -242,7 +233,7 @@ pub trait PartialEq<Rhs: ?Sized = Self> {
 ///     format: BookFormat,
 /// }
 /// impl PartialEq for Book {
-///     fn eq(&self, other: &Book) -> bool {
+///     fn eq(&self, other: &Self) -> bool {
 ///         self.isbn == other.isbn
 ///     }
 /// }
@@ -295,13 +286,13 @@ pub struct AssertParamIsEq<T: Eq + ?Sized> { _field: ::marker::PhantomData<T> }
 #[derive(Clone, Copy, PartialEq, Debug, Hash)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub enum Ordering {
-    /// An ordering where a compared value is less [than another].
+    /// An ordering where a compared value is less than another.
     #[stable(feature = "rust1", since = "1.0.0")]
     Less = -1,
-    /// An ordering where a compared value is equal [to another].
+    /// An ordering where a compared value is equal to another.
     #[stable(feature = "rust1", since = "1.0.0")]
     Equal = 0,
-    /// An ordering where a compared value is greater [than another].
+    /// An ordering where a compared value is greater than another.
#[stable(feature = "rust1", since = "1.0.0")] Greater = 1, } @@ -482,9 +473,11 @@ impl Ord for Reverse { /// Then you must define an implementation for `cmp()`. You may find it useful to use /// `cmp()` on your type's fields. /// -/// Implementations of `PartialEq`, `PartialOrd`, and `Ord` *must* agree with each other. It's -/// easy to accidentally make them disagree by deriving some of the traits and manually -/// implementing others. +/// Implementations of `PartialEq`, `PartialOrd`, and `Ord` *must* +/// agree with each other. That is, `a.cmp(b) == Ordering::Equal` if +/// and only if `a == b` and `Some(a.cmp(b)) == a.partial_cmp(b)` for +/// all `a` and `b`. It's easy to accidentally make them disagree by +/// deriving some of the traits and manually implementing others. /// /// Here's an example where you want to sort people by height only, disregarding `id` /// and `name`: @@ -500,19 +493,19 @@ impl Ord for Reverse { /// } /// /// impl Ord for Person { -/// fn cmp(&self, other: &Person) -> Ordering { +/// fn cmp(&self, other: &Self) -> Ordering { /// self.height.cmp(&other.height) /// } /// } /// /// impl PartialOrd for Person { -/// fn partial_cmp(&self, other: &Person) -> Option { +/// fn partial_cmp(&self, other: &Self) -> Option { /// Some(self.cmp(other)) /// } /// } /// /// impl PartialEq for Person { -/// fn eq(&self, other: &Person) -> bool { +/// fn eq(&self, other: &Self) -> bool { /// self.height == other.height /// } /// } @@ -574,6 +567,37 @@ pub trait Ord: Eq + PartialOrd { where Self: Sized { if self <= other { self } else { other } } + + /// Restrict a value to a certain interval. + /// + /// Returns `max` if `self` is greater than `max`, and `min` if `self` is + /// less than `min`. Otherwise this returns `self`. + /// + /// # Panics + /// + /// Panics if `min > max`. 
+ /// + /// # Examples + /// + /// ``` + /// #![feature(clamp)] + /// + /// assert!((-3).clamp(-2, 1) == -2); + /// assert!(0.clamp(-2, 1) == 0); + /// assert!(2.clamp(-2, 1) == 1); + /// ``` + #[unstable(feature = "clamp", issue = "44095")] + fn clamp(self, min: Self, max: Self) -> Self + where Self: Sized { + assert!(min <= max); + if self < min { + min + } else if self > max { + max + } else { + self + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -672,13 +696,13 @@ impl PartialOrd for Ordering { /// } /// /// impl PartialOrd for Person { -/// fn partial_cmp(&self, other: &Person) -> Option { +/// fn partial_cmp(&self, other: &Self) -> Option { /// self.height.partial_cmp(&other.height) /// } /// } /// /// impl PartialEq for Person { -/// fn eq(&self, other: &Person) -> bool { +/// fn eq(&self, other: &Self) -> bool { /// self.height == other.height /// } /// } @@ -1011,26 +1035,26 @@ mod impls { // & pointers #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialEq<&'b B> for &'a A where A: PartialEq { + impl PartialEq<&B> for &A where A: PartialEq { #[inline] - fn eq(&self, other: & &'b B) -> bool { PartialEq::eq(*self, *other) } + fn eq(&self, other: & &B) -> bool { PartialEq::eq(*self, *other) } #[inline] - fn ne(&self, other: & &'b B) -> bool { PartialEq::ne(*self, *other) } + fn ne(&self, other: & &B) -> bool { PartialEq::ne(*self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialOrd<&'b B> for &'a A where A: PartialOrd { + impl PartialOrd<&B> for &A where A: PartialOrd { #[inline] - fn partial_cmp(&self, other: &&'b B) -> Option { + fn partial_cmp(&self, other: &&B) -> Option { PartialOrd::partial_cmp(*self, *other) } #[inline] - fn lt(&self, other: & &'b B) -> bool { PartialOrd::lt(*self, *other) } + fn lt(&self, other: & &B) -> bool { PartialOrd::lt(*self, *other) } #[inline] - fn le(&self, other: & &'b B) -> bool { PartialOrd::le(*self, *other) } + fn le(&self, other: & &B) -> bool { PartialOrd::le(*self, *other) } #[inline] - fn ge(&self, other: & &'b B) -> bool { PartialOrd::ge(*self, *other) } + fn ge(&self, other: & &B) -> bool { PartialOrd::ge(*self, *other) } #[inline] - fn gt(&self, other: & &'b B) -> bool { PartialOrd::gt(*self, *other) } + fn gt(&self, other: & &B) -> bool { PartialOrd::gt(*self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] impl Ord for &A where A: Ord { @@ -1043,26 +1067,26 @@ mod impls { // &mut pointers #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialEq<&'b mut B> for &'a mut A where A: PartialEq { + impl PartialEq<&mut B> for &mut A where A: PartialEq { #[inline] - fn eq(&self, other: &&'b mut B) -> bool { PartialEq::eq(*self, *other) } + fn eq(&self, other: &&mut B) -> bool { PartialEq::eq(*self, *other) } #[inline] - fn ne(&self, other: &&'b mut B) -> bool { PartialEq::ne(*self, *other) } + fn ne(&self, other: &&mut B) -> bool { PartialEq::ne(*self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialOrd<&'b mut B> for &'a mut A where A: PartialOrd { + impl PartialOrd<&mut B> for &mut A where A: PartialOrd { #[inline] - fn partial_cmp(&self, other: &&'b mut B) -> Option { + fn partial_cmp(&self, other: &&mut B) -> Option { PartialOrd::partial_cmp(*self, *other) } #[inline] - fn lt(&self, other: &&'b mut B) -> bool { PartialOrd::lt(*self, *other) } + fn lt(&self, other: &&mut B) -> bool { PartialOrd::lt(*self, *other) } #[inline] - fn le(&self, 
other: &&'b mut B) -> bool { PartialOrd::le(*self, *other) } + fn le(&self, other: &&mut B) -> bool { PartialOrd::le(*self, *other) } #[inline] - fn ge(&self, other: &&'b mut B) -> bool { PartialOrd::ge(*self, *other) } + fn ge(&self, other: &&mut B) -> bool { PartialOrd::ge(*self, *other) } #[inline] - fn gt(&self, other: &&'b mut B) -> bool { PartialOrd::gt(*self, *other) } + fn gt(&self, other: &&mut B) -> bool { PartialOrd::gt(*self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] impl Ord for &mut A where A: Ord { @@ -1073,18 +1097,18 @@ mod impls { impl Eq for &mut A where A: Eq {} #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialEq<&'b mut B> for &'a A where A: PartialEq { + impl PartialEq<&mut B> for &A where A: PartialEq { #[inline] - fn eq(&self, other: &&'b mut B) -> bool { PartialEq::eq(*self, *other) } + fn eq(&self, other: &&mut B) -> bool { PartialEq::eq(*self, *other) } #[inline] - fn ne(&self, other: &&'b mut B) -> bool { PartialEq::ne(*self, *other) } + fn ne(&self, other: &&mut B) -> bool { PartialEq::ne(*self, *other) } } #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, 'b, A: ?Sized, B: ?Sized> PartialEq<&'b B> for &'a mut A where A: PartialEq { + impl PartialEq<&B> for &mut A where A: PartialEq { #[inline] - fn eq(&self, other: &&'b B) -> bool { PartialEq::eq(*self, *other) } + fn eq(&self, other: &&B) -> bool { PartialEq::eq(*self, *other) } #[inline] - fn ne(&self, other: &&'b B) -> bool { PartialEq::ne(*self, *other) } + fn ne(&self, other: &&B) -> bool { PartialEq::ne(*self, *other) } } } diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index 2d4813718f41a..e903bd936c484 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -1,33 +1,25 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Traits for conversions between types. //! -//! The traits in this module provide a general way to talk about conversions -//! from one type to another. They follow the standard Rust conventions of -//! `as`/`into`/`from`. +//! The traits in this module provide a way to convert from one type to another type. +//! Each trait serves a different purpose: //! -//! Like many traits, these are often used as bounds for generic functions, to -//! support arguments of multiple types. +//! - Implement the [`AsRef`] trait for cheap reference-to-reference conversions +//! - Implement the [`AsMut`] trait for cheap mutable-to-mutable conversions +//! - Implement the [`From`] trait for consuming value-to-value conversions +//! - Implement the [`Into`] trait for consuming value-to-value conversions to types +//! outside the current crate +//! - The [`TryFrom`] and [`TryInto`] traits behave like [`From`] and [`Into`], +//! but should be implemented when the conversion can fail. //! -//! - Implement the `As*` traits for reference-to-reference conversions -//! - Implement the [`Into`] trait when you want to consume the value in the conversion -//! - The [`From`] trait is the most flexible, useful for value _and_ reference conversions -//! - The [`TryFrom`] and [`TryInto`] traits behave like [`From`] and [`Into`], but allow for the -//! conversion to fail +//! 
The traits in this module are often used as trait bounds for generic functions such that to +//! arguments of multiple types are supported. See the documentation of each trait for examples. //! -//! As a library author, you should prefer implementing [`From`][`From`] or +//! As a library author, you should always prefer implementing [`From`][`From`] or //! [`TryFrom`][`TryFrom`] rather than [`Into`][`Into`] or [`TryInto`][`TryInto`], //! as [`From`] and [`TryFrom`] provide greater flexibility and offer //! equivalent [`Into`] or [`TryInto`] implementations for free, thanks to a -//! blanket implementation in the standard library. +//! blanket implementation in the standard library. Only implement [`Into`] or [`TryInto`] +//! when a conversion to a type outside the current crate is required. //! //! # Generic Implementations //! @@ -48,6 +40,8 @@ #![stable(feature = "rust1", since = "1.0.0")] +use fmt; + /// An identity function. /// /// Two things are important to note about this function: @@ -65,7 +59,6 @@ /// Using `identity` to do nothing among other interesting functions: /// /// ```rust -/// #![feature(convert_id)] /// use std::convert::identity; /// /// fn manipulation(x: u32) -> u32 { @@ -79,7 +72,6 @@ /// Using `identity` to get a function that changes nothing in a conditional: /// /// ```rust -/// #![feature(convert_id)] /// use std::convert::identity; /// /// # let condition = true; @@ -96,42 +88,34 @@ /// Using `identity` to keep the `Some` variants of an iterator of `Option`: /// /// ```rust -/// #![feature(convert_id)] /// use std::convert::identity; /// /// let iter = vec![Some(1), None, Some(3)].into_iter(); /// let filtered = iter.filter_map(identity).collect::>(); /// assert_eq!(vec![1, 3], filtered); /// ``` -#[unstable(feature = "convert_id", issue = "53500")] +#[stable(feature = "convert_id", since = "1.33.0")] #[inline] pub const fn identity(x: T) -> T { x } -/// A cheap reference-to-reference conversion. Used to convert a value to a -/// reference value within generic code. -/// -/// `AsRef` is very similar to, but serves a slightly different purpose than, -/// [`Borrow`]. +/// Used to do a cheap reference-to-reference conversion. /// -/// `AsRef` is to be used when wishing to convert to a reference of another -/// type. -/// `Borrow` is more related to the notion of taking the reference. It is -/// useful when wishing to abstract over the type of reference -/// (`&T`, `&mut T`) or allow both the referenced and owned type to be treated -/// in the same manner. +/// This trait is similar to [`AsMut`] which is used for converting between mutable references. +/// If you need to do a costly conversion it is better to implement [`From`] with type +/// `&T` or write a custom function. /// -/// The key difference between the two traits is the intention: /// -/// - Use `AsRef` when the goal is to simply convert into a reference -/// - Use `Borrow` when the goal is related to writing code that is agnostic to -/// the type of borrow and whether it is a reference or value +/// `AsRef` has the same signature as [`Borrow`], but `Borrow` is different in few aspects: /// -/// See [the book][book] for a more detailed comparison. +/// - Unlike `AsRef`, `Borrow` has a blanket impl for any `T`, and can be used to accept either +/// a reference or a value. +/// - `Borrow` also requires that `Hash`, `Eq` and `Ord` for borrowed value are +/// equivalent to those of the owned value. 
For this reason, if you want to +/// borrow only a single field of a struct you can implement `AsRef`, but not `Borrow`. /// -/// [book]: ../../book/first-edition/borrow-and-asref.html /// [`Borrow`]: ../../std/borrow/trait.Borrow.html /// -/// **Note: this trait must not fail**. If the conversion can fail, use a +/// **Note: This trait must not fail**. If the conversion can fail, use a /// dedicated method which returns an [`Option`] or a [`Result`]. /// /// [`Option`]: ../../std/option/enum.Option.html @@ -145,7 +129,12 @@ pub const fn identity(x: T) -> T { x } /// /// # Examples /// -/// Both [`String`] and `&str` implement `AsRef`: +/// By using trait bounds we can accept arguments of different types as long as they can be +/// converted a the specified type `T`. +/// +/// For example: By creating a generic function that takes an `AsRef` we express that we +/// want to accept all references that can be converted to &str as an argument. +/// Since both [`String`] and `&str` implement `AsRef` we can accept both as input argument. /// /// [`String`]: ../../std/string/struct.String.html /// @@ -168,12 +157,13 @@ pub trait AsRef { fn as_ref(&self) -> &T; } -/// A cheap, mutable reference-to-mutable reference conversion. +/// Used to do a cheap mutable-to-mutable reference conversion. /// -/// This trait is similar to `AsRef` but used for converting between mutable -/// references. +/// This trait is similar to [`AsRef`] but used for converting between mutable +/// references. If you need to do a costly conversion it is better to +/// implement [`From`] with type `&mut T` or write a custom function. /// -/// **Note: this trait must not fail**. If the conversion can fail, use a +/// **Note: This trait must not fail**. If the conversion can fail, use a /// dedicated method which returns an [`Option`] or a [`Result`]. /// /// [`Option`]: ../../std/option/enum.Option.html @@ -187,10 +177,11 @@ pub trait AsRef { /// /// # Examples /// -/// [`Box`] implements `AsMut`: -/// -/// [`Box`]: ../../std/boxed/struct.Box.html -/// +/// Using `AsMut` as trait bound for a generic function we can accept all mutable references +/// that can be converted to type `&mut T`. Because [`Box`] implements `AsMut` we can +/// write a function `add_one`that takes all arguments that can be converted to `&mut u64`. +/// Because [`Box`] implements `AsMut` `add_one` accepts arguments of type +/// `&mut Box` as well: /// ``` /// fn add_one>(num: &mut T) { /// *num.as_mut() += 1; @@ -200,7 +191,7 @@ pub trait AsRef { /// add_one(&mut boxed_num); /// assert_eq!(*boxed_num, 1); /// ``` -/// +/// [`Box`]: ../../std/boxed/struct.Box.html /// #[stable(feature = "rust1", since = "1.0.0")] pub trait AsMut { @@ -209,29 +200,27 @@ pub trait AsMut { fn as_mut(&mut self) -> &mut T; } -/// A conversion that consumes `self`, which may or may not be expensive. The -/// reciprocal of [`From`][From]. +/// A value-to-value conversion that consumes the input value. The +/// opposite of [`From`]. /// -/// **Note: this trait must not fail**. If the conversion can fail, use -/// [`TryInto`] or a dedicated method which returns an [`Option`] or a -/// [`Result`]. +/// One should only implement [`Into`] if a conversion to a type outside the current crate is +/// required. Otherwise one should always prefer implementing [`From`] over [`Into`] because +/// implementing [`From`] automatically provides one with a implementation of [`Into`] thanks to +/// the blanket implementation in the standard library. 
[`From`] cannot do these type of +/// conversions because of Rust's orphaning rules. /// -/// Library authors should not directly implement this trait, but should prefer -/// implementing the [`From`][From] trait, which offers greater flexibility and -/// provides an equivalent `Into` implementation for free, thanks to a blanket -/// implementation in the standard library. +/// **Note: This trait must not fail**. If the conversion can fail, use [`TryInto`]. /// /// # Generic Implementations /// -/// - [`From`][From]` for U` implies `Into for T` -/// - [`into`] is reflexive, which means that `Into for T` is implemented +/// - [`From`]` for U` implies `Into for T` +/// - [`Into`]` is reflexive, which means that `Into for T` is implemented /// -/// # Implementing `Into` +/// # Implementing `Into` for conversions to external types /// -/// There is one exception to implementing `Into`, and it's kind of esoteric. -/// If the destination type is not part of the current crate, and it uses a -/// generic variable, then you can't implement `From` directly. For example, -/// take this crate: +/// If the destination type is not part of the current crate +/// then you can't implement [`From`] directly. +/// For example, take this code: /// /// ```compile_fail /// struct Wrapper(Vec); @@ -241,8 +230,9 @@ pub trait AsMut { /// } /// } /// ``` -/// -/// To fix this, you can implement `Into` directly: +/// This will fail to compile because we cannot implement a trait for a type +/// if both the trait and the type are not defined by the current crate. +/// This is due to Rust's orphaning rules. To bypass this, you can implement `Into` directly: /// /// ``` /// struct Wrapper(Vec); @@ -253,17 +243,22 @@ pub trait AsMut { /// } /// ``` /// -/// This won't always allow the conversion: for example, `try!` and `?` -/// always use `From`. However, in most cases, people use `Into` to do the -/// conversions, and this will allow that. +/// It is important to understand that `Into` does not provide a [`From`] implementation +/// (as [`From`] does with `Into`). Therefore, you should always try to implement [`From`] +/// and then fall back to `Into` if [`From`] can't be implemented. /// -/// In almost all cases, you should try to implement `From`, then fall back -/// to `Into` if `From` can't be implemented. +/// Prefer using `Into` over [`From`] when specifying trait bounds on a generic function +/// to ensure that types that only implement `Into` can be used as well. /// /// # Examples /// /// [`String`] implements `Into>`: /// +/// In order to express that we want a generic function to take all arguments that can be +/// converted to a specified type `T`, we can use a trait bound of `Into`. +/// For example: The function `is_hello` takes all arguments that can be converted into a +/// `Vec`. +/// /// ``` /// fn is_hello>>(s: T) { /// let bytes = b"hello".to_vec(); @@ -287,36 +282,38 @@ pub trait Into: Sized { fn into(self) -> T; } -/// Simple and safe type conversions in to `Self`. It is the reciprocal of -/// `Into`. +/// Used to do value-to-value conversions while consuming the input value. It is the reciprocal of +/// [`Into`]. /// -/// This trait is useful when performing error handling as described by -/// [the book][book] and is closely related to the `?` operator. +/// One should always prefer implementing [`From`] over [`Into`] +/// because implementing [`From`] automatically provides one with a implementation of [`Into`] +/// thanks to the blanket implementation in the standard library. 
/// -/// When constructing a function that is capable of failing the return type -/// will generally be of the form `Result`. +/// Only implement [`Into`] if a conversion to a type outside the current crate is required. +/// [`From`] cannot do these type of conversions because of Rust's orphaning rules. +/// See [`Into`] for more details. /// -/// The `From` trait allows for simplification of error handling by providing a -/// means of returning a single error type that encapsulates numerous possible -/// erroneous situations. +/// Prefer using [`Into`] over using [`From`] when specifying trait bounds on a generic function. +/// This way, types that directly implement [`Into`] can be used as arguments as well. /// -/// This trait is not limited to error handling, rather the general case for -/// this trait would be in any type conversions to have an explicit definition -/// of how they are performed. +/// The [`From`] is also very useful when performing error handling. When constructing a function +/// that is capable of failing, the return type will generally be of the form `Result`. +/// The `From` trait simplifies error handling by allowing a function to return a single error type +/// that encapsulate multiple error types. See the "Examples" section and [the book][book] for more +/// details. /// -/// **Note: this trait must not fail**. If the conversion can fail, use -/// [`TryFrom`] or a dedicated method which returns an [`Option`] or a -/// [`Result`]. +/// **Note: This trait must not fail**. If the conversion can fail, use [`TryFrom`]. /// /// # Generic Implementations /// -/// - `From for U` implies [`Into`]` for T` -/// - [`from`] is reflexive, which means that `From for T` is implemented +/// - [`From`]` for U` implies [`Into`]` for T` +/// - [`From`] is reflexive, which means that `From for T` is implemented /// /// # Examples /// /// [`String`] implements `From<&str>`: /// +/// An explicit conversion from a &str to a String is done as follows: /// ``` /// let string = "hello".to_string(); /// let other_string = String::from("hello"); @@ -324,7 +321,12 @@ pub trait Into: Sized { /// assert_eq!(string, other_string); /// ``` /// -/// An example usage for error handling: +/// While performing error handling it is often useful to implement `From` for your own error type. +/// By converting underlying error types to our own custom error type that encapsulates the +/// underlying error type, we can return a single error type without losing information on the +/// underlying cause. The '?' operator automatically converts the underlying error type to our +/// custom error type by calling `Into::into` which is automatically provided when +/// implementing `From`. The compiler then infers which implementation of `Into` should be used. /// /// ``` /// use std::fs; @@ -361,8 +363,14 @@ pub trait Into: Sized { /// [`String`]: ../../std/string/struct.String.html /// [`Into`]: trait.Into.html /// [`from`]: trait.From.html#tymethod.from -/// [book]: ../../book/first-edition/error-handling.html +/// [book]: ../../book/ch09-00-error-handling.html #[stable(feature = "rust1", since = "1.0.0")] +#[rustc_on_unimplemented( + on( + all(_Self="&str", T="std::string::String"), + note="to coerce a `{T}` into a `{Self}`, use `&*` as a prefix", + ) +)] pub trait From: Sized { /// Performs the conversion. #[stable(feature = "rust1", since = "1.0.0")] @@ -372,30 +380,108 @@ pub trait From: Sized { /// An attempted conversion that consumes `self`, which may or may not be /// expensive. 
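
The `From`-based error handling described earlier in this hunk, sketched end to end; the original doctest is partly elided by the hunk boundaries above, so this is a comparable stand-in with an invented `CliError` type and file name:

```rust
use std::fs;
use std::io;
use std::num;

#[derive(Debug)]
enum CliError {
    Io(io::Error),
    Parse(num::ParseIntError),
}

impl From<io::Error> for CliError {
    fn from(e: io::Error) -> Self { CliError::Io(e) }
}

impl From<num::ParseIntError> for CliError {
    fn from(e: num::ParseIntError) -> Self { CliError::Parse(e) }
}

fn open_and_parse_file(name: &str) -> Result<i32, CliError> {
    // `?` converts each underlying error into `CliError` via the `From` impls above.
    let contents = fs::read_to_string(name)?;
    let parsed: i32 = contents.trim().parse()?;
    Ok(parsed)
}

fn main() {
    // No such file, so we get the `Io` variant back.
    assert!(open_and_parse_file("definitely-missing.txt").is_err());
}
```
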
/// -/// Library authors should not directly implement this trait, but should prefer -/// implementing the [`TryFrom`] trait, which offers greater flexibility and -/// provides an equivalent `TryInto` implementation for free, thanks to a -/// blanket implementation in the standard library. For more information on this, -/// see the documentation for [`Into`]. +/// Library authors should usually not directly implement this trait, +/// but should prefer implementing the [`TryFrom`] trait, which offers +/// greater flexibility and provides an equivalent `TryInto` +/// implementation for free, thanks to a blanket implementation in the +/// standard library. For more information on this, see the +/// documentation for [`Into`]. +/// +/// # Implementing `TryInto` +/// +/// This suffers the same restrictions and reasoning as implementing +/// [`Into`], see there for details. /// /// [`TryFrom`]: trait.TryFrom.html /// [`Into`]: trait.Into.html -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] pub trait TryInto: Sized { /// The type returned in the event of a conversion error. + #[stable(feature = "try_from", since = "1.34.0")] type Error; /// Performs the conversion. + #[stable(feature = "try_from", since = "1.34.0")] fn try_into(self) -> Result; } -/// Attempt to construct `Self` via a conversion. -#[unstable(feature = "try_from", issue = "33417")] +/// Simple and safe type conversions that may fail in a controlled +/// way under some circumstances. It is the reciprocal of [`TryInto`]. +/// +/// This is useful when you are doing a type conversion that may +/// trivially succeed but may also need special handling. +/// For example, there is no way to convert an `i64` into an `i32` +/// using the [`From`] trait, because an `i64` may contain a value +/// that an `i32` cannot represent and so the conversion would lose data. +/// This might be handled by truncating the `i64` to an `i32` (essentially +/// giving the `i64`'s value modulo `i32::MAX`) or by simply returning +/// `i32::MAX`, or by some other method. The `From` trait is intended +/// for perfect conversions, so the `TryFrom` trait informs the +/// programmer when a type conversion could go bad and lets them +/// decide how to handle it. +/// +/// # Generic Implementations +/// +/// - `TryFrom for U` implies [`TryInto`]` for T` +/// - [`try_from`] is reflexive, which means that `TryFrom for T` +/// is implemented and cannot fail -- the associated `Error` type for +/// calling `T::try_from()` on a value of type `T` is `Infallible`. +/// When the `!` type is stablized `Infallible` and `!` will be +/// equivalent. +/// +/// `TryFrom` can be implemented as follows: +/// +/// ``` +/// use std::convert::TryFrom; +/// +/// struct SuperiorThanZero(i32); +/// +/// impl TryFrom for SuperiorThanZero { +/// type Error = &'static str; +/// +/// fn try_from(value: i32) -> Result { +/// if value < 0 { +/// Err("SuperiorThanZero only accepts value superior than zero!") +/// } else { +/// Ok(SuperiorThanZero(value)) +/// } +/// } +/// } +/// ``` +/// +/// # Examples +/// +/// As described, [`i32`] implements `TryFrom`: +/// +/// ``` +/// use std::convert::TryFrom; +/// +/// let big_number = 1_000_000_000_000i64; +/// // Silently truncates `big_number`, requires detecting +/// // and handling the truncation after the fact. +/// let smaller_number = big_number as i32; +/// assert_eq!(smaller_number, -727379968); +/// +/// // Returns an error because `big_number` is too big to +/// // fit in an `i32`. 
+/// let try_smaller_number = i32::try_from(big_number); +/// assert!(try_smaller_number.is_err()); +/// +/// // Returns `Ok(3)`. +/// let try_successful_smaller_number = i32::try_from(3); +/// assert!(try_successful_smaller_number.is_ok()); +/// ``` +/// +/// [`try_from`]: trait.TryFrom.html#tymethod.try_from +/// [`TryInto`]: trait.TryInto.html +#[stable(feature = "try_from", since = "1.34.0")] pub trait TryFrom: Sized { /// The type returned in the event of a conversion error. + #[stable(feature = "try_from", since = "1.34.0")] type Error; /// Performs the conversion. + #[stable(feature = "try_from", since = "1.34.0")] fn try_from(value: T) -> Result; } @@ -463,7 +549,7 @@ impl From for T { // TryFrom implies TryInto -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] impl TryInto for T where U: TryFrom { type Error = U::Error; @@ -475,12 +561,12 @@ impl TryInto for T where U: TryFrom // Infallible conversions are semantically equivalent to fallible conversions // with an uninhabited error type. -#[unstable(feature = "try_from", issue = "33417")] -impl TryFrom for T where T: From { - type Error = !; +#[stable(feature = "try_from", since = "1.34.0")] +impl TryFrom for T where U: Into { + type Error = Infallible; fn try_from(value: U) -> Result { - Ok(T::from(value)) + Ok(U::into(value)) } } @@ -509,3 +595,115 @@ impl AsRef for str { self } } + +//////////////////////////////////////////////////////////////////////////////// +// THE NO-ERROR ERROR TYPE +//////////////////////////////////////////////////////////////////////////////// + +/// The error type for errors that can never happen. +/// +/// Since this enum has no variant, a value of this type can never actually exist. +/// This can be useful for generic APIs that use [`Result`] and parameterize the error type, +/// to indicate that the result is always [`Ok`]. +/// +/// For example, the [`TryFrom`] trait (conversion that returns a [`Result`]) +/// has a blanket implementation for all types where a reverse [`Into`] implementation exists. +/// +/// ```ignore (illustrates std code, duplicating the impl in a doctest would be an error) +/// impl TryFrom for T where U: Into { +/// type Error = Infallible; +/// +/// fn try_from(value: U) -> Result { +/// Ok(U::into(value)) // Never returns `Err` +/// } +/// } +/// ``` +/// +/// # Future compatibility +/// +/// This enum has the same role as [the `!` “never” type][never], +/// which is unstable in this version of Rust. +/// When `!` is stabilized, we plan to make `Infallible` a type alias to it: +/// +/// ```ignore (illustrates future std change) +/// pub type Infallible = !; +/// ``` +/// +/// … and eventually deprecate `Infallible`. +/// +/// +/// However there is one case where `!` syntax can be used +/// before `!` is stabilized as a full-fleged type: in the position of a function’s return type. +/// Specifically, it is possible implementations for two different function pointer types: +/// +/// ``` +/// trait MyTrait {} +/// impl MyTrait for fn() -> ! {} +/// impl MyTrait for fn() -> std::convert::Infallible {} +/// ``` +/// +/// With `Infallible` being an enum, this code is valid. +/// However when `Infallible` becomes an alias for the never type, +/// the two `impl`s will start to overlap +/// and therefore will be disallowed by the language’s trait coherence rules. 
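
Seen from the caller's side, the reworked blanket impl above means any type with an infallible `Into` conversion also gets a `TryFrom` whose error is `Infallible`. A sketch with an invented `Meters` newtype:

```rust
use std::convert::{Infallible, TryFrom};

#[derive(Debug, PartialEq)]
struct Meters(u32);

impl From<u32> for Meters {
    fn from(n: u32) -> Self { Meters(n) }
}

fn main() {
    // Because `u32: Into<Meters>`, the blanket impl provides
    // `TryFrom<u32> for Meters` with `Error = Infallible`.
    let m: Result<Meters, Infallible> = Meters::try_from(7u32);
    assert_eq!(m, Ok(Meters(7)));
}
```
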
+/// +/// [`Ok`]: ../result/enum.Result.html#variant.Ok +/// [`Result`]: ../result/enum.Result.html +/// [`TryFrom`]: trait.TryFrom.html +/// [`Into`]: trait.Into.html +/// [never]: ../../std/primitive.never.html +#[stable(feature = "convert_infallible", since = "1.34.0")] +#[derive(Copy)] +pub enum Infallible {} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl Clone for Infallible { + fn clone(&self) -> Infallible { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl fmt::Debug for Infallible { + fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl fmt::Display for Infallible { + fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl PartialEq for Infallible { + fn eq(&self, _: &Infallible) -> bool { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl Eq for Infallible {} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl PartialOrd for Infallible { + fn partial_cmp(&self, _other: &Self) -> Option { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl Ord for Infallible { + fn cmp(&self, _other: &Self) -> crate::cmp::Ordering { + match *self {} + } +} + +#[stable(feature = "convert_infallible", since = "1.34.0")] +impl From for Infallible { + fn from(x: !) -> Self { + x + } +} diff --git a/src/libcore/default.rs b/src/libcore/default.rs index 638acebd617bf..5ad05b3824764 100644 --- a/src/libcore/default.rs +++ b/src/libcore/default.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The `Default` trait for types which may have meaningful default values. #![stable(feature = "rust1", since = "1.0.0")] @@ -64,7 +54,7 @@ /// /// ## How can I implement `Default`? /// -/// Provide an implementation for the `default()` method that returns the value of +/// Provides an implementation for the `default()` method that returns the value of /// your type that should be the default: /// /// ``` diff --git a/src/libcore/ffi.rs b/src/libcore/ffi.rs index 899fae909467a..896710609642a 100644 --- a/src/libcore/ffi.rs +++ b/src/libcore/ffi.rs @@ -12,24 +12,27 @@ use ::fmt; /// and `*mut c_void` is equivalent to C's `void*`. That said, this is /// *not* the same as C's `void` return type, which is Rust's `()` type. /// -/// Ideally, this type would be equivalent to [`!`], but currently it may -/// be more ideal to use `c_void` for FFI purposes. +/// To model pointers to opaque types in FFI, until `extern type` is +/// stabilized, it is recommended to use a newtype wrapper around an empty +/// byte array. See the [Nomicon] for details. /// -/// [`!`]: ../../std/primitive.never.html /// [pointer]: ../../std/primitive.pointer.html +/// [Nomicon]: https://doc.rust-lang.org/nomicon/ffi.html#representing-opaque-structs // N.B., for LLVM to recognize the void pointer type and by extension // functions like malloc(), we need to have it represented as i8* in // LLVM bitcode. 
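
The Nomicon pattern referenced in the new `c_void` docs above, sketched for a hypothetical C library (`foo_new`/`foo_free` are made-up names; the extern block only declares them, so nothing needs to link against a real library here):

```rust
/// An opaque C type: a newtype around an empty byte array, so it cannot be
/// constructed or meaningfully dereferenced from Rust.
#[repr(C)]
pub struct Foo {
    _private: [u8; 0],
}

extern "C" {
    // Hypothetical C API; only raw pointers to `Foo` ever cross the boundary.
    pub fn foo_new() -> *mut Foo;
    pub fn foo_free(foo: *mut Foo);
}

fn main() {
    // Declarations alone are enough to type-check; we never call them here.
}
```
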
The enum used here ensures this and prevents misuse -// of the "raw" type by only having private variants.. We need two +// of the "raw" type by only having private variants. We need two // variants, because the compiler complains about the repr attribute -// otherwise. +// otherwise and we need at least one variant as otherwise the enum +// would be uninhabited and at least dereferencing such pointers would +// be UB. #[repr(u8)] #[stable(feature = "raw_os", since = "1.1.0")] pub enum c_void { - #[unstable(feature = "c_void_variant", reason = "should not have to exist", + #[unstable(feature = "c_void_variant", reason = "temporary implementation detail", issue = "0")] #[doc(hidden)] __variant1, - #[unstable(feature = "c_void_variant", reason = "should not have to exist", + #[unstable(feature = "c_void_variant", reason = "temporary implementation detail", issue = "0")] #[doc(hidden)] __variant2, } @@ -44,18 +47,19 @@ impl fmt::Debug for c_void { /// Basic implementation of a `va_list`. #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), - all(target_arch = "aarch4", target_os = "ios"), + all(target_arch = "aarch64", target_os = "ios"), windows))] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] extern { type VaListImpl; } #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), + all(target_arch = "aarch64", target_os = "ios"), windows))] impl fmt::Debug for VaListImpl { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -64,21 +68,21 @@ impl fmt::Debug for VaListImpl { } /// AArch64 ABI implementation of a `va_list`. See the -/// [Aarch64 Procedure Call Standard] for more details. +/// [AArch64 Procedure Call Standard] for more details. /// /// [AArch64 Procedure Call Standard]: /// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf -#[cfg(all(target_arch = "aarch64", not(windows)))] +#[cfg(all(target_arch = "aarch64", not(target_os = "ios"), not(windows)))] #[repr(C)] #[derive(Debug)] #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] struct VaListImpl { - stack: *mut (), - gr_top: *mut (), - vr_top: *mut (), + stack: *mut c_void, + gr_top: *mut c_void, + vr_top: *mut c_void, gr_offs: i32, vr_offs: i32, } @@ -90,13 +94,13 @@ struct VaListImpl { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] struct VaListImpl { gpr: u8, fpr: u8, reserved: u16, - overflow_arg_area: *mut (), - reg_save_area: *mut (), + overflow_arg_area: *mut c_void, + reg_save_area: *mut c_void, } /// x86_64 ABI implementation of a `va_list`. 
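
The `VaListImpl` plumbing in these hunks is the callee side of C variadics and is gated behind the nightly `c_variadic` feature. For contrast, the caller side has long been available on stable; a small sketch calling `printf` (assumes a C runtime that exports `printf`, which holds on the major Unix-like targets):

```rust
use std::os::raw::{c_char, c_int};

extern "C" {
    fn printf(format: *const c_char, ...) -> c_int;
}

fn main() {
    // The format string must be NUL-terminated since it crosses into C.
    let fmt = b"the answer is %d\n\0";
    unsafe {
        printf(fmt.as_ptr() as *const c_char, 42 as c_int);
    }
}
```
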
@@ -106,12 +110,12 @@ struct VaListImpl { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] struct VaListImpl { gp_offset: i32, fp_offset: i32, - overflow_arg_area: *mut (), - reg_save_area: *mut (), + overflow_arg_area: *mut c_void, + reg_save_area: *mut c_void, } /// A wrapper for a `va_list` @@ -120,7 +124,7 @@ struct VaListImpl { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] #[repr(transparent)] pub struct VaList<'a>(&'a mut VaListImpl); @@ -140,7 +144,7 @@ mod sealed_trait { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] pub trait VaArgSafe {} } @@ -150,7 +154,7 @@ macro_rules! impl_va_arg_safe { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] impl sealed_trait::VaArgSafe for $t {} )+ } @@ -163,12 +167,12 @@ impl_va_arg_safe!{f64} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] impl sealed_trait::VaArgSafe for *mut T {} #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] impl sealed_trait::VaArgSafe for *const T {} impl<'a> VaList<'a> { @@ -176,28 +180,28 @@ impl<'a> VaList<'a> { #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] + issue = "44930")] pub unsafe fn arg(&mut self) -> T { va_arg(self) } - /// Copy the `va_list` at the current location. + /// Copies the `va_list` at the current location. #[unstable(feature = "c_variadic", reason = "the `c_variadic` feature has not been properly tested on \ all supported platforms", - issue = "27745")] - pub unsafe fn copy(&mut self, f: F) -> R + issue = "44930")] + pub unsafe fn with_copy(&self, f: F) -> R where F: for<'copy> FnOnce(VaList<'copy>) -> R { #[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), - all(target_arch = "aarch4", target_os = "ios"), + all(target_arch = "aarch64", target_os = "ios"), windows))] let mut ap = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), - not(windows)))] + not(windows), not(all(target_arch = "aarch64", target_os = "ios"))))] let mut ap_inner = va_copy(self); #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), - not(windows)))] + not(windows), not(all(target_arch = "aarch64", target_os = "ios"))))] let mut ap = VaList(&mut ap_inner); let ret = f(VaList(ap.0)); va_end(&mut ap); @@ -210,13 +214,14 @@ extern "rust-intrinsic" { /// `va_copy`. fn va_end(ap: &mut VaList); - /// Copy the current location of arglist `src` to the arglist `dst`. + /// Copies the current location of arglist `src` to the arglist `dst`. 
#[cfg(any(all(not(target_arch = "aarch64"), not(target_arch = "powerpc"), not(target_arch = "x86_64")), + all(target_arch = "aarch64", target_os = "ios"), windows))] fn va_copy<'a>(src: &VaList<'a>) -> VaList<'a>; #[cfg(all(any(target_arch = "aarch64", target_arch = "powerpc", target_arch = "x86_64"), - not(windows)))] + not(windows), not(all(target_arch = "aarch64", target_os = "ios"))))] fn va_copy(src: &VaList) -> VaListImpl; /// Loads an argument of type `T` from the `va_list` `ap` and increment the diff --git a/src/libcore/fmt/builders.rs b/src/libcore/fmt/builders.rs index 4bc5b36d82ba2..df3852973b8dd 100644 --- a/src/libcore/fmt/builders.rs +++ b/src/libcore/fmt/builders.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use fmt; struct PadAdapter<'a> { @@ -21,7 +11,7 @@ impl<'a> PadAdapter<'a> { fmt.wrap_buf(move |buf| { *slot = Some(PadAdapter { buf, - on_newline: false, + on_newline: true, }); slot.as_mut().unwrap() }) @@ -81,8 +71,10 @@ impl fmt::Write for PadAdapter<'_> { /// } /// } /// -/// // prints "Foo { bar: 10, baz: "Hello World" }" -/// println!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }); +/// assert_eq!( +/// format!("{:?}", Foo { bar: 10, baz: "Hello World".to_string() }), +/// "Foo { bar: 10, baz: \"Hello World\" }", +/// ); /// ``` #[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] @@ -106,25 +98,51 @@ pub fn debug_struct_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, impl<'a, 'b: 'a> DebugStruct<'a, 'b> { /// Adds a new field to the generated struct output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Bar { + /// bar: i32, + /// another: String, + /// } + /// + /// impl fmt::Debug for Bar { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_struct("Bar") + /// .field("bar", &self.bar) // We add `bar` field. + /// .field("another", &self.another) // We add `another` field. + /// // We even add a field which doesn't exist (because why not?). + /// .field("not_existing_field", &1) + /// .finish() // We're good to go! 
+ /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Bar { bar: 10, another: "Hello World".to_string() }), + /// "Bar { bar: 10, another: \"Hello World\", not_existing_field: 1 }", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut DebugStruct<'a, 'b> { self.result = self.result.and_then(|_| { - let prefix = if self.has_fields { - "," - } else { - " {" - }; - if self.is_pretty() { + if !self.has_fields { + self.fmt.write_str(" {\n")?; + } let mut slot = None; let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot); - writer.write_str(prefix)?; - writer.write_str("\n")?; writer.write_str(name)?; writer.write_str(": ")?; - value.fmt(&mut writer) + value.fmt(&mut writer)?; + writer.write_str(",\n") } else { - write!(self.fmt, "{} {}: ", prefix, name)?; + let prefix = if self.has_fields { ", " } else { " { " }; + self.fmt.write_str(prefix)?; + self.fmt.write_str(name)?; + self.fmt.write_str(": ")?; value.fmt(self.fmt) } }); @@ -134,12 +152,38 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> { } /// Finishes output and returns any error encountered. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Bar { + /// bar: i32, + /// baz: String, + /// } + /// + /// impl fmt::Debug for Bar { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_struct("Bar") + /// .field("bar", &self.bar) + /// .field("baz", &self.baz) + /// .finish() // You need to call it to "finish" the + /// // struct formatting. + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Bar { bar: 10, baz: "Hello World".to_string() }), + /// "Bar { bar: 10, baz: \"Hello World\" }", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { if self.has_fields { self.result = self.result.and_then(|_| { if self.is_pretty() { - self.fmt.write_str("\n}") + self.fmt.write_str("}") } else { self.fmt.write_str(" }") } @@ -178,8 +222,10 @@ impl<'a, 'b: 'a> DebugStruct<'a, 'b> { /// } /// } /// -/// // prints "Foo(10, "Hello World")" -/// println!("{:?}", Foo(10, "Hello World".to_string())); +/// assert_eq!( +/// format!("{:?}", Foo(10, "Hello World".to_string())), +/// "Foo(10, \"Hello World\")", +/// ); /// ``` #[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] @@ -203,24 +249,42 @@ pub fn debug_tuple_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>, name: &str) -> D impl<'a, 'b: 'a> DebugTuple<'a, 'b> { /// Adds a new field to the generated tuple struct output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32, String); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_tuple("Foo") + /// .field(&self.0) // We add the first field. + /// .field(&self.1) // We add the second field. + /// .finish() // We're good to go! 
+ /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(10, "Hello World".to_string())), + /// "Foo(10, \"Hello World\")", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut DebugTuple<'a, 'b> { self.result = self.result.and_then(|_| { - let (prefix, space) = if self.fields > 0 { - (",", " ") - } else { - ("(", "") - }; - if self.is_pretty() { + if self.fields == 0 { + self.fmt.write_str("(\n")?; + } let mut slot = None; let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot); - writer.write_str(prefix)?; - writer.write_str("\n")?; - value.fmt(&mut writer) + value.fmt(&mut writer)?; + writer.write_str(",\n") } else { + let prefix = if self.fields == 0 { "(" } else { ", " }; self.fmt.write_str(prefix)?; - self.fmt.write_str(space)?; value.fmt(self.fmt) } }); @@ -230,14 +294,34 @@ impl<'a, 'b: 'a> DebugTuple<'a, 'b> { } /// Finishes output and returns any error encountered. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(i32, String); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_tuple("Foo") + /// .field(&self.0) + /// .field(&self.1) + /// .finish() // You need to call it to "finish" the + /// // tuple formatting. + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(10, "Hello World".to_string())), + /// "Foo(10, \"Hello World\")", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { if self.fields > 0 { self.result = self.result.and_then(|_| { - if self.is_pretty() { - self.fmt.write_str("\n")?; - } - if self.fields == 1 && self.empty_name { + if self.fields == 1 && self.empty_name && !self.is_pretty() { self.fmt.write_str(",")?; } self.fmt.write_str(")") @@ -261,14 +345,13 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { fn entry(&mut self, entry: &dyn fmt::Debug) { self.result = self.result.and_then(|_| { if self.is_pretty() { + if !self.has_fields { + self.fmt.write_str("\n")?; + } let mut slot = None; let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot); - writer.write_str(if self.has_fields { - ",\n" - } else { - "\n" - })?; - entry.fmt(&mut writer) + entry.fmt(&mut writer)?; + writer.write_str(",\n") } else { if self.has_fields { self.fmt.write_str(", ")? @@ -280,15 +363,6 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { self.has_fields = true; } - pub fn finish(&mut self) { - let prefix = if self.is_pretty() && self.has_fields { - "\n" - } else { - "" - }; - self.result = self.result.and_then(|_| self.fmt.write_str(prefix)); - } - fn is_pretty(&self) -> bool { self.fmt.alternate() } @@ -316,8 +390,10 @@ impl<'a, 'b: 'a> DebugInner<'a, 'b> { /// } /// } /// -/// // prints "{10, 11}" -/// println!("{:?}", Foo(vec![10, 11])); +/// assert_eq!( +/// format!("{:?}", Foo(vec![10, 11])), +/// "{10, 11}", +/// ); /// ``` #[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] @@ -327,7 +403,7 @@ pub struct DebugSet<'a, 'b: 'a> { } pub fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b> { - let result = write!(fmt, "{{"); + let result = fmt.write_str("{"); DebugSet { inner: DebugInner { fmt, @@ -339,6 +415,28 @@ pub fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b impl<'a, 'b: 'a> DebugSet<'a, 'b> { /// Adds a new entry to the set output. 
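
The `PadAdapter` and builder changes above move the `",\n"` write to after each field or entry, which is what gives `{:#?}` output a trailing comma on every line. A quick check of the intended shape, on the assumption that the compiler in use includes these changes (current compilers do):

```rust
#[derive(Debug)]
struct Point {
    x: i32,
    y: i32,
}

fn main() {
    assert_eq!(
        format!("{:#?}", Point { x: 1, y: 2 }),
        "Point {\n    x: 1,\n    y: 2,\n}",
    );
}
```
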
+ /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec, Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_set() + /// .entry(&self.0) // Adds the first "entry". + /// .entry(&self.1) // Adds the second "entry". + /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])), + /// "{[10, 11], [12, 13]}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut DebugSet<'a, 'b> { self.inner.entry(entry); @@ -346,6 +444,28 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> { } /// Adds the contents of an iterator of entries to the set output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec, Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_set() + /// .entries(self.0.iter()) // Adds the first "entry". + /// .entries(self.1.iter()) // Adds the second "entry". + /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])), + /// "{10, 11, 12, 13}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entries(&mut self, entries: I) -> &mut DebugSet<'a, 'b> where D: fmt::Debug, @@ -358,9 +478,29 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> { } /// Finishes output and returns any error encountered. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_set() + /// .entries(self.0.iter()) + /// .finish() // Ends the struct formatting. + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11])), + /// "{10, 11}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { - self.inner.finish(); self.inner.result.and_then(|_| self.inner.fmt.write_str("}")) } } @@ -387,8 +527,10 @@ impl<'a, 'b: 'a> DebugSet<'a, 'b> { /// } /// } /// -/// // prints "[10, 11]" -/// println!("{:?}", Foo(vec![10, 11])); +/// assert_eq!( +/// format!("{:?}", Foo(vec![10, 11])), +/// "[10, 11]", +/// ); /// ``` #[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] @@ -398,7 +540,7 @@ pub struct DebugList<'a, 'b: 'a> { } pub fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, 'b> { - let result = write!(fmt, "["); + let result = fmt.write_str("["); DebugList { inner: DebugInner { fmt, @@ -410,6 +552,28 @@ pub fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, impl<'a, 'b: 'a> DebugList<'a, 'b> { /// Adds a new entry to the list output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec, Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_list() + /// .entry(&self.0) // We add the first "entry". + /// .entry(&self.1) // We add the second "entry". 
+ /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])), + /// "[[10, 11], [12, 13]]", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut DebugList<'a, 'b> { self.inner.entry(entry); @@ -417,6 +581,28 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> { } /// Adds the contents of an iterator of entries to the list output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec, Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_list() + /// .entries(self.0.iter()) + /// .entries(self.1.iter()) + /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11], vec![12, 13])), + /// "[10, 11, 12, 13]", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entries(&mut self, entries: I) -> &mut DebugList<'a, 'b> where D: fmt::Debug, @@ -429,9 +615,29 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> { } /// Finishes output and returns any error encountered. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_list() + /// .entries(self.0.iter()) + /// .finish() // Ends the struct formatting. + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![10, 11])), + /// "[10, 11]", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { - self.inner.finish(); self.inner.result.and_then(|_| self.inner.fmt.write_str("]")) } } @@ -458,8 +664,10 @@ impl<'a, 'b: 'a> DebugList<'a, 'b> { /// } /// } /// -/// // prints "{"A": 10, "B": 11}" -/// println!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])); +/// assert_eq!( +/// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])), +/// "{\"A\": 10, \"B\": 11}", +/// ); /// ``` #[must_use = "must eventually call `finish()` on Debug builders"] #[allow(missing_debug_implementations)] @@ -471,7 +679,7 @@ pub struct DebugMap<'a, 'b: 'a> { } pub fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b> { - let result = write!(fmt, "{{"); + let result = fmt.write_str("{"); DebugMap { fmt, result, @@ -481,20 +689,40 @@ pub fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b impl<'a, 'b: 'a> DebugMap<'a, 'b> { /// Adds a new entry to the map output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec<(String, i32)>); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_map() + /// .entry(&"whole", &self.0) // We add the "whole" entry. 
+ /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])), + /// "{\"whole\": [(\"A\", 10), (\"B\", 11)]}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entry(&mut self, key: &dyn fmt::Debug, value: &dyn fmt::Debug) -> &mut DebugMap<'a, 'b> { self.result = self.result.and_then(|_| { if self.is_pretty() { + if !self.has_fields { + self.fmt.write_str("\n")?; + } let mut slot = None; let mut writer = PadAdapter::wrap(&mut self.fmt, &mut slot); - writer.write_str(if self.has_fields { - ",\n" - } else { - "\n" - })?; key.fmt(&mut writer)?; writer.write_str(": ")?; - value.fmt(&mut writer) + value.fmt(&mut writer)?; + writer.write_str(",\n") } else { if self.has_fields { self.fmt.write_str(", ")? @@ -510,6 +738,29 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> { } /// Adds the contents of an iterator of entries to the map output. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec<(String, i32)>); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_map() + /// // We map our vec so each entries' first field will become + /// // the "key". + /// .entries(self.0.iter().map(|&(ref k, ref v)| (k, v))) + /// .finish() + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])), + /// "{\"A\": 10, \"B\": 11}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn entries(&mut self, entries: I) -> &mut DebugMap<'a, 'b> where K: fmt::Debug, @@ -523,14 +774,30 @@ impl<'a, 'b: 'a> DebugMap<'a, 'b> { } /// Finishes output and returns any error encountered. + /// + /// # Examples + /// + /// ``` + /// use std::fmt; + /// + /// struct Foo(Vec<(String, i32)>); + /// + /// impl fmt::Debug for Foo { + /// fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + /// fmt.debug_map() + /// .entries(self.0.iter().map(|&(ref k, ref v)| (k, v))) + /// .finish() // Ends the struct formatting. + /// } + /// } + /// + /// assert_eq!( + /// format!("{:?}", Foo(vec![("A".to_string(), 10), ("B".to_string(), 11)])), + /// "{\"A\": 10, \"B\": 11}", + /// ); + /// ``` #[stable(feature = "debug_builders", since = "1.2.0")] pub fn finish(&mut self) -> fmt::Result { - let prefix = if self.is_pretty() && self.has_fields { - "\n" - } else { - "" - }; - self.result.and_then(|_| write!(self.fmt, "{}}}", prefix)) + self.result.and_then(|_| self.fmt.write_str("}")) } fn is_pretty(&self) -> bool { diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs index 3717a783f2411..5f4c6f7b0a3f0 100644 --- a/src/libcore/fmt/float.rs +++ b/src/libcore/fmt/float.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use fmt::{Formatter, Result, LowerExp, UpperExp, Display, Debug}; use mem::MaybeUninit; use num::flt2dec; @@ -20,11 +10,12 @@ fn float_to_decimal_common_exact(fmt: &mut Formatter, num: &T, where T: flt2dec::DecodableFloat { unsafe { - let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64 - let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized(); + let mut buf = MaybeUninit::<[u8; 1024]>::uninit(); // enough for f32 and f64 + let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninit(); // FIXME(#53491): Technically, this is calling `get_mut` on an uninitialized // `MaybeUninit` (here and elsewhere in this file). Revisit this once // we decided whether that is valid or not. + // Using `freeze` is *not enough*; `flt2dec::Part` is an enum! let formatted = flt2dec::to_exact_fixed_str(flt2dec::strategy::grisu::format_exact, *num, sign, precision, false, buf.get_mut(), parts.get_mut()); @@ -41,8 +32,9 @@ fn float_to_decimal_common_shortest(fmt: &mut Formatter, num: &T, { unsafe { // enough for f32 and f64 - let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized(); - let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized(); + let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninit(); + let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninit(); + // FIXME(#53491) let formatted = flt2dec::to_shortest_str(flt2dec::strategy::grisu::format_shortest, *num, sign, precision, false, buf.get_mut(), parts.get_mut()); @@ -79,8 +71,9 @@ fn float_to_exponential_common_exact(fmt: &mut Formatter, num: &T, where T: flt2dec::DecodableFloat { unsafe { - let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64 - let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized(); + let mut buf = MaybeUninit::<[u8; 1024]>::uninit(); // enough for f32 and f64 + let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninit(); + // FIXME(#53491) let formatted = flt2dec::to_exact_exp_str(flt2dec::strategy::grisu::format_exact, *num, sign, precision, upper, buf.get_mut(), parts.get_mut()); @@ -98,8 +91,9 @@ fn float_to_exponential_common_shortest(fmt: &mut Formatter, { unsafe { // enough for f32 and f64 - let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninitialized(); - let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninitialized(); + let mut buf = MaybeUninit::<[u8; flt2dec::MAX_SIG_DIGITS]>::uninit(); + let mut parts = MaybeUninit::<[flt2dec::Part; 6]>::uninit(); + // FIXME(#53491) let formatted = flt2dec::to_shortest_exp_str(flt2dec::strategy::grisu::format_shortest, *num, sign, (0, 0), upper, buf.get_mut(), parts.get_mut()); diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 8e0caa5ae330d..7efb7f31298bf 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Utilities for formatting and printing strings. 
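
The float-formatting hunks above migrate from `mem::uninitialized()` to `MaybeUninit` (and its FIXME notes that calling `get_mut` on uninitialized data is still questionable). Outside libcore, the same idea on a current stable compiler looks like this; `write` is the later stabilized initializer and postdates the snapshot in this diff:

```rust
use std::mem::MaybeUninit;

fn main() {
    // Reserve space without initializing it...
    let mut slot = MaybeUninit::<u32>::uninit();
    // ...initialize it explicitly...
    slot.write(42);
    // ...and only then assert that it is initialized.
    let value = unsafe { slot.assume_init() };
    assert_eq!(value, 42);
}
```
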
#![stable(feature = "rust1", since = "1.0.0")] @@ -201,29 +191,8 @@ pub trait Write { /// assert_eq!(&buf, "world"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - fn write_fmt(&mut self, args: Arguments) -> Result { - // This Adapter is needed to allow `self` (of type `&mut - // Self`) to be cast to a Write (below) without - // requiring a `Sized` bound. - struct Adapter<'a,T: ?Sized +'a>(&'a mut T); - - impl Write for Adapter<'_, T> - where T: Write - { - fn write_str(&mut self, s: &str) -> Result { - self.0.write_str(s) - } - - fn write_char(&mut self, c: char) -> Result { - self.0.write_char(c) - } - - fn write_fmt(&mut self, args: Arguments) -> Result { - self.0.write_fmt(args) - } - } - - write(&mut Adapter(self), args) + fn write_fmt(mut self: &mut Self, args: Arguments) -> Result { + write(&mut self, args) } } @@ -242,9 +211,18 @@ impl Write for &mut W { } } -/// A struct to represent both where to emit formatting strings to and how they -/// should be formatted. A mutable version of this is passed to all formatting -/// traits. +/// Configuration for formatting. +/// +/// A `Formatter` represents various options related to formatting. Users do not +/// construct `Formatter`s directly; a mutable reference to one is passed to +/// the `fmt` method of all formatting traits, like [`Debug`] and [`Display`]. +/// +/// To interact with a `Formatter`, you'll call various methods to change the +/// various options related to formatting. For examples, please see the +/// documentation of the methods defined on `Formatter` below. +/// +/// [`Debug`]: trait.Debug.html +/// [`Display`]: trait.Display.html #[allow(missing_debug_implementations)] #[stable(feature = "rust1", since = "1.0.0")] pub struct Formatter<'a> { @@ -278,7 +256,7 @@ struct Void { /// family of functions. It contains a function to format the given value. At /// compile time it is ensured that the function and the value have the correct /// types, and then this struct is used to canonicalize arguments to one type. -#[derive(Copy)] +#[derive(Copy, Clone)] #[allow(missing_debug_implementations)] #[unstable(feature = "fmt_internals", reason = "internal to format_args!", issue = "0")] @@ -288,14 +266,6 @@ pub struct ArgumentV1<'a> { formatter: fn(&Void, &mut Formatter) -> Result, } -#[unstable(feature = "fmt_internals", reason = "internal to format_args!", - issue = "0")] -impl Clone for ArgumentV1<'_> { - fn clone(&self) -> Self { - *self - } -} - impl<'a> ArgumentV1<'a> { #[inline(never)] fn show_usize(x: &usize, f: &mut Formatter) -> Result { @@ -513,12 +483,12 @@ impl Display for Arguments<'_> { /// implementations, such as [`debug_struct`][debug_struct]. /// /// `Debug` implementations using either `derive` or the debug builder API -/// on [`Formatter`] support pretty printing using the alternate flag: `{:#?}`. +/// on [`Formatter`] support pretty-printing using the alternate flag: `{:#?}`. /// /// [debug_struct]: ../../std/fmt/struct.Formatter.html#method.debug_struct /// [`Formatter`]: ../../std/fmt/struct.Formatter.html /// -/// Pretty printing with `#?`: +/// Pretty-printing with `#?`: /// /// ``` /// #[derive(Debug)] @@ -1036,34 +1006,57 @@ pub fn write(output: &mut dyn Write, args: Arguments) -> Result { curarg: args.args.iter(), }; - let mut pieces = args.pieces.iter(); + let mut idx = 0; match args.fmt { None => { // We can use default formatting parameters for all arguments. 
- for (arg, piece) in args.args.iter().zip(pieces.by_ref()) { + for (arg, piece) in args.args.iter().zip(args.pieces.iter()) { formatter.buf.write_str(*piece)?; (arg.formatter)(arg.value, &mut formatter)?; + idx += 1; } } Some(fmt) => { // Every spec has a corresponding argument that is preceded by // a string piece. - for (arg, piece) in fmt.iter().zip(pieces.by_ref()) { + for (arg, piece) in fmt.iter().zip(args.pieces.iter()) { formatter.buf.write_str(*piece)?; formatter.run(arg)?; + idx += 1; } } } // There can be only one trailing string piece left. - if let Some(piece) = pieces.next() { + if let Some(piece) = args.pieces.get(idx) { formatter.buf.write_str(*piece)?; } Ok(()) } +/// Padding after the end of something. Returned by `Formatter::padding`. +#[must_use = "don't forget to write the post padding"] +struct PostPadding { + fill: char, + padding: usize, +} + +impl PostPadding { + fn new(fill: char, padding: usize) -> PostPadding { + PostPadding { fill, padding } + } + + /// Write this post padding. + fn write(self, buf: &mut dyn Write) -> Result { + for _ in 0..self.padding { + buf.write_char(self.fill)?; + } + Ok(()) + } +} + impl<'a> Formatter<'a> { fn wrap_buf<'b, 'c, F>(&'b mut self, wrap: F) -> Formatter<'c> where 'b: 'c, F: FnOnce(&'b mut (dyn Write+'b)) -> &'c mut (dyn Write+'c) @@ -1115,7 +1108,7 @@ impl<'a> Formatter<'a> { self.args[i].as_usize() } rt::v1::Count::NextParam => { - self.curarg.next().and_then(|arg| arg.as_usize()) + self.curarg.next()?.as_usize() } } } @@ -1181,47 +1174,56 @@ impl<'a> Formatter<'a> { sign = Some('+'); width += 1; } - let mut prefixed = false; - if self.alternate() { - prefixed = true; width += prefix.chars().count(); - } + let prefix = if self.alternate() { + width += prefix.chars().count(); + Some(prefix) + } else { + None + }; // Writes the sign if it exists, and then the prefix if it was requested - let write_prefix = |f: &mut Formatter| { + #[inline(never)] + fn write_prefix(f: &mut Formatter, sign: Option, prefix: Option<&str>) -> Result { if let Some(c) = sign { - f.buf.write_str(c.encode_utf8(&mut [0; 4]))?; + f.buf.write_char(c)?; } - if prefixed { f.buf.write_str(prefix) } - else { Ok(()) } - }; + if let Some(prefix) = prefix { + f.buf.write_str(prefix) + } else { + Ok(()) + } + } // The `width` field is more of a `min-width` parameter at this point. match self.width { // If there's no minimum length requirements then we can just // write the bytes. None => { - write_prefix(self)?; self.buf.write_str(buf) + write_prefix(self, sign, prefix)?; + self.buf.write_str(buf) } // Check if we're over the minimum width, if so then we can also // just write the bytes. 
Some(min) if width >= min => { - write_prefix(self)?; self.buf.write_str(buf) + write_prefix(self, sign, prefix)?; + self.buf.write_str(buf) } // The sign and prefix goes before the padding if the fill character // is zero Some(min) if self.sign_aware_zero_pad() => { self.fill = '0'; self.align = rt::v1::Alignment::Right; - write_prefix(self)?; - self.with_padding(min - width, rt::v1::Alignment::Right, |f| { - f.buf.write_str(buf) - }) + write_prefix(self, sign, prefix)?; + let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?; + self.buf.write_str(buf)?; + post_padding.write(self.buf) } // Otherwise, the sign and prefix goes after the padding Some(min) => { - self.with_padding(min - width, rt::v1::Alignment::Right, |f| { - write_prefix(f)?; f.buf.write_str(buf) - }) + let post_padding = self.padding(min - width, rt::v1::Alignment::Right)?; + write_prefix(self, sign, prefix)?; + self.buf.write_str(buf)?; + post_padding.write(self.buf) } } } @@ -1292,19 +1294,21 @@ impl<'a> Formatter<'a> { // up the minimum width with the specified string + some alignment. Some(width) => { let align = rt::v1::Alignment::Left; - self.with_padding(width - s.chars().count(), align, |me| { - me.buf.write_str(s) - }) + let post_padding = self.padding(width - s.chars().count(), align)?; + self.buf.write_str(s)?; + post_padding.write(self.buf) } } } - /// Runs a callback, emitting the correct padding either before or - /// afterwards depending on whether right or left alignment is requested. - fn with_padding(&mut self, padding: usize, default: rt::v1::Alignment, - f: F) -> Result - where F: FnOnce(&mut Formatter) -> Result, - { + /// Write the pre-padding and return the unwritten post-padding. Callers are + /// responsible for ensuring post-padding is written after the thing that is + /// being padded. + fn padding( + &mut self, + padding: usize, + default: rt::v1::Alignment + ) -> result::Result { let align = match self.align { rt::v1::Alignment::Unknown => default, _ => self.align @@ -1317,20 +1321,11 @@ impl<'a> Formatter<'a> { rt::v1::Alignment::Center => (padding / 2, (padding + 1) / 2), }; - let mut fill = [0; 4]; - let fill = self.fill.encode_utf8(&mut fill); - for _ in 0..pre_pad { - self.buf.write_str(fill)?; + self.buf.write_char(self.fill)?; } - f(self)?; - - for _ in 0..post_pad { - self.buf.write_str(fill)?; - } - - Ok(()) + Ok(PostPadding::new(self.fill, post_pad)) } /// Takes the formatted parts and applies the padding. @@ -1351,7 +1346,7 @@ impl<'a> Formatter<'a> { // remove the sign from the formatted parts formatted.sign = b""; - width = if width < sign.len() { 0 } else { width - sign.len() }; + width = width.saturating_sub(sign.len()); align = rt::v1::Alignment::Right; self.fill = '0'; self.align = rt::v1::Alignment::Right; @@ -1362,9 +1357,9 @@ impl<'a> Formatter<'a> { let ret = if width <= len { // no padding self.write_formatted_parts(&formatted) } else { - self.with_padding(width - len, align, |f| { - f.write_formatted_parts(&formatted) - }) + let post_padding = self.padding(width - len, align)?; + self.write_formatted_parts(&formatted)?; + post_padding.write(self.buf) }; self.fill = old_fill; self.align = old_align; @@ -2076,7 +2071,7 @@ macro_rules! 
tuple { ( $($name:ident,)+ ) => ( #[stable(feature = "rust1", since = "1.0.0")] impl<$($name:Debug),*> Debug for ($($name,)*) where last_type!($($name,)+): ?Sized { - #[allow(non_snake_case, unused_assignments, deprecated)] + #[allow(non_snake_case, unused_assignments)] fn fmt(&self, f: &mut Formatter) -> Result { let mut builder = f.debug_tuple(""); let ($(ref $name,)*) = *self; diff --git a/src/libcore/fmt/num.rs b/src/libcore/fmt/num.rs index 51391fa50d56f..e96dbcaa14416 100644 --- a/src/libcore/fmt/num.rs +++ b/src/libcore/fmt/num.rs @@ -1,24 +1,12 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Integer and floating-point number formatting -#![allow(deprecated)] - use fmt; use ops::{Div, Rem, Sub}; use str; use slice; use ptr; -use mem; +use mem::MaybeUninit; #[doc(hidden)] trait Int: PartialEq + PartialOrd + Div + Rem + @@ -63,7 +51,7 @@ trait GenericRadix { // characters for a base 2 number. let zero = T::zero(); let is_nonnegative = x >= zero; - let mut buf: [u8; 128] = unsafe { mem::uninitialized() }; + let mut buf = uninitialized_array![u8; 128]; let mut curr = buf.len(); let base = T::from_u8(Self::BASE); if is_nonnegative { @@ -72,7 +60,7 @@ trait GenericRadix { for byte in buf.iter_mut().rev() { let n = x % base; // Get the current place value. x = x / base; // Deaccumulate the number. - *byte = Self::digit(n.to_u8()); // Store the digit in the buffer. + byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer. curr -= 1; if x == zero { // No more digits left to accumulate. @@ -84,7 +72,7 @@ trait GenericRadix { for byte in buf.iter_mut().rev() { let n = zero - (x % base); // Get the current place value. x = x / base; // Deaccumulate the number. - *byte = Self::digit(n.to_u8()); // Store the digit in the buffer. + byte.write(Self::digit(n.to_u8())); // Store the digit in the buffer. curr -= 1; if x == zero { // No more digits left to accumulate. @@ -92,7 +80,11 @@ trait GenericRadix { }; } } - let buf = unsafe { str::from_utf8_unchecked(&buf[curr..]) }; + let buf = &buf[curr..]; + let buf = unsafe { str::from_utf8_unchecked(slice::from_raw_parts( + MaybeUninit::first_ptr(buf), + buf.len() + )) }; f.pad_integral(is_nonnegative, Self::PREFIX, buf) } } @@ -186,7 +178,8 @@ integer! { i32, u32 } integer! { i64, u64 } integer! { i128, u128 } -const DEC_DIGITS_LUT: &'static[u8] = + +static DEC_DIGITS_LUT: &[u8; 200] = b"0001020304050607080910111213141516171819\ 2021222324252627282930313233343536373839\ 4041424344454647484950515253545556575859\ @@ -194,37 +187,27 @@ const DEC_DIGITS_LUT: &'static[u8] = 8081828384858687888990919293949596979899"; macro_rules! 
impl_Display { - ($($t:ident),*: $conv_fn:ident) => ($( - #[stable(feature = "rust1", since = "1.0.0")] - impl fmt::Display for $t { - #[allow(unused_comparisons)] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let is_nonnegative = *self >= 0; - let mut n = if is_nonnegative { - self.$conv_fn() - } else { - // convert the negative num to positive by summing 1 to it's 2 complement - (!self.$conv_fn()).wrapping_add(1) - }; - let mut buf: [u8; 39] = unsafe { mem::uninitialized() }; + ($($t:ident),* as $u:ident via $conv_fn:ident named $name:ident) => { + fn $name(mut n: $u, is_nonnegative: bool, f: &mut fmt::Formatter) -> fmt::Result { + let mut buf = uninitialized_array![u8; 39]; let mut curr = buf.len() as isize; - let buf_ptr = buf.as_mut_ptr(); + let buf_ptr = MaybeUninit::first_ptr_mut(&mut buf); let lut_ptr = DEC_DIGITS_LUT.as_ptr(); unsafe { // need at least 16 bits for the 4-characters-at-a-time to work. - if ::mem::size_of::<$t>() >= 2 { - // eagerly decode 4 characters at a time - while n >= 10000 { - let rem = (n % 10000) as isize; - n /= 10000; - - let d1 = (rem / 100) << 1; - let d2 = (rem % 100) << 1; - curr -= 4; - ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); - ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); - } + assert!(::mem::size_of::<$u>() >= 2); + + // eagerly decode 4 characters at a time + while n >= 10000 { + let rem = (n % 10000) as isize; + n /= 10000; + + let d1 = (rem / 100) << 1; + let d2 = (rem % 100) << 1; + curr -= 4; + ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); + ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); } // if we reach here numbers are <= 9999, so at most 4 chars long @@ -255,15 +238,41 @@ macro_rules! impl_Display { }; f.pad_integral(is_nonnegative, "", buf_slice) } - })*); + + $( + #[stable(feature = "rust1", since = "1.0.0")] + impl fmt::Display for $t { + #[allow(unused_comparisons)] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let is_nonnegative = *self >= 0; + let n = if is_nonnegative { + self.$conv_fn() + } else { + // convert the negative num to positive by summing 1 to it's 2 complement + (!self.$conv_fn()).wrapping_add(1) + }; + $name(n, is_nonnegative, f) + } + })* + }; +} + +// Include wasm32 in here since it doesn't reflect the native pointer size, and +// often cares strongly about getting a smaller code size. +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +mod imp { + use super::*; + impl_Display!( + i8, u8, i16, u16, i32, u32, i64, u64, usize, isize + as u64 via to_u64 named fmt_u64 + ); +} + +#[cfg(not(any(target_pointer_width = "64", target_arch = "wasm32")))] +mod imp { + use super::*; + impl_Display!(i8, u8, i16, u16, i32, u32, isize, usize as u32 via to_u32 named fmt_u32); + impl_Display!(i64, u64 as u64 via to_u64 named fmt_u64); } -impl_Display!(i8, u8, i16, u16, i32, u32: to_u32); -impl_Display!(i64, u64: to_u64); -impl_Display!(i128, u128: to_u128); -#[cfg(target_pointer_width = "16")] -impl_Display!(isize, usize: to_u16); -#[cfg(target_pointer_width = "32")] -impl_Display!(isize, usize: to_u32); -#[cfg(target_pointer_width = "64")] -impl_Display!(isize, usize: to_u64); +impl_Display!(i128, u128 as u128 via to_u128 named fmt_u128); diff --git a/src/libcore/fmt/rt/v1.rs b/src/libcore/fmt/rt/v1.rs index ec7add9c3759f..826ae36d2d100 100644 --- a/src/libcore/fmt/rt/v1.rs +++ b/src/libcore/fmt/rt/v1.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This is an internal module used by the ifmt! runtime. These structures are //! emitted to static arrays to precompile format strings ahead of time. //! diff --git a/src/libcore/future/future.rs b/src/libcore/future/future.rs index 5dee1d6dd3a39..114a6b9336777 100644 --- a/src/libcore/future/future.rs +++ b/src/libcore/future/future.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "futures_api", reason = "futures in libcore are unstable", issue = "50547")] @@ -15,7 +5,7 @@ use marker::Unpin; use ops; use pin::Pin; -use task::{Poll, LocalWaker}; +use task::{Context, Poll}; /// A future represents an asynchronous computation. /// @@ -29,13 +19,15 @@ use task::{Poll, LocalWaker}; /// final value. This method does not block if the value is not ready. Instead, /// the current task is scheduled to be woken up when it's possible to make /// further progress by `poll`ing again. The wake up is performed using -/// `cx.waker()`, a handle for waking up the current task. +/// the `waker` argument of the `poll()` method, which is a handle for waking +/// up the current task. /// /// When using a future, you generally won't call `poll` directly, but instead /// `await!` the value. -#[must_use] +#[doc(spotlight)] +#[must_use = "futures do nothing unless polled"] pub trait Future { - /// The result of the `Future`. + /// The type of value produced on completion. type Output; /// Attempt to resolve the future to a final value, registering @@ -52,16 +44,17 @@ pub trait Future { /// Once a future has finished, clients should not `poll` it again. /// /// When a future is not ready yet, `poll` returns `Poll::Pending` and - /// stores a clone of the [`LocalWaker`] to be woken once the future can - /// make progress. For example, a future waiting for a socket to become - /// readable would call `.clone()` on the [`LocalWaker`] and store it. + /// stores a clone of the [`Waker`] copied from the current [`Context`]. + /// This [`Waker`] is then woken once the future can make progress. + /// For example, a future waiting for a socket to become + /// readable would call `.clone()` on the [`Waker`] and store it. /// When a signal arrives elsewhere indicating that the socket is readable, - /// `[LocalWaker::wake]` is called and the socket future's task is awoken. + /// `[Waker::wake]` is called and the socket future's task is awoken. /// Once a task has been woken up, it should attempt to `poll` the future /// again, which may or may not produce a final value. /// /// Note that on multiple calls to `poll`, only the most recent - /// [`LocalWaker`] passed to `poll` should be scheduled to receive a + /// [`Waker`] passed to `poll` should be scheduled to receive a /// wakeup. 
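To make the waker contract described above concrete, here is a minimal sketch (not part of this diff) of a future that stores the most recent `Waker` and is woken from outside. It assumes the `Context`/`Waker` surface this change introduces, under the paths it later stabilized with in `std::task`; the type and function names are illustrative only.

```rust
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll, Waker};

/// Shared state: a completion flag plus the waker of the task that last polled.
struct Shared {
    ready: bool,
    waker: Option<Waker>,
}

/// A future that completes once `signal` has been called on its shared state.
struct WaitForSignal(Arc<Mutex<Shared>>);

impl Future for WaitForSignal {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
        let mut shared = self.0.lock().unwrap();
        if shared.ready {
            Poll::Ready(())
        } else {
            // Keep only the most recent waker, as the documentation requires.
            shared.waker = Some(cx.waker().clone());
            Poll::Pending
        }
    }
}

/// The signalling side sets the flag and wakes the stored task, if any.
fn signal(shared: &Mutex<Shared>) {
    let mut s = shared.lock().unwrap();
    s.ready = true;
    if let Some(w) = s.waker.take() {
        w.wake();
    }
}
```

Keeping only the latest waker matches the requirement that earlier wakers passed to `poll` need not be notified.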
/// /// # Runtime characteristics @@ -70,62 +63,54 @@ pub trait Future { /// progress, meaning that each time the current task is woken up, it should /// actively re-`poll` pending futures that it still has an interest in. /// - /// The `poll` function is not called repeatedly in a tight loop-- instead, + /// The `poll` function is not called repeatedly in a tight loop -- instead, /// it should only be called when the future indicates that it is ready to /// make progress (by calling `wake()`). If you're familiar with the /// `poll(2)` or `select(2)` syscalls on Unix it's worth noting that futures /// typically do *not* suffer the same problems of "all wakeups must poll /// all events"; they are more like `epoll(4)`. /// - /// An implementation of `poll` should strive to return quickly, and must - /// *never* block. Returning quickly prevents unnecessarily clogging up + /// An implementation of `poll` should strive to return quickly, and should + /// not block. Returning quickly prevents unnecessarily clogging up /// threads or event loops. If it is known ahead of time that a call to /// `poll` may end up taking awhile, the work should be offloaded to a /// thread pool (or something similar) to ensure that `poll` can return /// quickly. /// - /// # [`LocalWaker`], [`Waker`] and thread-safety - /// - /// The `poll` function takes a [`LocalWaker`], an object which knows how to - /// awaken the current task. [`LocalWaker`] is not `Send` nor `Sync`, so in - /// order to make thread-safe futures the [`LocalWaker::into_waker`] method - /// should be used to convert the [`LocalWaker`] into a thread-safe version. - /// [`LocalWaker::wake`] implementations have the ability to be more - /// efficient, however, so when thread safety is not necessary, - /// [`LocalWaker`] should be preferred. + /// An implementation of `poll` may also never cause memory unsafety. /// /// # Panics /// /// Once a future has completed (returned `Ready` from `poll`), /// then any future calls to `poll` may panic, block forever, or otherwise - /// cause bad behavior. The `Future` trait itself provides no guarantees - /// about the behavior of `poll` after a future has completed. + /// cause any kind of bad behavior except causing memory unsafety. + /// The `Future` trait itself provides no guarantees about the behavior + /// of `poll` after a future has completed. /// /// [`Poll::Pending`]: ../task/enum.Poll.html#variant.Pending /// [`Poll::Ready(val)`]: ../task/enum.Poll.html#variant.Ready - /// [`LocalWaker`]: ../task/struct.LocalWaker.html - /// [`LocalWaker::into_waker`]: ../task/struct.LocalWaker.html#method.into_waker - /// [`LocalWaker::wake`]: ../task/struct.LocalWaker.html#method.wake + /// [`Context`]: ../task/struct.Context.html /// [`Waker`]: ../task/struct.Waker.html - fn poll(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll; + /// [`Waker::wake`]: ../task/struct.Waker.html#method.wake + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll; } -impl<'a, F: ?Sized + Future + Unpin> Future for &'a mut F { +impl Future for &mut F { type Output = F::Output; - fn poll(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll { - F::poll(Pin::new(&mut **self), lw) + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + F::poll(Pin::new(&mut **self), cx) } } impl

<P> Future for Pin<P> where - P: ops::DerefMut, + P: Unpin + ops::DerefMut, P::Target: Future, { type Output = <<P as ops::Deref>

::Target as Future>::Output; - fn poll(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll { - Pin::get_mut(self).as_mut().poll(lw) + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + Pin::get_mut(self).as_mut().poll(cx) } } diff --git a/src/libcore/future/mod.rs b/src/libcore/future/mod.rs index 1dc4f361f3adb..6693ecbac41fa 100644 --- a/src/libcore/future/mod.rs +++ b/src/libcore/future/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "futures_api", reason = "futures in libcore are unstable", issue = "50547")] diff --git a/src/libcore/hash/mod.rs b/src/libcore/hash/mod.rs index 3e59ee1f8e5f5..d5d29c91e0346 100644 --- a/src/libcore/hash/mod.rs +++ b/src/libcore/hash/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Generic hashing support. //! //! This module provides a generic way to compute the hash of a value. The diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs index e3bdecdc4b1fd..235c79307ab8d 100644 --- a/src/libcore/hash/sip.rs +++ b/src/libcore/hash/sip.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! An implementation of SipHash. -#![allow(deprecated)] +#![allow(deprecated)] // the types in this module are deprecated use marker::PhantomData; use ptr; @@ -20,7 +10,7 @@ use mem; /// An implementation of SipHash 1-3. /// /// This is currently the default hashing function used by standard library -/// (eg. `collections::HashMap` uses it by default). +/// (e.g., `collections::HashMap` uses it by default). /// /// See: #[unstable(feature = "hashmap_internals", issue = "0")] @@ -100,7 +90,7 @@ macro_rules! compress { }); } -/// Load an integer of the desired type from a byte stream, in LE order. Uses +/// Loads an integer of the desired type from a byte stream, in LE order. Uses /// `copy_nonoverlapping` to let the compiler generate the most efficient way /// to load it from a possibly unaligned address. /// @@ -117,7 +107,7 @@ macro_rules! load_int_le { }); } -/// Load an u64 using up to 7 bytes of a byte slice. +/// Loads an u64 using up to 7 bytes of a byte slice. /// /// Unsafe because: unchecked indexing at start..start+len #[inline] diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs index 0bfdd937abd63..d43e6c49f4c99 100644 --- a/src/libcore/hint.rs +++ b/src/libcore/hint.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![stable(feature = "core_hint", since = "1.27.0")] //! Hints to compiler that affects how code should be emitted or optimized. @@ -44,7 +34,7 @@ use intrinsics; /// use std::hint::unreachable_unchecked; /// /// // `b.saturating_add(1)` is always positive (not zero), -/// // hence `checked_div` will never return None. +/// // hence `checked_div` will never return `None`. /// // Therefore, the else branch is unreachable. /// a.checked_div(b.saturating_add(1)) /// .unwrap_or_else(|| unsafe { unreachable_unchecked() }) @@ -59,3 +49,94 @@ use intrinsics; pub unsafe fn unreachable_unchecked() -> ! { intrinsics::unreachable() } + +/// Signals the processor that it is entering a busy-wait spin-loop. +/// +/// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving +/// power or switching hyper-threads. +/// +/// This function is different than [`std::thread::yield_now`] which directly yields to the +/// system's scheduler, whereas `spin_loop` only signals the processor that it is entering a +/// busy-wait spin-loop without yielding control to the system's scheduler. +/// +/// Using a busy-wait spin-loop with `spin_loop` is ideally used in situations where a +/// contended lock is held by another thread executed on a different CPU and where the waiting +/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's +/// scheduler, no overhead for switching threads occurs. However, if the thread holding the +/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice +/// before switching to the thread that holds the lock. If the contending lock is held by a thread +/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to +/// use [`std::thread::yield_now`]. +/// +/// **Note**: On platforms that do not support receiving spin-loop hints this function does not +/// do anything at all. +/// +/// [`std::thread::yield_now`]: ../../std/thread/fn.yield_now.html +#[inline] +#[unstable(feature = "renamed_spin_loop", issue = "55002")] +pub fn spin_loop() { + #[cfg( + all( + any(target_arch = "x86", target_arch = "x86_64"), + target_feature = "sse2" + ) + )] { + #[cfg(target_arch = "x86")] { + unsafe { crate::arch::x86::_mm_pause() }; + } + + #[cfg(target_arch = "x86_64")] { + unsafe { crate::arch::x86_64::_mm_pause() }; + } + } + + #[cfg( + any( + target_arch = "aarch64", + all(target_arch = "arm", target_feature = "v6") + ) + )] { + #[cfg(target_arch = "aarch64")] { + unsafe { crate::arch::aarch64::__yield() }; + } + #[cfg(target_arch = "arm")] { + unsafe { crate::arch::arm::__yield() }; + } + } +} + +/// A function that is opaque to the optimizer, to allow benchmarks to +/// pretend to use outputs to assist in avoiding dead-code +/// elimination. +/// +/// This function is a no-op, and does not even read from `dummy`. +#[inline] +#[unstable(feature = "test", issue = "27812")] +pub fn black_box(dummy: T) -> T { + cfg_if! 
{ + if #[cfg(any( + target_arch = "asmjs", + all( + target_arch = "wasm32", + target_os = "emscripten" + ) + ))] { + #[inline] + unsafe fn black_box_impl(d: T) -> T { + // these targets do not support inline assembly + let ret = crate::ptr::read_volatile(&d); + crate::mem::forget(d); + ret + } + } else { + #[inline] + unsafe fn black_box_impl(d: T) -> T { + // we need to "use" the argument in some way LLVM can't + // introspect. + asm!("" : : "r"(&d)); + d + } + } + } + unsafe { black_box_impl(dummy) } +} diff --git a/src/libcore/internal_macros.rs b/src/libcore/internal_macros.rs index db75f9bf210fc..ee6b7d3db48a6 100644 --- a/src/libcore/internal_macros.rs +++ b/src/libcore/internal_macros.rs @@ -1,14 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - // implements the unary operator "op &T" // based on "op T" where T is expected to be `Copy`able macro_rules! forward_ref_unop { @@ -18,7 +7,7 @@ macro_rules! forward_ref_unop { }; (impl $imp:ident, $method:ident for $t:ty, #[$attr:meta]) => { #[$attr] - impl<'a> $imp for &'a $t { + impl $imp for &$t { type Output = <$t as $imp>::Output; #[inline] @@ -48,21 +37,21 @@ macro_rules! forward_ref_binop { } #[$attr] - impl<'a> $imp<&'a $u> for $t { + impl $imp<&$u> for $t { type Output = <$t as $imp<$u>>::Output; #[inline] - fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output { $imp::$method(self, *other) } } #[$attr] - impl<'a, 'b> $imp<&'a $u> for &'b $t { + impl $imp<&$u> for &$t { type Output = <$t as $imp<$u>>::Output; #[inline] - fn $method(self, other: &'a $u) -> <$t as $imp<$u>>::Output { + fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output { $imp::$method(*self, *other) } } @@ -78,11 +67,136 @@ macro_rules! forward_ref_op_assign { }; (impl $imp:ident, $method:ident for $t:ty, $u:ty, #[$attr:meta]) => { #[$attr] - impl<'a> $imp<&'a $u> for $t { + impl $imp<&$u> for $t { #[inline] - fn $method(&mut self, other: &'a $u) { + fn $method(&mut self, other: &$u) { $imp::$method(self, *other); } } } } + +/// Create a zero-size type similar to a closure type, but named. +#[unstable(feature = "std_internals", issue = "0")] +macro_rules! impl_fn_for_zst { + ($( + $( #[$attr: meta] )* + // FIXME: when libcore is in the 2018 edition, use `?` repetition in + // $( <$( $li : lifetime ),+> )? + struct $Name: ident impl$( <$( $lifetime : lifetime ),+> )* Fn = + |$( $arg: ident: $ArgTy: ty ),*| -> $ReturnTy: ty + $body: block; + )+) => { + $( + $( #[$attr] )* + struct $Name; + + impl $( <$( $lifetime ),+> )* Fn<($( $ArgTy, )*)> for $Name { + #[inline] + extern "rust-call" fn call(&self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy { + $body + } + } + + impl $( <$( $lifetime ),+> )* FnMut<($( $ArgTy, )*)> for $Name { + #[inline] + extern "rust-call" fn call_mut( + &mut self, + ($( $arg, )*): ($( $ArgTy, )*) + ) -> $ReturnTy { + Fn::call(&*self, ($( $arg, )*)) + } + } + + impl $( <$( $lifetime ),+> )* FnOnce<($( $ArgTy, )*)> for $Name { + type Output = $ReturnTy; + + #[inline] + extern "rust-call" fn call_once(self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy { + Fn::call(&self, ($( $arg, )*)) + } + } + )+ + } +} + +/// A macro for defining `#[cfg]` if-else statements. 
+/// +/// The macro provided by this crate, `cfg_if`, is similar to the `if/elif` C +/// preprocessor macro by allowing definition of a cascade of `#[cfg]` cases, +/// emitting the implementation which matches first. +/// +/// This allows you to conveniently provide a long list `#[cfg]`'d blocks of code +/// without having to rewrite each clause multiple times. +/// +/// # Example +/// +/// ``` +/// #[macro_use] +/// extern crate cfg_if; +/// +/// cfg_if! { +/// if #[cfg(unix)] { +/// fn foo() { /* unix specific functionality */ } +/// } else if #[cfg(target_pointer_width = "32")] { +/// fn foo() { /* non-unix, 32-bit functionality */ } +/// } else { +/// fn foo() { /* fallback implementation */ } +/// } +/// } +/// +/// # fn main() {} +/// ``` +macro_rules! cfg_if { + // match if/else chains with a final `else` + ($( + if #[cfg($($meta:meta),*)] { $($it:item)* } + ) else * else { + $($it2:item)* + }) => { + cfg_if! { + @__items + () ; + $( ( ($($meta),*) ($($it)*) ), )* + ( () ($($it2)*) ), + } + }; + + // match if/else chains lacking a final `else` + ( + if #[cfg($($i_met:meta),*)] { $($i_it:item)* } + $( + else if #[cfg($($e_met:meta),*)] { $($e_it:item)* } + )* + ) => { + cfg_if! { + @__items + () ; + ( ($($i_met),*) ($($i_it)*) ), + $( ( ($($e_met),*) ($($e_it)*) ), )* + ( () () ), + } + }; + + // Internal and recursive macro to emit all the items + // + // Collects all the negated cfgs in a list at the beginning and after the + // semicolon is all the remaining items + (@__items ($($not:meta,)*) ; ) => {}; + (@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => { + // Emit all items within one block, applying an approprate #[cfg]. The + // #[cfg] will require all `$m` matchers specified and must also negate + // all previous matchers. + cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* } + + // Recurse to emit all other items in `$rest`, and when we do so add all + // our `$m` matchers to the list of `$not` matchers as future emissions + // will have to negate everything we just matched as well. + cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* } + }; + + // Internal macro to Apply a cfg attribute to a list of items + (@__apply $m:meta, $($it:item)*) => { + $(#[$m] $it)* + }; +} diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs index b94d5b4adcf09..05acd7bd01187 100644 --- a/src/libcore/intrinsics.rs +++ b/src/libcore/intrinsics.rs @@ -1,16 +1,6 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! rustc compiler intrinsics. +//! Compiler intrinsics. //! -//! The corresponding definitions are in librustc_codegen_llvm/intrinsic.rs. +//! The corresponding definitions are in `librustc_codegen_llvm/intrinsic.rs`. //! //! # Volatiles //! @@ -325,35 +315,35 @@ extern "rust-intrinsic" { /// [`AtomicBool::swap`](../../std/sync/atomic/struct.AtomicBool.html#method.swap). pub fn atomic_xchg_relaxed(dst: *mut T, src: T) -> T; - /// Add to the current value, returning the previous value. + /// Adds to the current value, returning the previous value. 
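For orientation, the stable counterpart that the `atomic_xadd*` intrinsics in this hunk point to is `fetch_add` on the `std::sync::atomic` types. A small sketch of that stable surface (not part of the diff):

```rust
use std::sync::atomic::{AtomicIsize, Ordering};

fn main() {
    let counter = AtomicIsize::new(5);
    // `atomic_xadd` corresponds to `fetch_add` with `Ordering::SeqCst`;
    // the previous value is returned.
    let previous = counter.fetch_add(3, Ordering::SeqCst);
    assert_eq!(previous, 5);
    assert_eq!(counter.load(Ordering::SeqCst), 8);
}
```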
/// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::SeqCst`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd(dst: *mut T, src: T) -> T; - /// Add to the current value, returning the previous value. + /// Adds to the current value, returning the previous value. /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Acquire`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; - /// Add to the current value, returning the previous value. + /// Adds to the current value, returning the previous value. /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Release`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; - /// Add to the current value, returning the previous value. + /// Adds to the current value, returning the previous value. /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::AcqRel`](../../std/sync/atomic/enum.Ordering.html) /// as the `order`. For example, /// [`AtomicIsize::fetch_add`](../../std/sync/atomic/struct.AtomicIsize.html#method.fetch_add). pub fn atomic_xadd_acqrel(dst: *mut T, src: T) -> T; - /// Add to the current value, returning the previous value. + /// Adds to the current value, returning the previous value. /// The stabilized version of this intrinsic is available on the /// `std::sync::atomic` types via the `fetch_add` method by passing /// [`Ordering::Relaxed`](../../std/sync/atomic/enum.Ordering.html) @@ -566,7 +556,7 @@ extern "rust-intrinsic" { pub fn atomic_umax_relaxed(dst: *mut T, src: T) -> T; /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction - /// if supported; otherwise, it is a noop. + /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// @@ -574,7 +564,7 @@ extern "rust-intrinsic" { /// ranging from (0) - no locality, to (3) - extremely local keep in cache pub fn prefetch_read_data(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction - /// if supported; otherwise, it is a noop. + /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// @@ -582,7 +572,7 @@ extern "rust-intrinsic" { /// ranging from (0) - no locality, to (3) - extremely local keep in cache pub fn prefetch_write_data(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction - /// if supported; otherwise, it is a noop. + /// if supported; otherwise, it is a no-op. 
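The prefetch intrinsics in this hunk are pure hints and never change program behavior. A rough sketch of how such a hint might be used, assuming a nightly toolchain with `core_intrinsics` enabled (the function below is illustrative, not part of the diff):

```rust
#![feature(core_intrinsics)]
use std::intrinsics::prefetch_read_data;

/// Sums a slice while hinting that an element a few iterations ahead will be
/// read soon; locality `3` asks for it to be kept as close to the core as possible.
fn sum_with_prefetch(xs: &[u64]) -> u64 {
    let mut total = 0;
    for i in 0..xs.len() {
        if let Some(ahead) = xs.get(i + 8) {
            unsafe { prefetch_read_data(ahead as *const u64, 3) };
        }
        total += xs[i];
    }
    total
}
```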
/// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// @@ -590,7 +580,7 @@ extern "rust-intrinsic" { /// ranging from (0) - no locality, to (3) - extremely local keep in cache pub fn prefetch_read_instruction(data: *const T, locality: i32); /// The `prefetch` intrinsic is a hint to the code generator to insert a prefetch instruction - /// if supported; otherwise, it is a noop. + /// if supported; otherwise, it is a no-op. /// Prefetches have no effect on the behavior of the program but can change its performance /// characteristics. /// @@ -700,10 +690,14 @@ extern "rust-intrinsic" { /// crate it is invoked in. pub fn type_id() -> u64; + /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited: + /// This will statically either panic, or do nothing. + pub fn panic_if_uninhabited(); + /// Creates a value initialized to zero. /// /// `init` is unsafe because it returns a zeroed-out datum, - /// which is unsafe unless T is `Copy`. Also, even if T is + /// which is unsafe unless `T` is `Copy`. Also, even if T is /// `Copy`, an all-zero value may not correspond to any legitimate /// state for the type in question. pub fn init() -> T; @@ -743,16 +737,6 @@ extern "rust-intrinsic" { /// /// There are a few things that `transmute` is really useful for. /// - /// Getting the bitpattern of a floating point type (or, more generally, - /// type punning, when `T` and `U` aren't pointers): - /// - /// ``` - /// let bitpattern = unsafe { - /// std::mem::transmute::(1.0) - /// }; - /// assert_eq!(bitpattern, 0x3F800000); - /// ``` - /// /// Turning a pointer into a function pointer. This is *not* portable to /// machines where function pointers and data pointers have different sizes. /// @@ -863,7 +847,7 @@ extern "rust-intrinsic" { /// /// // The no-copy, unsafe way, still using transmute, but not UB. /// // This is equivalent to the original, but safer, and reuses the - /// // same Vec internals. Therefore the new inner type must have the + /// // same `Vec` internals. Therefore, the new inner type must have the /// // exact same size, and the same alignment, as the old type. /// // The same caveats exist for this method as transmute, for /// // the original inner type (`&i32`) to the converted inner type @@ -881,8 +865,8 @@ extern "rust-intrinsic" { /// ``` /// use std::{slice, mem}; /// - /// // There are multiple ways to do this; and there are multiple problems - /// // with the following, transmute, way. + /// // There are multiple ways to do this, and there are multiple problems + /// // with the following (transmute) way. /// fn split_at_mut_transmute(slice: &mut [T], mid: usize) /// -> (&mut [T], &mut [T]) { /// let len = slice.len(); @@ -968,221 +952,6 @@ extern "rust-intrinsic" { /// value is not necessarily valid to be used to actually access memory. pub fn arith_offset(dst: *const T, offset: isize) -> *const T; - /// Copies `count * size_of::()` bytes from `src` to `dst`. The source - /// and destination must *not* overlap. - /// - /// For regions of memory which might overlap, use [`copy`] instead. - /// - /// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but - /// with the argument order swapped. - /// - /// [`copy`]: ./fn.copy.html - /// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy - /// - /// # Safety - /// - /// Behavior is undefined if any of the following conditions are violated: - /// - /// * `src` must be [valid] for reads of `count * size_of::()` bytes. 
- /// - /// * `dst` must be [valid] for writes of `count * size_of::()` bytes. - /// - /// * Both `src` and `dst` must be properly aligned. - /// - /// * The region of memory beginning at `src` with a size of `count * - /// size_of::()` bytes must *not* overlap with the region of memory - /// beginning at `dst` with the same size. - /// - /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of - /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values - /// in the region beginning at `*src` and the region beginning at `*dst` can - /// [violate memory safety][read-ownership]. - /// - /// Note that even if the effectively copied size (`count * size_of::()`) is - /// `0`, the pointers must be non-NULL and properly aligned. - /// - /// [`Copy`]: ../marker/trait.Copy.html - /// [`read`]: ../ptr/fn.read.html - /// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value - /// [valid]: ../ptr/index.html#safety - /// - /// # Examples - /// - /// Manually implement [`Vec::append`]: - /// - /// ``` - /// use std::ptr; - /// - /// /// Moves all the elements of `src` into `dst`, leaving `src` empty. - /// fn append(dst: &mut Vec, src: &mut Vec) { - /// let src_len = src.len(); - /// let dst_len = dst.len(); - /// - /// // Ensure that `dst` has enough capacity to hold all of `src`. - /// dst.reserve(src_len); - /// - /// unsafe { - /// // The call to offset is always safe because `Vec` will never - /// // allocate more than `isize::MAX` bytes. - /// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize); - /// let src_ptr = src.as_ptr(); - /// - /// // Truncate `src` without dropping its contents. We do this first, - /// // to avoid problems in case something further down panics. - /// src.set_len(0); - /// - /// // The two regions cannot overlap because mutable references do - /// // not alias, and two different vectors cannot own the same - /// // memory. - /// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len); - /// - /// // Notify `dst` that it now holds the contents of `src`. - /// dst.set_len(dst_len + src_len); - /// } - /// } - /// - /// let mut a = vec!['r']; - /// let mut b = vec!['u', 's', 't']; - /// - /// append(&mut a, &mut b); - /// - /// assert_eq!(a, &['r', 'u', 's', 't']); - /// assert!(b.is_empty()); - /// ``` - /// - /// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append - #[stable(feature = "rust1", since = "1.0.0")] - pub fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); - - /// Copies `count * size_of::()` bytes from `src` to `dst`. The source - /// and destination may overlap. - /// - /// If the source and destination will *never* overlap, - /// [`copy_nonoverlapping`] can be used instead. - /// - /// `copy` is semantically equivalent to C's [`memmove`], but with the argument - /// order swapped. Copying takes place as if the bytes were copied from `src` - /// to a temporary array and then copied from the array to `dst`. - /// - /// [`copy_nonoverlapping`]: ./fn.copy_nonoverlapping.html - /// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove - /// - /// # Safety - /// - /// Behavior is undefined if any of the following conditions are violated: - /// - /// * `src` must be [valid] for reads of `count * size_of::()` bytes. - /// - /// * `dst` must be [valid] for writes of `count * size_of::()` bytes. - /// - /// * Both `src` and `dst` must be properly aligned. - /// - /// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of - /// whether `T` is [`Copy`]. 
If `T` is not [`Copy`], using both the values - /// in the region beginning at `*src` and the region beginning at `*dst` can - /// [violate memory safety][read-ownership]. - /// - /// Note that even if the effectively copied size (`count * size_of::()`) is - /// `0`, the pointers must be non-NULL and properly aligned. - /// - /// [`Copy`]: ../marker/trait.Copy.html - /// [`read`]: ../ptr/fn.read.html - /// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value - /// [valid]: ../ptr/index.html#safety - /// - /// # Examples - /// - /// Efficiently create a Rust vector from an unsafe buffer: - /// - /// ``` - /// use std::ptr; - /// - /// # #[allow(dead_code)] - /// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { - /// let mut dst = Vec::with_capacity(elts); - /// dst.set_len(elts); - /// ptr::copy(ptr, dst.as_mut_ptr(), elts); - /// dst - /// } - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn copy(src: *const T, dst: *mut T, count: usize); - - /// Sets `count * size_of::()` bytes of memory starting at `dst` to - /// `val`. - /// - /// `write_bytes` is similar to C's [`memset`], but sets `count * - /// size_of::()` bytes to `val`. - /// - /// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset - /// - /// # Safety - /// - /// Behavior is undefined if any of the following conditions are violated: - /// - /// * `dst` must be [valid] for writes of `count * size_of::()` bytes. - /// - /// * `dst` must be properly aligned. - /// - /// Additionally, the caller must ensure that writing `count * - /// size_of::()` bytes to the given region of memory results in a valid - /// value of `T`. Using a region of memory typed as a `T` that contains an - /// invalid value of `T` is undefined behavior. - /// - /// Note that even if the effectively copied size (`count * size_of::()`) is - /// `0`, the pointer must be non-NULL and properly aligned. - /// - /// [valid]: ../ptr/index.html#safety - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::ptr; - /// - /// let mut vec = vec![0u32; 4]; - /// unsafe { - /// let vec_ptr = vec.as_mut_ptr(); - /// ptr::write_bytes(vec_ptr, 0xfe, 2); - /// } - /// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]); - /// ``` - /// - /// Creating an invalid value: - /// - /// ``` - /// use std::ptr; - /// - /// let mut v = Box::new(0i32); - /// - /// unsafe { - /// // Leaks the previously held value by overwriting the `Box` with - /// // a null pointer. - /// ptr::write_bytes(&mut v as *mut Box, 0, 1); - /// } - /// - /// // At this point, using or dropping `v` results in undefined behavior. - /// // drop(v); // ERROR - /// - /// // Even leaking `v` "uses" it, and hence is undefined behavior. - /// // mem::forget(v); // ERROR - /// - /// // In fact, `v` is invalid according to basic type layout invariants, so *any* - /// // operation touching it is undefined behavior. - /// // let v2 = v; // ERROR - /// - /// unsafe { - /// // Let us instead put in a valid value - /// ptr::write(&mut v as *mut Box, Box::new(42i32)); - /// } - /// - /// // Now the box is fine - /// assert_eq!(*v, 42); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - pub fn write_bytes(dst: *mut T, val: u8, count: usize); - /// Equivalent to the appropriate `llvm.memcpy.p0i8.0i8.*` intrinsic, with /// a size of `count` * `size_of::()` and an alignment of /// `min_align_of::()` @@ -1206,19 +975,19 @@ extern "rust-intrinsic" { /// unless size is equal to zero. 
pub fn volatile_set_memory(dst: *mut T, val: u8, count: usize); - /// Perform a volatile load from the `src` pointer. + /// Performs a volatile load from the `src` pointer. /// The stabilized version of this intrinsic is /// [`std::ptr::read_volatile`](../../std/ptr/fn.read_volatile.html). pub fn volatile_load(src: *const T) -> T; - /// Perform a volatile store to the `dst` pointer. + /// Performs a volatile store to the `dst` pointer. /// The stabilized version of this intrinsic is /// [`std::ptr::write_volatile`](../../std/ptr/fn.write_volatile.html). pub fn volatile_store(dst: *mut T, val: T); - /// Perform a volatile load from the `src` pointer + /// Performs a volatile load from the `src` pointer /// The pointer is not required to be aligned. pub fn unaligned_volatile_load(src: *const T) -> T; - /// Perform a volatile store to the `dst` pointer. + /// Performs a volatile store to the `dst` pointer. /// The pointer is not required to be aligned. pub fn unaligned_volatile_store(dst: *mut T, val: T); @@ -1353,7 +1122,7 @@ extern "rust-intrinsic" { /// use std::intrinsics::ctlz; /// /// let x = 0b0001_1100_u8; - /// let num_leading = unsafe { ctlz(x) }; + /// let num_leading = ctlz(x); /// assert_eq!(num_leading, 3); /// ``` /// @@ -1365,7 +1134,7 @@ extern "rust-intrinsic" { /// use std::intrinsics::ctlz; /// /// let x = 0u16; - /// let num_leading = unsafe { ctlz(x) }; + /// let num_leading = ctlz(x); /// assert_eq!(num_leading, 16); /// ``` pub fn ctlz(x: T) -> T; @@ -1396,7 +1165,7 @@ extern "rust-intrinsic" { /// use std::intrinsics::cttz; /// /// let x = 0b0011_1000_u8; - /// let num_trailing = unsafe { cttz(x) }; + /// let num_trailing = cttz(x); /// assert_eq!(num_trailing, 3); /// ``` /// @@ -1408,7 +1177,7 @@ extern "rust-intrinsic" { /// use std::intrinsics::cttz; /// /// let x = 0u16; - /// let num_trailing = unsafe { cttz(x) }; + /// let num_trailing = cttz(x); /// assert_eq!(num_trailing, 16); /// ``` pub fn cttz(x: T) -> T; @@ -1499,6 +1268,17 @@ extern "rust-intrinsic" { /// [`std::u32::wrapping_mul`](../../std/primitive.u32.html#method.wrapping_mul) pub fn overflowing_mul(a: T, b: T) -> T; + /// Computes `a + b`, while saturating at numeric bounds. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `saturating_add` method. For example, + /// [`std::u32::saturating_add`](../../std/primitive.u32.html#method.saturating_add) + pub fn saturating_add(a: T, b: T) -> T; + /// Computes `a - b`, while saturating at numeric bounds. + /// The stabilized versions of this intrinsic are available on the integer + /// primitives via the `saturating_sub` method. For example, + /// [`std::u32::saturating_sub`](../../std/primitive.u32.html#method.saturating_sub) + pub fn saturating_sub(a: T, b: T) -> T; + /// Returns the value of the discriminant for the variant in 'v', /// cast to a `u64`; if `T` has no discriminant, returns 0. pub fn discriminant_value(v: &T) -> u64; @@ -1517,3 +1297,252 @@ extern "rust-intrinsic" { /// Probably will never become stable. pub fn nontemporal_store(ptr: *mut T, val: T); } + +mod real_intrinsics { + extern "rust-intrinsic" { + /// Copies `count * size_of::()` bytes from `src` to `dst`. The source + /// and destination must *not* overlap. + /// For the full docs, see the stabilized wrapper [`copy_nonoverlapping`]. 
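The new `real_intrinsics` module keeps the raw declarations private, while `copy_nonoverlapping`, `copy`, and `write_bytes` become ordinary `pub unsafe fn` wrappers that carry the `#[stable]`/`#[inline]` attributes and the full documentation. A rough sketch of the same wrapping pattern outside libcore, with hypothetical names, to show the shape of the change (not the actual libcore code):

```rust
// The raw, FFI-like symbol stays private to the module...
mod raw {
    extern "C" {
        // Hypothetical routine standing in for a raw intrinsic.
        pub fn raw_fill(dst: *mut u8, val: u8, count: usize);
    }
}

/// ...and the public entry point is a normal function, so it can carry
/// `#[inline]`, documentation, and any checks added later.
#[inline]
pub unsafe fn fill(dst: *mut u8, val: u8, count: usize) {
    raw::raw_fill(dst, val, count)
}
```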
+ /// + /// [`copy_nonoverlapping`]: ../../std/ptr/fn.copy_nonoverlapping.html + pub fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize); + + /// Copies `count * size_of::()` bytes from `src` to `dst`. The source + /// and destination may overlap. + /// For the full docs, see the stabilized wrapper [`copy`]. + /// + /// [`copy`]: ../../std/ptr/fn.copy.html + pub fn copy(src: *const T, dst: *mut T, count: usize); + + /// Sets `count * size_of::()` bytes of memory starting at `dst` to + /// `val`. + /// For the full docs, see the stabilized wrapper [`write_bytes`]. + /// + /// [`write_bytes`]: ../../std/ptr/fn.write_bytes.html + pub fn write_bytes(dst: *mut T, val: u8, count: usize); + } +} + +/// Copies `count * size_of::()` bytes from `src` to `dst`. The source +/// and destination must *not* overlap. +/// +/// For regions of memory which might overlap, use [`copy`] instead. +/// +/// `copy_nonoverlapping` is semantically equivalent to C's [`memcpy`], but +/// with the argument order swapped. +/// +/// [`copy`]: ./fn.copy.html +/// [`memcpy`]: https://en.cppreference.com/w/c/string/byte/memcpy +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `src` must be [valid] for reads of `count * size_of::()` bytes. +/// +/// * `dst` must be [valid] for writes of `count * size_of::()` bytes. +/// +/// * Both `src` and `dst` must be properly aligned. +/// +/// * The region of memory beginning at `src` with a size of `count * +/// size_of::()` bytes must *not* overlap with the region of memory +/// beginning at `dst` with the same size. +/// +/// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values +/// in the region beginning at `*src` and the region beginning at `*dst` can +/// [violate memory safety][read-ownership]. +/// +/// Note that even if the effectively copied size (`count * size_of::()`) is +/// `0`, the pointers must be non-NULL and properly aligned. +/// +/// [`Copy`]: ../marker/trait.Copy.html +/// [`read`]: ../ptr/fn.read.html +/// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value +/// [valid]: ../ptr/index.html#safety +/// +/// # Examples +/// +/// Manually implement [`Vec::append`]: +/// +/// ``` +/// use std::ptr; +/// +/// /// Moves all the elements of `src` into `dst`, leaving `src` empty. +/// fn append(dst: &mut Vec, src: &mut Vec) { +/// let src_len = src.len(); +/// let dst_len = dst.len(); +/// +/// // Ensure that `dst` has enough capacity to hold all of `src`. +/// dst.reserve(src_len); +/// +/// unsafe { +/// // The call to offset is always safe because `Vec` will never +/// // allocate more than `isize::MAX` bytes. +/// let dst_ptr = dst.as_mut_ptr().offset(dst_len as isize); +/// let src_ptr = src.as_ptr(); +/// +/// // Truncate `src` without dropping its contents. We do this first, +/// // to avoid problems in case something further down panics. +/// src.set_len(0); +/// +/// // The two regions cannot overlap because mutable references do +/// // not alias, and two different vectors cannot own the same +/// // memory. +/// ptr::copy_nonoverlapping(src_ptr, dst_ptr, src_len); +/// +/// // Notify `dst` that it now holds the contents of `src`. 
+/// dst.set_len(dst_len + src_len); +/// } +/// } +/// +/// let mut a = vec!['r']; +/// let mut b = vec!['u', 's', 't']; +/// +/// append(&mut a, &mut b); +/// +/// assert_eq!(a, &['r', 'u', 's', 't']); +/// assert!(b.is_empty()); +/// ``` +/// +/// [`Vec::append`]: ../../std/vec/struct.Vec.html#method.append +#[stable(feature = "rust1", since = "1.0.0")] +#[inline] +pub unsafe fn copy_nonoverlapping(src: *const T, dst: *mut T, count: usize) { + real_intrinsics::copy_nonoverlapping(src, dst, count); +} + +/// Copies `count * size_of::()` bytes from `src` to `dst`. The source +/// and destination may overlap. +/// +/// If the source and destination will *never* overlap, +/// [`copy_nonoverlapping`] can be used instead. +/// +/// `copy` is semantically equivalent to C's [`memmove`], but with the argument +/// order swapped. Copying takes place as if the bytes were copied from `src` +/// to a temporary array and then copied from the array to `dst`. +/// +/// [`copy_nonoverlapping`]: ./fn.copy_nonoverlapping.html +/// [`memmove`]: https://en.cppreference.com/w/c/string/byte/memmove +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `src` must be [valid] for reads of `count * size_of::()` bytes. +/// +/// * `dst` must be [valid] for writes of `count * size_of::()` bytes. +/// +/// * Both `src` and `dst` must be properly aligned. +/// +/// Like [`read`], `copy` creates a bitwise copy of `T`, regardless of +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the values +/// in the region beginning at `*src` and the region beginning at `*dst` can +/// [violate memory safety][read-ownership]. +/// +/// Note that even if the effectively copied size (`count * size_of::()`) is +/// `0`, the pointers must be non-NULL and properly aligned. +/// +/// [`Copy`]: ../marker/trait.Copy.html +/// [`read`]: ../ptr/fn.read.html +/// [read-ownership]: ../ptr/fn.read.html#ownership-of-the-returned-value +/// [valid]: ../ptr/index.html#safety +/// +/// # Examples +/// +/// Efficiently create a Rust vector from an unsafe buffer: +/// +/// ``` +/// use std::ptr; +/// +/// # #[allow(dead_code)] +/// unsafe fn from_buf_raw(ptr: *const T, elts: usize) -> Vec { +/// let mut dst = Vec::with_capacity(elts); +/// dst.set_len(elts); +/// ptr::copy(ptr, dst.as_mut_ptr(), elts); +/// dst +/// } +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[inline] +pub unsafe fn copy(src: *const T, dst: *mut T, count: usize) { + real_intrinsics::copy(src, dst, count) +} + +/// Sets `count * size_of::()` bytes of memory starting at `dst` to +/// `val`. +/// +/// `write_bytes` is similar to C's [`memset`], but sets `count * +/// size_of::()` bytes to `val`. +/// +/// [`memset`]: https://en.cppreference.com/w/c/string/byte/memset +/// +/// # Safety +/// +/// Behavior is undefined if any of the following conditions are violated: +/// +/// * `dst` must be [valid] for writes of `count * size_of::()` bytes. +/// +/// * `dst` must be properly aligned. +/// +/// Additionally, the caller must ensure that writing `count * +/// size_of::()` bytes to the given region of memory results in a valid +/// value of `T`. Using a region of memory typed as a `T` that contains an +/// invalid value of `T` is undefined behavior. +/// +/// Note that even if the effectively copied size (`count * size_of::()`) is +/// `0`, the pointer must be non-NULL and properly aligned. 
+/// +/// [valid]: ../ptr/index.html#safety +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::ptr; +/// +/// let mut vec = vec![0u32; 4]; +/// unsafe { +/// let vec_ptr = vec.as_mut_ptr(); +/// ptr::write_bytes(vec_ptr, 0xfe, 2); +/// } +/// assert_eq!(vec, [0xfefefefe, 0xfefefefe, 0, 0]); +/// ``` +/// +/// Creating an invalid value: +/// +/// ``` +/// use std::ptr; +/// +/// let mut v = Box::new(0i32); +/// +/// unsafe { +/// // Leaks the previously held value by overwriting the `Box` with +/// // a null pointer. +/// ptr::write_bytes(&mut v as *mut Box, 0, 1); +/// } +/// +/// // At this point, using or dropping `v` results in undefined behavior. +/// // drop(v); // ERROR +/// +/// // Even leaking `v` "uses" it, and hence is undefined behavior. +/// // mem::forget(v); // ERROR +/// +/// // In fact, `v` is invalid according to basic type layout invariants, so *any* +/// // operation touching it is undefined behavior. +/// // let v2 = v; // ERROR +/// +/// unsafe { +/// // Let us instead put in a valid value +/// ptr::write(&mut v as *mut Box, Box::new(42i32)); +/// } +/// +/// // Now the box is fine +/// assert_eq!(*v, 42); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[inline] +pub unsafe fn write_bytes(dst: *mut T, val: u8, count: usize) { + real_intrinsics::write_bytes(dst, val, count) +} diff --git a/src/libcore/iter/adapters/chain.rs b/src/libcore/iter/adapters/chain.rs new file mode 100644 index 0000000000000..573b096fb463e --- /dev/null +++ b/src/libcore/iter/adapters/chain.rs @@ -0,0 +1,260 @@ +use ops::Try; +use usize; +use super::super::{Iterator, DoubleEndedIterator, FusedIterator, TrustedLen}; + +/// An iterator that strings two iterators together. +/// +/// This `struct` is created by the [`chain`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`chain`]: trait.Iterator.html#method.chain +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Chain { + a: A, + b: B, + state: ChainState, +} +impl Chain { + pub(in super::super) fn new(a: A, b: B) -> Chain { + Chain { a, b, state: ChainState::Both } + } +} + +// The iterator protocol specifies that iteration ends with the return value +// `None` from `.next()` (or `.next_back()`) and it is unspecified what +// further calls return. The chain adaptor must account for this since it uses +// two subiterators. +// +// It uses three states: +// +// - Both: `a` and `b` are remaining +// - Front: `a` remaining +// - Back: `b` remaining +// +// The fourth state (neither iterator is remaining) only occurs after Chain has +// returned None once, so we don't need to store this state. +#[derive(Clone, Debug)] +enum ChainState { + // both front and back iterator are remaining + Both, + // only front is remaining + Front, + // only back is remaining + Back, +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Chain where + A: Iterator, + B: Iterator +{ + type Item = A::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.state { + ChainState::Both => match self.a.next() { + elt @ Some(..) 
=> elt, + None => { + self.state = ChainState::Back; + self.b.next() + } + }, + ChainState::Front => self.a.next(), + ChainState::Back => self.b.next(), + } + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn count(self) -> usize { + match self.state { + ChainState::Both => self.a.count() + self.b.count(), + ChainState::Front => self.a.count(), + ChainState::Back => self.b.count(), + } + } + + fn try_fold(&mut self, init: Acc, mut f: F) -> R where + Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Front => { + accum = self.a.try_fold(accum, &mut f)?; + if let ChainState::Both = self.state { + self.state = ChainState::Back; + } + } + _ => { } + } + if let ChainState::Back = self.state { + accum = self.b.try_fold(accum, &mut f)?; + } + Try::from_ok(accum) + } + + fn fold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Front => { + accum = self.a.fold(accum, &mut f); + } + _ => { } + } + match self.state { + ChainState::Both | ChainState::Back => { + accum = self.b.fold(accum, &mut f); + } + _ => { } + } + accum + } + + #[inline] + fn nth(&mut self, mut n: usize) -> Option { + match self.state { + ChainState::Both | ChainState::Front => { + for x in self.a.by_ref() { + if n == 0 { + return Some(x) + } + n -= 1; + } + if let ChainState::Both = self.state { + self.state = ChainState::Back; + } + } + ChainState::Back => {} + } + if let ChainState::Back = self.state { + self.b.nth(n) + } else { + None + } + } + + #[inline] + fn find
<P>
(&mut self, mut predicate: P) -> Option where + P: FnMut(&Self::Item) -> bool, + { + match self.state { + ChainState::Both => match self.a.find(&mut predicate) { + None => { + self.state = ChainState::Back; + self.b.find(predicate) + } + v => v + }, + ChainState::Front => self.a.find(predicate), + ChainState::Back => self.b.find(predicate), + } + } + + #[inline] + fn last(self) -> Option { + match self.state { + ChainState::Both => { + // Must exhaust a before b. + let a_last = self.a.last(); + let b_last = self.b.last(); + b_last.or(a_last) + }, + ChainState::Front => self.a.last(), + ChainState::Back => self.b.last() + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let lower = a_lower.saturating_add(b_lower); + + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => x.checked_add(y), + _ => None + }; + + (lower, upper) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Chain where + A: DoubleEndedIterator, + B: DoubleEndedIterator, +{ + #[inline] + fn next_back(&mut self) -> Option { + match self.state { + ChainState::Both => match self.b.next_back() { + elt @ Some(..) => elt, + None => { + self.state = ChainState::Front; + self.a.next_back() + } + }, + ChainState::Front => self.a.next_back(), + ChainState::Back => self.b.next_back(), + } + } + + fn try_rfold(&mut self, init: Acc, mut f: F) -> R where + Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Back => { + accum = self.b.try_rfold(accum, &mut f)?; + if let ChainState::Both = self.state { + self.state = ChainState::Front; + } + } + _ => { } + } + if let ChainState::Front = self.state { + accum = self.a.try_rfold(accum, &mut f)?; + } + Try::from_ok(accum) + } + + fn rfold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + let mut accum = init; + match self.state { + ChainState::Both | ChainState::Back => { + accum = self.b.rfold(accum, &mut f); + } + _ => { } + } + match self.state { + ChainState::Both | ChainState::Front => { + accum = self.a.rfold(accum, &mut f); + } + _ => { } + } + accum + } + +} + +// Note: *both* must be fused to handle double-ended iterators. +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Chain + where A: FusedIterator, + B: FusedIterator, +{} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Chain + where A: TrustedLen, B: TrustedLen, +{} + diff --git a/src/libcore/iter/adapters/flatten.rs b/src/libcore/iter/adapters/flatten.rs new file mode 100644 index 0000000000000..40f6865d38bcf --- /dev/null +++ b/src/libcore/iter/adapters/flatten.rs @@ -0,0 +1,330 @@ +use fmt; +use ops::Try; +use super::super::{Iterator, DoubleEndedIterator, FusedIterator}; +use super::Map; + +/// An iterator that maps each element to an iterator, and yields the elements +/// of the produced iterators. +/// +/// This `struct` is created by the [`flat_map`] method on [`Iterator`]. See its +/// documentation for more. 
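A tiny usage sketch of the adapter this struct backs (not part of the diff):

```rust
fn main() {
    let words = ["alpha", "beta"];
    // `flat_map` maps each element to an iterator and then yields the
    // elements of those iterators in order.
    let merged: String = words.iter().flat_map(|s| s.chars()).collect();
    assert_eq!(merged, "alphabeta");
}
```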
+/// +/// [`flat_map`]: trait.Iterator.html#method.flat_map +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct FlatMap { + inner: FlattenCompat, ::IntoIter> +} +impl U> FlatMap { + pub(in super::super) fn new(iter: I, f: F) -> FlatMap { + FlatMap { inner: FlattenCompat::new(iter.map(f)) } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for FlatMap + where ::IntoIter: Clone +{ + fn clone(&self) -> Self { FlatMap { inner: self.inner.clone() } } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for FlatMap + where U::IntoIter: fmt::Debug +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FlatMap").field("inner", &self.inner).finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for FlatMap + where F: FnMut(I::Item) -> U, +{ + type Item = U::Item; + + #[inline] + fn next(&mut self) -> Option { self.inner.next() } + + #[inline] + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.fold(init, fold) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FlatMap + where F: FnMut(I::Item) -> U, + U: IntoIterator, + U::IntoIter: DoubleEndedIterator +{ + #[inline] + fn next_back(&mut self) -> Option { self.inner.next_back() } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.rfold(init, fold) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for FlatMap + where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} + +/// An iterator that flattens one level of nesting in an iterator of things +/// that can be turned into iterators. +/// +/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its +/// documentation for more. 
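And a matching usage sketch for `Flatten` (again, not part of the diff): it removes exactly one level of nesting.

```rust
fn main() {
    let nested = vec![vec![1, 2], vec![3, 4]];
    let flat: Vec<i32> = nested.into_iter().flatten().collect();
    assert_eq!(flat, [1, 2, 3, 4]);
}
```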
+/// +/// [`flatten`]: trait.Iterator.html#method.flatten +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "iterator_flatten", since = "1.29.0")] +pub struct Flatten +where I::Item: IntoIterator { + inner: FlattenCompat::IntoIter>, +} +impl Flatten +where I::Item: IntoIterator { + pub(in super::super) fn new(iter: I) -> Flatten { + Flatten { inner: FlattenCompat::new(iter) } + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl fmt::Debug for Flatten + where I: Iterator + fmt::Debug, U: Iterator + fmt::Debug, + I::Item: IntoIterator, +{ + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Flatten").field("inner", &self.inner).finish() + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl Clone for Flatten + where I: Iterator + Clone, U: Iterator + Clone, + I::Item: IntoIterator, +{ + fn clone(&self) -> Self { Flatten { inner: self.inner.clone() } } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl Iterator for Flatten + where I: Iterator, U: Iterator, + I::Item: IntoIterator +{ + type Item = U::Item; + + #[inline] + fn next(&mut self) -> Option { self.inner.next() } + + #[inline] + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.fold(init, fold) + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl DoubleEndedIterator for Flatten + where I: DoubleEndedIterator, U: DoubleEndedIterator, + I::Item: IntoIterator +{ + #[inline] + fn next_back(&mut self) -> Option { self.inner.next_back() } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.rfold(init, fold) + } +} + +#[stable(feature = "iterator_flatten", since = "1.29.0")] +impl FusedIterator for Flatten + where I: FusedIterator, U: Iterator, + I::Item: IntoIterator {} + +/// Real logic of both `Flatten` and `FlatMap` which simply delegate to +/// this type. +#[derive(Clone, Debug)] +struct FlattenCompat { + iter: I, + frontiter: Option, + backiter: Option, +} +impl FlattenCompat { + /// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`. 
+ fn new(iter: I) -> FlattenCompat { + FlattenCompat { iter, frontiter: None, backiter: None } + } +} + +impl Iterator for FlattenCompat + where I: Iterator, U: Iterator, + I::Item: IntoIterator +{ + type Item = U::Item; + + #[inline] + fn next(&mut self) -> Option { + loop { + if let Some(ref mut inner) = self.frontiter { + if let elt@Some(_) = inner.next() { return elt } + } + match self.iter.next() { + None => return self.backiter.as_mut().and_then(|it| it.next()), + Some(inner) => self.frontiter = Some(inner.into_iter()), + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); + let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); + let lo = flo.saturating_add(blo); + match (self.iter.size_hint(), fhi, bhi) { + ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)), + _ => (lo, None) + } + } + + #[inline] + fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if let Some(ref mut front) = self.frontiter { + init = front.try_fold(init, &mut fold)?; + } + self.frontiter = None; + + { + let frontiter = &mut self.frontiter; + init = self.iter.try_fold(init, |acc, x| { + let mut mid = x.into_iter(); + let r = mid.try_fold(acc, &mut fold); + *frontiter = Some(mid); + r + })?; + } + self.frontiter = None; + + if let Some(ref mut back) = self.backiter { + init = back.try_fold(init, &mut fold)?; + } + self.backiter = None; + + Try::from_ok(init) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.frontiter.into_iter() + .chain(self.iter.map(IntoIterator::into_iter)) + .chain(self.backiter) + .fold(init, |acc, iter| iter.fold(acc, &mut fold)) + } +} + +impl DoubleEndedIterator for FlattenCompat + where I: DoubleEndedIterator, U: DoubleEndedIterator, + I::Item: IntoIterator +{ + #[inline] + fn next_back(&mut self) -> Option { + loop { + if let Some(ref mut inner) = self.backiter { + if let elt@Some(_) = inner.next_back() { return elt } + } + match self.iter.next_back() { + None => return self.frontiter.as_mut().and_then(|it| it.next_back()), + next => self.backiter = next.map(IntoIterator::into_iter), + } + } + } + + #[inline] + fn try_rfold(&mut self, mut init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if let Some(ref mut back) = self.backiter { + init = back.try_rfold(init, &mut fold)?; + } + self.backiter = None; + + { + let backiter = &mut self.backiter; + init = self.iter.try_rfold(init, |acc, x| { + let mut mid = x.into_iter(); + let r = mid.try_rfold(acc, &mut fold); + *backiter = Some(mid); + r + })?; + } + self.backiter = None; + + if let Some(ref mut front) = self.frontiter { + init = front.try_rfold(init, &mut fold)?; + } + self.frontiter = None; + + Try::from_ok(init) + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.frontiter.into_iter() + .chain(self.iter.map(IntoIterator::into_iter)) + .chain(self.backiter) + .rfold(init, |acc, iter| iter.rfold(acc, &mut fold)) + } +} + diff --git a/src/libcore/iter/adapters/mod.rs b/src/libcore/iter/adapters/mod.rs new file mode 100644 index 0000000000000..cccd51b577930 --- /dev/null +++ b/src/libcore/iter/adapters/mod.rs @@ -0,0 +1,2022 @@ +use cmp; +use fmt; +use ops::Try; +use usize; +use intrinsics; +use super::{Iterator, DoubleEndedIterator, 
ExactSizeIterator, FusedIterator, TrustedLen}; +use super::LoopState; + +mod chain; +mod flatten; +mod zip; + +pub use self::chain::Chain; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::flatten::{FlatMap, Flatten}; +pub use self::zip::Zip; +pub(crate) use self::zip::TrustedRandomAccess; + +/// A double-ended iterator with the direction inverted. +/// +/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`rev`]: trait.Iterator.html#method.rev +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Rev { + iter: T +} +impl Rev { + pub(super) fn new(iter: T) -> Rev { + Rev { iter } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Rev where I: DoubleEndedIterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { self.iter.next_back() } + #[inline] + fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } + + #[inline] + fn nth(&mut self, n: usize) -> Option<::Item> { self.iter.nth_back(n) } + + fn try_fold(&mut self, init: B, f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.iter.try_rfold(init, f) + } + + fn fold(self, init: Acc, f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, f) + } + + #[inline] + fn find

<P>(&mut self, predicate: P) -> Option<Self::Item>
+        where P: FnMut(&Self::Item) -> bool
+    {
+        self.iter.rfind(predicate)
+    }
+
+    #[inline]
+    fn rposition<P>(&mut self, predicate: P) -> Option<usize> where
+        P: FnMut(Self::Item) -> bool
+    {
+        self.iter.position(predicate)
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I> DoubleEndedIterator for Rev<I> where I: DoubleEndedIterator {
+    #[inline]
+    fn next_back(&mut self) -> Option<<I as Iterator>::Item> { self.iter.next() }
+
+    #[inline]
+    fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> { self.iter.nth(n) }
+
+    fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R where
+        Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try<Ok=B>
+    {
+        self.iter.try_fold(init, f)
+    }
+
+    fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
+        where F: FnMut(Acc, Self::Item) -> Acc,
+    {
+        self.iter.fold(init, f)
+    }
+
+    fn rfind<P>
(&mut self, predicate: P) -> Option + where P: FnMut(&Self::Item) -> bool + { + self.iter.find(predicate) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Rev + where I: ExactSizeIterator + DoubleEndedIterator +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Rev + where I: FusedIterator + DoubleEndedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Rev + where I: TrustedLen + DoubleEndedIterator {} + +/// An iterator that copies the elements of an underlying iterator. +/// +/// This `struct` is created by the [`copied`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`copied`]: trait.Iterator.html#method.copied +/// [`Iterator`]: trait.Iterator.html +#[unstable(feature = "iter_copied", issue = "57127")] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct Copied { + it: I, +} +impl Copied { + pub(super) fn new(it: I) -> Copied { + Copied { it } + } +} + +#[unstable(feature = "iter_copied", issue = "57127")] +impl<'a, I, T: 'a> Iterator for Copied + where I: Iterator, T: Copy +{ + type Item = T; + + fn next(&mut self) -> Option { + self.it.next().copied() + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn try_fold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.it.try_fold(init, move |acc, &elt| f(acc, elt)) + } + + fn fold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.fold(init, move |acc, &elt| f(acc, elt)) + } +} + +#[unstable(feature = "iter_copied", issue = "57127")] +impl<'a, I, T: 'a> DoubleEndedIterator for Copied + where I: DoubleEndedIterator, T: Copy +{ + fn next_back(&mut self) -> Option { + self.it.next_back().copied() + } + + fn try_rfold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.it.try_rfold(init, move |acc, &elt| f(acc, elt)) + } + + fn rfold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.rfold(init, move |acc, &elt| f(acc, elt)) + } +} + +#[unstable(feature = "iter_copied", issue = "57127")] +impl<'a, I, T: 'a> ExactSizeIterator for Copied + where I: ExactSizeIterator, T: Copy +{ + fn len(&self) -> usize { + self.it.len() + } + + fn is_empty(&self) -> bool { + self.it.is_empty() + } +} + +#[unstable(feature = "iter_copied", issue = "57127")] +impl<'a, I, T: 'a> FusedIterator for Copied + where I: FusedIterator, T: Copy +{} + +#[doc(hidden)] +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Copied + where I: TrustedRandomAccess, T: Copy +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + *self.it.get_unchecked(i) + } + + #[inline] + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[unstable(feature = "iter_copied", issue = "57127")] +unsafe impl<'a, I, T: 'a> TrustedLen for Copied + where I: TrustedLen, + T: Copy +{} + +/// An iterator that clones the elements of an underlying iterator. +/// +/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its +/// documentation for more. 
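A brief sketch of the `Copied` adapter defined above, which turns an iterator over `&T` into an iterator over `T` for `T: Copy` (editorial illustration; `copied` is still unstable at this point, so this assumes the `iter_copied` feature is enabled). The `Cloned` adapter that follows is the analogous adapter for `Clone` types.

    let xs = [1, 2, 3];
    let doubled: Vec<i32> = xs.iter().copied().map(|x| x * 2).collect();
    assert_eq!(doubled, vec![2, 4, 6]);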
+/// +/// [`cloned`]: trait.Iterator.html#method.cloned +/// [`Iterator`]: trait.Iterator.html +#[stable(feature = "iter_cloned", since = "1.1.0")] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[derive(Clone, Debug)] +pub struct Cloned { + it: I, +} +impl Cloned { + pub(super) fn new(it: I) -> Cloned { + Cloned { it } + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> Iterator for Cloned + where I: Iterator, T: Clone +{ + type Item = T; + + fn next(&mut self) -> Option { + self.it.next().cloned() + } + + fn size_hint(&self) -> (usize, Option) { + self.it.size_hint() + } + + fn try_fold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.it.try_fold(init, move |acc, elt| f(acc, elt.clone())) + } + + fn fold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.fold(init, move |acc, elt| f(acc, elt.clone())) + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> DoubleEndedIterator for Cloned + where I: DoubleEndedIterator, T: Clone +{ + fn next_back(&mut self) -> Option { + self.it.next_back().cloned() + } + + fn try_rfold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.it.try_rfold(init, move |acc, elt| f(acc, elt.clone())) + } + + fn rfold(self, init: Acc, mut f: F) -> Acc + where F: FnMut(Acc, Self::Item) -> Acc, + { + self.it.rfold(init, move |acc, elt| f(acc, elt.clone())) + } +} + +#[stable(feature = "iter_cloned", since = "1.1.0")] +impl<'a, I, T: 'a> ExactSizeIterator for Cloned + where I: ExactSizeIterator, T: Clone +{ + fn len(&self) -> usize { + self.it.len() + } + + fn is_empty(&self) -> bool { + self.it.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl<'a, I, T: 'a> FusedIterator for Cloned + where I: FusedIterator, T: Clone +{} + +#[doc(hidden)] +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned + where I: TrustedRandomAccess, T: Clone +{ + default unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + self.it.get_unchecked(i).clone() + } + + #[inline] + default fn may_have_side_effect() -> bool { true } +} + +#[doc(hidden)] +unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned + where I: TrustedRandomAccess, T: Copy +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + *self.it.get_unchecked(i) + } + + #[inline] + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl<'a, I, T: 'a> TrustedLen for Cloned + where I: TrustedLen, + T: Clone +{} + +/// An iterator that repeats endlessly. +/// +/// This `struct` is created by the [`cycle`] method on [`Iterator`]. See its +/// documentation for more. 
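A sketch of the `Cycle` adapter introduced above: it repeats the underlying iterator forever, so it is normally bounded with something like `take` (hypothetical values, not part of the patch):

    let repeated: Vec<i32> = [1, 2, 3].iter().cloned().cycle().take(7).collect();
    assert_eq!(repeated, vec![1, 2, 3, 1, 2, 3, 1]);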
+/// +/// [`cycle`]: trait.Iterator.html#method.cycle +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Cycle { + orig: I, + iter: I, +} +impl Cycle { + pub(super) fn new(iter: I) -> Cycle { + Cycle { orig: iter.clone(), iter } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Cycle where I: Clone + Iterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + match self.iter.next() { + None => { self.iter = self.orig.clone(); self.iter.next() } + y => y + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + // the cycle iterator is either empty or infinite + match self.orig.size_hint() { + sz @ (0, Some(0)) => sz, + (0, _) => (0, None), + _ => (usize::MAX, None) + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Cycle where I: Clone + Iterator {} + +/// An iterator for stepping iterators by a custom amount. +/// +/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See +/// its documentation for more. +/// +/// [`step_by`]: trait.Iterator.html#method.step_by +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "iterator_step_by", since = "1.28.0")] +#[derive(Clone, Debug)] +pub struct StepBy { + iter: I, + step: usize, + first_take: bool, +} +impl StepBy { + pub(super) fn new(iter: I, step: usize) -> StepBy { + assert!(step != 0); + StepBy { iter, step: step - 1, first_take: true } + } +} + +#[stable(feature = "iterator_step_by", since = "1.28.0")] +impl Iterator for StepBy where I: Iterator { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.first_take { + self.first_take = false; + self.iter.next() + } else { + self.iter.nth(self.step) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let inner_hint = self.iter.size_hint(); + + if self.first_take { + let f = |n| if n == 0 { 0 } else { 1 + (n-1)/(self.step+1) }; + (f(inner_hint.0), inner_hint.1.map(f)) + } else { + let f = |n| n / (self.step+1); + (f(inner_hint.0), inner_hint.1.map(f)) + } + } + + #[inline] + fn nth(&mut self, mut n: usize) -> Option { + if self.first_take { + self.first_take = false; + let first = self.iter.next(); + if n == 0 { + return first; + } + n -= 1; + } + // n and self.step are indices, we need to add 1 to get the amount of elements + // When calling `.nth`, we need to subtract 1 again to convert back to an index + // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1` + let mut step = self.step + 1; + // n + 1 could overflow + // thus, if n is usize::MAX, instead of adding one, we call .nth(step) + if n == usize::MAX { + self.iter.nth(step - 1); + } else { + n += 1; + } + + // overflow handling + loop { + let mul = n.checked_mul(step); + if unsafe { intrinsics::likely(mul.is_some()) } { + return self.iter.nth(mul.unwrap() - 1); + } + let div_n = usize::MAX / n; + let div_step = usize::MAX / step; + let nth_n = div_n * n; + let nth_step = div_step * step; + let nth = if nth_n > nth_step { + step -= div_n; + nth_n + } else { + n -= div_step; + nth_step + }; + self.iter.nth(nth - 1); + } + } +} + +// StepBy can only make the iterator shorter, so the len will still fit. 
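To make the `StepBy` length formula above concrete: with 10 remaining elements and `step_by(3)` (so `self.step == 2`), `1 + (10 - 1) / 3 == 4`, which matches the four items actually produced (hypothetical values, not part of the patch):

    let taken: Vec<u32> = (0..10).step_by(3).collect();
    assert_eq!(taken, vec![0, 3, 6, 9]);
    assert_eq!((0..10).step_by(3).len(), 4);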
+#[stable(feature = "iterator_step_by", since = "1.28.0")] +impl ExactSizeIterator for StepBy where I: ExactSizeIterator {} + +/// An iterator that maps the values of `iter` with `f`. +/// +/// This `struct` is created by the [`map`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`map`]: trait.Iterator.html#method.map +/// [`Iterator`]: trait.Iterator.html +/// +/// # Notes about side effects +/// +/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that +/// you can also [`map`] backwards: +/// +/// ```rust +/// let v: Vec = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); +/// +/// assert_eq!(v, [4, 3, 2]); +/// ``` +/// +/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html +/// +/// But if your closure has state, iterating backwards may act in a way you do +/// not expect. Let's go through an example. First, in the forward direction: +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) { +/// println!("{:?}", pair); +/// } +/// ``` +/// +/// This will print "('a', 1), ('b', 2), ('c', 3)". +/// +/// Now consider this twist where we add a call to `rev`. This version will +/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, +/// but the values of the counter still go in order. This is because `map()` is +/// still being called lazily on each item, but we are popping items off the +/// back of the vector now, instead of shifting them from the front. +/// +/// ```rust +/// let mut c = 0; +/// +/// for pair in vec!['a', 'b', 'c'].into_iter() +/// .map(|letter| { c += 1; (letter, c) }) +/// .rev() { +/// println!("{:?}", pair); +/// } +/// ``` +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Map { + iter: I, + f: F, +} +impl Map { + pub(super) fn new(iter: I, f: F) -> Map { + Map { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Map { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Map") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Map where F: FnMut(I::Item) -> B { + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().map(&mut self.f) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + fn try_fold(&mut self, init: Acc, mut g: G) -> R where + Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_fold(init, move |acc, elt| g(acc, f(elt))) + } + + fn fold(self, init: Acc, mut g: G) -> Acc + where G: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, elt| g(acc, f(elt))) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Map where + F: FnMut(I::Item) -> B, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.iter.next_back().map(&mut self.f) + } + + fn try_rfold(&mut self, init: Acc, mut g: G) -> R where + Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_rfold(init, move |acc, elt| g(acc, f(elt))) + } + + fn rfold(self, init: Acc, mut g: G) -> Acc + where G: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, elt| g(acc, f(elt))) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Map + 
where F: FnMut(I::Item) -> B +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Map + where F: FnMut(I::Item) -> B {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Map + where I: TrustedLen, + F: FnMut(I::Item) -> B {} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Map + where I: TrustedRandomAccess, + F: FnMut(I::Item) -> B, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { + (self.f)(self.iter.get_unchecked(i)) + } + #[inline] + fn may_have_side_effect() -> bool { true } +} + +/// An iterator that filters the elements of `iter` with `predicate`. +/// +/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`filter`]: trait.Iterator.html#method.filter +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Filter { + iter: I, + predicate: P, +} +impl Filter { + pub(super) fn new(iter: I, predicate: P) -> Filter { + Filter { iter, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Filter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Filter") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Filter where P: FnMut(&I::Item) -> bool { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + self.try_for_each(Err).err() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + // this special case allows the compiler to make `.filter(_).count()` + // branchless. Barring perfect branch prediction (which is unattainable in + // the general case), this will be much faster in >90% of cases (containing + // virtually all real workloads) and only a tiny bit slower in the rest. + // + // Having this specialization thus allows us to write `.filter(p).count()` + // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is + // less readable and also less backwards-compatible to Rust before 1.10. + // + // Using the branchless version will also simplify the LLVM byte code, thus + // leaving more budget for LLVM optimizations. 
+ #[inline] + fn count(self) -> usize { + let mut predicate = self.predicate; + self.iter.map(|x| predicate(&x) as usize).sum() + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let predicate = &mut self.predicate; + self.iter.try_fold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + Try::from_ok(acc) + }) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut predicate = self.predicate; + self.iter.fold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + acc + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Filter + where P: FnMut(&I::Item) -> bool, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.try_rfold((), |_, x| Err(x)).err() + } + + #[inline] + fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let predicate = &mut self.predicate; + self.iter.try_rfold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + Try::from_ok(acc) + }) + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut predicate = self.predicate; + self.iter.rfold(init, move |acc, item| if predicate(&item) { + fold(acc, item) + } else { + acc + }) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Filter + where P: FnMut(&I::Item) -> bool {} + +/// An iterator that uses `f` to both filter and map elements from `iter`. +/// +/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its +/// documentation for more. 
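The branchless `count` specialization implemented above is observationally equivalent to counting the filtered items directly; a worked example with hypothetical values (not part of the patch):

    let evens = (1..=10).filter(|x| x % 2 == 0).count();
    let branchless: usize = (1..=10).map(|x| (x % 2 == 0) as usize).sum();
    assert_eq!(evens, 5);
    assert_eq!(evens, branchless);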
+/// +/// [`filter_map`]: trait.Iterator.html#method.filter_map +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct FilterMap { + iter: I, + f: F, +} +impl FilterMap { + pub(super) fn new(iter: I, f: F) -> FilterMap { + FilterMap { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for FilterMap { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("FilterMap") + .field("iter", &self.iter) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for FilterMap + where F: FnMut(I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.try_for_each(Err).err() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_fold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => Try::from_ok(acc), + }) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => acc, + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for FilterMap + where F: FnMut(I::Item) -> Option, +{ + #[inline] + fn next_back(&mut self) -> Option { + self.try_rfold((), |_, x| Err(x)).err() + } + + #[inline] + fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_rfold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => Try::from_ok(acc), + }) + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, item| match f(item) { + Some(x) => fold(acc, x), + None => acc, + }) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for FilterMap + where F: FnMut(I::Item) -> Option {} + +/// An iterator that yields the current count and the element during iteration. +/// +/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`enumerate`]: trait.Iterator.html#method.enumerate +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Enumerate { + iter: I, + count: usize, +} +impl Enumerate { + pub(super) fn new(iter: I) -> Enumerate { + Enumerate { iter, count: 0 } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Enumerate where I: Iterator { + type Item = (usize, ::Item); + + /// # Overflow Behavior + /// + /// The method does no guarding against overflows, so enumerating more than + /// `usize::MAX` elements either produces the wrong result or panics. If + /// debug assertions are enabled, a panic is guaranteed. + /// + /// # Panics + /// + /// Might panic if the index of the element overflows a `usize`. 
+ #[inline] + #[rustc_inherit_overflow_checks] + fn next(&mut self) -> Option<(usize, ::Item)> { + self.iter.next().map(|a| { + let ret = (self.count, a); + // Possible undefined overflow. + self.count += 1; + ret + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { + self.iter.nth(n).map(|a| { + let i = self.count + n; + self.count = i + 1; + (i, a) + }) + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let count = &mut self.count; + self.iter.try_fold(init, move |acc, item| { + let acc = fold(acc, (*count, item)); + *count += 1; + acc + }) + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut count = self.count; + self.iter.fold(init, move |acc, item| { + let acc = fold(acc, (count, item)); + count += 1; + acc + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Enumerate where + I: ExactSizeIterator + DoubleEndedIterator +{ + #[inline] + fn next_back(&mut self) -> Option<(usize, ::Item)> { + self.iter.next_back().map(|a| { + let len = self.iter.len(); + // Can safely add, `ExactSizeIterator` promises that the number of + // elements fits into a `usize`. + (self.count + len, a) + }) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + // Can safely add and subtract the count, as `ExactSizeIterator` promises + // that the number of elements fits into a `usize`. + let mut count = self.count + self.iter.len(); + self.iter.try_rfold(init, move |acc, item| { + count -= 1; + fold(acc, (count, item)) + }) + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + // Can safely add and subtract the count, as `ExactSizeIterator` promises + // that the number of elements fits into a `usize`. + let mut count = self.count + self.iter.len(); + self.iter.rfold(init, move |acc, item| { + count -= 1; + fold(acc, (count, item)) + }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Enumerate where I: ExactSizeIterator { + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Enumerate + where I: TrustedRandomAccess +{ + unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) { + (self.count + i, self.iter.get_unchecked(i)) + } + + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Enumerate where I: FusedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Enumerate + where I: TrustedLen, +{} + + +/// An iterator with a `peek()` that returns an optional reference to the next +/// element. +/// +/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its +/// documentation for more. 
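A small sketch of the `Enumerate` double-ended behaviour implemented above: indices from the back are computed from the remaining length, so they stay consistent with the forward indices (hypothetical values, not part of the patch):

    let mut iter = ['a', 'b', 'c'].iter().enumerate();
    assert_eq!(iter.next(), Some((0, &'a')));
    assert_eq!(iter.next_back(), Some((2, &'c')));
    assert_eq!(iter.next(), Some((1, &'b')));
    assert_eq!(iter.next_back(), None);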
+/// +/// [`peekable`]: trait.Iterator.html#method.peekable +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Peekable { + iter: I, + /// Remember a peeked value, even if it was None. + peeked: Option>, +} +impl Peekable { + pub(super) fn new(iter: I) -> Peekable { + Peekable { iter, peeked: None } + } +} + +// Peekable must remember if a None has been seen in the `.peek()` method. +// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the +// underlying iterator at most once. This does not by itself make the iterator +// fused. +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Peekable { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + match self.peeked.take() { + Some(v) => v, + None => self.iter.next(), + } + } + + #[inline] + #[rustc_inherit_overflow_checks] + fn count(mut self) -> usize { + match self.peeked.take() { + Some(None) => 0, + Some(Some(_)) => 1 + self.iter.count(), + None => self.iter.count(), + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + match self.peeked.take() { + Some(None) => None, + Some(v @ Some(_)) if n == 0 => v, + Some(Some(_)) => self.iter.nth(n - 1), + None => self.iter.nth(n), + } + } + + #[inline] + fn last(mut self) -> Option { + let peek_opt = match self.peeked.take() { + Some(None) => return None, + Some(v) => v, + None => None, + }; + self.iter.last().or(peek_opt) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let peek_len = match self.peeked { + Some(None) => return (0, Some(0)), + Some(Some(_)) => 1, + None => 0, + }; + let (lo, hi) = self.iter.size_hint(); + let lo = lo.saturating_add(peek_len); + let hi = hi.and_then(|x| x.checked_add(peek_len)); + (lo, hi) + } + + #[inline] + fn try_fold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + let acc = match self.peeked.take() { + Some(None) => return Try::from_ok(init), + Some(Some(v)) => f(init, v)?, + None => init, + }; + self.iter.try_fold(acc, f) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let acc = match self.peeked { + Some(None) => return init, + Some(Some(v)) => fold(init, v), + None => init, + }; + self.iter.fold(acc, fold) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Peekable {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Peekable {} + +impl Peekable { + /// Returns a reference to the next() value without advancing the iterator. + /// + /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`. + /// But if the iteration is over, `None` is returned. + /// + /// [`next`]: trait.Iterator.html#tymethod.next + /// + /// Because `peek()` returns a reference, and many iterators iterate over + /// references, there can be a possibly confusing situation where the + /// return value is a double reference. You can see this effect in the + /// examples below. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let xs = [1, 2, 3]; + /// + /// let mut iter = xs.iter().peekable(); + /// + /// // peek() lets us see into the future + /// assert_eq!(iter.peek(), Some(&&1)); + /// assert_eq!(iter.next(), Some(&1)); + /// + /// assert_eq!(iter.next(), Some(&2)); + /// + /// // The iterator does not advance even if we `peek` multiple times + /// assert_eq!(iter.peek(), Some(&&3)); + /// assert_eq!(iter.peek(), Some(&&3)); + /// + /// assert_eq!(iter.next(), Some(&3)); + /// + /// // After the iterator is finished, so is `peek()` + /// assert_eq!(iter.peek(), None); + /// assert_eq!(iter.next(), None); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn peek(&mut self) -> Option<&I::Item> { + let iter = &mut self.iter; + self.peeked.get_or_insert_with(|| iter.next()).as_ref() + } +} + +/// An iterator that rejects elements while `predicate` returns `true`. +/// +/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`skip_while`]: trait.Iterator.html#method.skip_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct SkipWhile { + iter: I, + flag: bool, + predicate: P, +} +impl SkipWhile { + pub(super) fn new(iter: I, predicate: P) -> SkipWhile { + SkipWhile { iter, flag: false, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for SkipWhile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SkipWhile") + .field("iter", &self.iter) + .field("flag", &self.flag) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for SkipWhile + where P: FnMut(&I::Item) -> bool +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + let flag = &mut self.flag; + let pred = &mut self.predicate; + self.iter.find(move |x| { + if *flag || !pred(x) { + *flag = true; + true + } else { + false + } + }) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + + #[inline] + fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if !self.flag { + match self.next() { + Some(v) => init = fold(init, v)?, + None => return Try::from_ok(init), + } + } + self.iter.try_fold(init, fold) + } + + #[inline] + fn fold(mut self, mut init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if !self.flag { + match self.next() { + Some(v) => init = fold(init, v), + None => return init, + } + } + self.iter.fold(init, fold) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for SkipWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + +/// An iterator that only accepts elements while `predicate` returns `true`. +/// +/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its +/// documentation for more. 
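A sketch contrasting the `SkipWhile` adapter implemented above with the `TakeWhile` adapter introduced here: `skip_while` drops only the leading run of matching elements, while `take_while` yields only that leading run; both stop consulting the predicate after the first boundary element (hypothetical values, not part of the patch):

    let xs = [1, 2, 7, 3];
    let skipped: Vec<&i32> = xs.iter().skip_while(|&&x| x < 5).collect();
    assert_eq!(skipped, vec![&7, &3]);   // 3 < 5, but the predicate is no longer applied

    let taken: Vec<&i32> = xs.iter().take_while(|&&x| x < 5).collect();
    assert_eq!(taken, vec![&1, &2]);     // stops at 7, even though 3 would match again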
+/// +/// [`take_while`]: trait.Iterator.html#method.take_while +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct TakeWhile { + iter: I, + flag: bool, + predicate: P, +} +impl TakeWhile { + pub(super) fn new(iter: I, predicate: P) -> TakeWhile { + TakeWhile { iter, flag: false, predicate } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for TakeWhile { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("TakeWhile") + .field("iter", &self.iter) + .field("flag", &self.flag) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for TakeWhile + where P: FnMut(&I::Item) -> bool +{ + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.flag { + None + } else { + self.iter.next().and_then(|x| { + if (self.predicate)(&x) { + Some(x) + } else { + self.flag = true; + None + } + }) + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.flag { + (0, Some(0)) + } else { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the predicate + } + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if self.flag { + Try::from_ok(init) + } else { + let flag = &mut self.flag; + let p = &mut self.predicate; + self.iter.try_fold(init, move |acc, x|{ + if p(&x) { + LoopState::from_try(fold(acc, x)) + } else { + *flag = true; + LoopState::Break(Try::from_ok(acc)) + } + }).into_try() + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for TakeWhile + where I: FusedIterator, P: FnMut(&I::Item) -> bool {} + +/// An iterator that skips over `n` elements of `iter`. +/// +/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`skip`]: trait.Iterator.html#method.skip +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Skip { + iter: I, + n: usize +} +impl Skip { + pub(super) fn new(iter: I, n: usize) -> Skip { + Skip { iter, n } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Skip where I: Iterator { + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option { + if self.n == 0 { + self.iter.next() + } else { + let old_n = self.n; + self.n = 0; + self.iter.nth(old_n) + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + // Can't just add n + self.n due to overflow. + if self.n == 0 { + self.iter.nth(n) + } else { + let to_skip = self.n; + self.n = 0; + // nth(n) skips n+1 + if self.iter.nth(to_skip-1).is_none() { + return None; + } + self.iter.nth(n) + } + } + + #[inline] + fn count(self) -> usize { + self.iter.count().saturating_sub(self.n) + } + + #[inline] + fn last(mut self) -> Option { + if self.n == 0 { + self.iter.last() + } else { + let next = self.next(); + if next.is_some() { + // recurse. n should be 0. 
+ self.last().or(next) + } else { + None + } + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (lower, upper) = self.iter.size_hint(); + + let lower = lower.saturating_sub(self.n); + let upper = upper.map(|x| x.saturating_sub(self.n)); + + (lower, upper) + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let n = self.n; + self.n = 0; + if n > 0 { + // nth(n) skips n+1 + if self.iter.nth(n - 1).is_none() { + return Try::from_ok(init); + } + } + self.iter.try_fold(init, fold) + } + + #[inline] + fn fold(mut self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.n > 0 { + // nth(n) skips n+1 + if self.iter.nth(self.n - 1).is_none() { + return init; + } + } + self.iter.fold(init, fold) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Skip where I: ExactSizeIterator {} + +#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")] +impl DoubleEndedIterator for Skip where I: DoubleEndedIterator + ExactSizeIterator { + fn next_back(&mut self) -> Option { + if self.len() > 0 { + self.iter.next_back() + } else { + None + } + } + + fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let mut n = self.len(); + if n == 0 { + Try::from_ok(init) + } else { + self.iter.try_rfold(init, move |acc, x| { + n -= 1; + let r = fold(acc, x); + if n == 0 { LoopState::Break(r) } + else { LoopState::from_try(r) } + }).into_try() + } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Skip where I: FusedIterator {} + +/// An iterator that only iterates over the first `n` iterations of `iter`. +/// +/// This `struct` is created by the [`take`] method on [`Iterator`]. See its +/// documentation for more. 
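A sketch of the `Skip` adapter implemented above; note how the first call to `next` consumes the skipped prefix in a single `nth` call (hypothetical values, not part of the patch):

    let mut iter = (0..6).skip(2);
    assert_eq!(iter.next(), Some(2));            // skips 0 and 1 via one nth call, then yields 2
    assert_eq!(iter.size_hint(), (3, Some(3)));  // 3, 4, 5 remain
    assert_eq!(iter.count(), 3);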
+/// +/// [`take`]: trait.Iterator.html#method.take +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Take { + pub(super) iter: I, + pub(super) n: usize +} +impl Take { + pub(super) fn new(iter: I, n: usize) -> Take { + Take { iter, n } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Take where I: Iterator{ + type Item = ::Item; + + #[inline] + fn next(&mut self) -> Option<::Item> { + if self.n != 0 { + self.n -= 1; + self.iter.next() + } else { + None + } + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + if self.n > n { + self.n -= n + 1; + self.iter.nth(n) + } else { + if self.n > 0 { + self.iter.nth(self.n - 1); + self.n = 0; + } + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + if self.n == 0 { + return (0, Some(0)); + } + + let (lower, upper) = self.iter.size_hint(); + + let lower = cmp::min(lower, self.n); + + let upper = match upper { + Some(x) if x < self.n => Some(x), + _ => Some(self.n) + }; + + (lower, upper) + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if self.n == 0 { + Try::from_ok(init) + } else { + let n = &mut self.n; + self.iter.try_fold(init, move |acc, x| { + *n -= 1; + let r = fold(acc, x); + if *n == 0 { LoopState::Break(r) } + else { LoopState::from_try(r) } + }).into_try() + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Take where I: ExactSizeIterator {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Take where I: FusedIterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Take {} + +/// An iterator to maintain state while iterating another iterator. +/// +/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its +/// documentation for more. 
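A sketch of the `Scan` adapter introduced above (its implementation follows): the closure threads mutable state between elements and can end the iteration early by returning `None` (hypothetical values, not part of the patch):

    let running_sums: Vec<i32> = [1, 2, 3, 4].iter().scan(0, |sum, &x| {
        *sum += x;
        if *sum > 6 { None } else { Some(*sum) }
    }).collect();
    assert_eq!(running_sums, vec![1, 3, 6]);   // stops once the running sum exceeds 6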
+/// +/// [`scan`]: trait.Iterator.html#method.scan +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Scan { + iter: I, + f: F, + state: St, +} +impl Scan { + pub(super) fn new(iter: I, state: St, f: F) -> Scan { + Scan { iter, state, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Scan { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Scan") + .field("iter", &self.iter) + .field("state", &self.state) + .finish() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Scan where + I: Iterator, + F: FnMut(&mut St, I::Item) -> Option, +{ + type Item = B; + + #[inline] + fn next(&mut self) -> Option { + self.iter.next().and_then(|a| (self.f)(&mut self.state, a)) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let (_, upper) = self.iter.size_hint(); + (0, upper) // can't know a lower bound, due to the scan function + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let state = &mut self.state; + let f = &mut self.f; + self.iter.try_fold(init, move |acc, x| { + match f(state, x) { + None => LoopState::Break(Try::from_ok(acc)), + Some(x) => LoopState::from_try(fold(acc, x)), + } + }).into_try() + } +} + +/// An iterator that yields `None` forever after the underlying iterator +/// yields `None` once. +/// +/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its +/// documentation for more. +/// +/// [`fuse`]: trait.Iterator.html#method.fuse +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Fuse { + iter: I, + done: bool +} +impl Fuse { + pub(super) fn new(iter: I) -> Fuse { + Fuse { iter, done: false } + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Fuse where I: Iterator {} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Fuse where I: Iterator { + type Item = ::Item; + + #[inline] + default fn next(&mut self) -> Option<::Item> { + if self.done { + None + } else { + let next = self.iter.next(); + self.done = next.is_none(); + next + } + } + + #[inline] + default fn nth(&mut self, n: usize) -> Option { + if self.done { + None + } else { + let nth = self.iter.nth(n); + self.done = nth.is_none(); + nth + } + } + + #[inline] + default fn last(self) -> Option { + if self.done { + None + } else { + self.iter.last() + } + } + + #[inline] + default fn count(self) -> usize { + if self.done { + 0 + } else { + self.iter.count() + } + } + + #[inline] + default fn size_hint(&self) -> (usize, Option) { + if self.done { + (0, Some(0)) + } else { + self.iter.size_hint() + } + } + + #[inline] + default fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if self.done { + Try::from_ok(init) + } else { + let acc = self.iter.try_fold(init, fold)?; + self.done = true; + Try::from_ok(acc) + } + } + + #[inline] + default fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.done { + init + } else { + self.iter.fold(init, fold) + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { + #[inline] + 
default fn next_back(&mut self) -> Option<::Item> { + if self.done { + None + } else { + let next = self.iter.next_back(); + self.done = next.is_none(); + next + } + } + + #[inline] + default fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + if self.done { + Try::from_ok(init) + } else { + let acc = self.iter.try_rfold(init, fold)?; + self.done = true; + Try::from_ok(acc) + } + } + + #[inline] + default fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + if self.done { + init + } else { + self.iter.rfold(init, fold) + } + } +} + +unsafe impl TrustedRandomAccess for Fuse + where I: TrustedRandomAccess, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { + self.iter.get_unchecked(i) + } + + fn may_have_side_effect() -> bool { + I::may_have_side_effect() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl Iterator for Fuse where I: FusedIterator { + #[inline] + fn next(&mut self) -> Option<::Item> { + self.iter.next() + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + self.iter.nth(n) + } + + #[inline] + fn last(self) -> Option { + self.iter.last() + } + + #[inline] + fn count(self) -> usize { + self.iter.count() + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.iter.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.fold(init, fold) + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl DoubleEndedIterator for Fuse + where I: DoubleEndedIterator + FusedIterator +{ + #[inline] + fn next_back(&mut self) -> Option<::Item> { + self.iter.next_back() + } + + #[inline] + fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.iter.try_rfold(init, fold) + } + + #[inline] + fn rfold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.iter.rfold(init, fold) + } +} + + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Fuse where I: ExactSizeIterator { + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +/// An iterator that calls a function with a reference to each element before +/// yielding it. +/// +/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its +/// documentation for more. 
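A sketch of the `Inspect` adapter described above: `inspect` hands a reference to each element to the closure, typically for debugging, without changing what is yielded (hypothetical values, not part of the patch):

    let mut seen = Vec::new();
    let total: i32 = [1, 2, 3]
        .iter()
        .inspect(|&&x| seen.push(x))
        .sum();
    assert_eq!(total, 6);
    assert_eq!(seen, vec![1, 2, 3]);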
+/// +/// [`inspect`]: trait.Iterator.html#method.inspect +/// [`Iterator`]: trait.Iterator.html +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +#[derive(Clone)] +pub struct Inspect { + iter: I, + f: F, +} +impl Inspect { + pub(super) fn new(iter: I, f: F) -> Inspect { + Inspect { iter, f } + } +} + +#[stable(feature = "core_impl_debug", since = "1.9.0")] +impl fmt::Debug for Inspect { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Inspect") + .field("iter", &self.iter) + .finish() + } +} + +impl Inspect where F: FnMut(&I::Item) { + #[inline] + fn do_inspect(&mut self, elt: Option) -> Option { + if let Some(ref a) = elt { + (self.f)(a); + } + + elt + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Inspect where F: FnMut(&I::Item) { + type Item = I::Item; + + #[inline] + fn next(&mut self) -> Option { + let next = self.iter.next(); + self.do_inspect(next) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.iter.size_hint() + } + + #[inline] + fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_fold(init, move |acc, item| { f(&item); fold(acc, item) }) + } + + #[inline] + fn fold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.fold(init, move |acc, item| { f(&item); fold(acc, item) }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Inspect + where F: FnMut(&I::Item), +{ + #[inline] + fn next_back(&mut self) -> Option { + let next = self.iter.next_back(); + self.do_inspect(next) + } + + #[inline] + fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + let f = &mut self.f; + self.iter.try_rfold(init, move |acc, item| { f(&item); fold(acc, item) }) + } + + #[inline] + fn rfold(self, init: Acc, mut fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + let mut f = self.f; + self.iter.rfold(init, move |acc, item| { f(&item); fold(acc, item) }) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Inspect + where F: FnMut(&I::Item) +{ + fn len(&self) -> usize { + self.iter.len() + } + + fn is_empty(&self) -> bool { + self.iter.is_empty() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Inspect + where F: FnMut(&I::Item) {} diff --git a/src/libcore/iter/adapters/zip.rs b/src/libcore/iter/adapters/zip.rs new file mode 100644 index 0000000000000..3548d0e282602 --- /dev/null +++ b/src/libcore/iter/adapters/zip.rs @@ -0,0 +1,282 @@ +use cmp; +use super::super::{Iterator, DoubleEndedIterator, ExactSizeIterator, FusedIterator, TrustedLen}; + +/// An iterator that iterates two other iterators simultaneously. +/// +/// This `struct` is created by the [`zip`] method on [`Iterator`]. See its +/// documentation for more. 
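A sketch of the `Zip` adapter introduced above: it pairs up items from both iterators and stops at the shorter one (hypothetical values, not part of the patch):

    let letters = ['a', 'b', 'c'];
    let numbers = [1, 2];
    let pairs: Vec<(char, i32)> = letters.iter().cloned().zip(numbers.iter().cloned()).collect();
    assert_eq!(pairs, vec![('a', 1), ('b', 2)]);   // length is that of the shorter input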
+/// +/// [`zip`]: trait.Iterator.html#method.zip +/// [`Iterator`]: trait.Iterator.html +#[derive(Clone, Debug)] +#[must_use = "iterators are lazy and do nothing unless consumed"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Zip { + a: A, + b: B, + // index and len are only used by the specialized version of zip + index: usize, + len: usize, +} +impl Zip { + pub(in super::super) fn new(a: A, b: B) -> Zip { + ZipImpl::new(a, b) + } + fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> { + while let Some(x) = Iterator::next(self) { + if n == 0 { return Some(x) } + n -= 1; + } + None + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Iterator for Zip where A: Iterator, B: Iterator +{ + type Item = (A::Item, B::Item); + + #[inline] + fn next(&mut self) -> Option { + ZipImpl::next(self) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + ZipImpl::size_hint(self) + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + ZipImpl::nth(self, n) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl DoubleEndedIterator for Zip where + A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator, +{ + #[inline] + fn next_back(&mut self) -> Option<(A::Item, B::Item)> { + ZipImpl::next_back(self) + } +} + +// Zip specialization trait +#[doc(hidden)] +trait ZipImpl { + type Item; + fn new(a: A, b: B) -> Self; + fn next(&mut self) -> Option; + fn size_hint(&self) -> (usize, Option); + fn nth(&mut self, n: usize) -> Option; + fn next_back(&mut self) -> Option + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator; +} + +// General Zip impl +#[doc(hidden)] +impl ZipImpl for Zip + where A: Iterator, B: Iterator +{ + type Item = (A::Item, B::Item); + default fn new(a: A, b: B) -> Self { + Zip { + a, + b, + index: 0, // unused + len: 0, // unused + } + } + + #[inline] + default fn next(&mut self) -> Option<(A::Item, B::Item)> { + self.a.next().and_then(|x| { + self.b.next().and_then(|y| { + Some((x, y)) + }) + }) + } + + #[inline] + default fn nth(&mut self, n: usize) -> Option { + self.super_nth(n) + } + + #[inline] + default fn next_back(&mut self) -> Option<(A::Item, B::Item)> + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator + { + let a_sz = self.a.len(); + let b_sz = self.b.len(); + if a_sz != b_sz { + // Adjust a, b to equal length + if a_sz > b_sz { + for _ in 0..a_sz - b_sz { self.a.next_back(); } + } else { + for _ in 0..b_sz - a_sz { self.b.next_back(); } + } + } + match (self.a.next_back(), self.b.next_back()) { + (Some(x), Some(y)) => Some((x, y)), + (None, None) => None, + _ => unreachable!(), + } + } + + #[inline] + default fn size_hint(&self) -> (usize, Option) { + let (a_lower, a_upper) = self.a.size_hint(); + let (b_lower, b_upper) = self.b.size_hint(); + + let lower = cmp::min(a_lower, b_lower); + + let upper = match (a_upper, b_upper) { + (Some(x), Some(y)) => Some(cmp::min(x,y)), + (Some(x), None) => Some(x), + (None, Some(y)) => Some(y), + (None, None) => None + }; + + (lower, upper) + } +} + +#[doc(hidden)] +impl ZipImpl for Zip + where A: TrustedRandomAccess, B: TrustedRandomAccess +{ + fn new(a: A, b: B) -> Self { + let len = cmp::min(a.len(), b.len()); + Zip { + a, + b, + index: 0, + len, + } + } + + #[inline] + fn next(&mut self) -> Option<(A::Item, B::Item)> { + if self.index < self.len { + let i = self.index; + self.index += 1; + unsafe { + Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) + } + } 
else if A::may_have_side_effect() && self.index < self.a.len() { + // match the base implementation's potential side effects + unsafe { + self.a.get_unchecked(self.index); + } + self.index += 1; + None + } else { + None + } + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + let len = self.len - self.index; + (len, Some(len)) + } + + #[inline] + fn nth(&mut self, n: usize) -> Option { + let delta = cmp::min(n, self.len - self.index); + let end = self.index + delta; + while self.index < end { + let i = self.index; + self.index += 1; + if A::may_have_side_effect() { + unsafe { self.a.get_unchecked(i); } + } + if B::may_have_side_effect() { + unsafe { self.b.get_unchecked(i); } + } + } + + self.super_nth(n - delta) + } + + #[inline] + fn next_back(&mut self) -> Option<(A::Item, B::Item)> + where A: DoubleEndedIterator + ExactSizeIterator, + B: DoubleEndedIterator + ExactSizeIterator + { + // Adjust a, b to equal length + if A::may_have_side_effect() { + let sz = self.a.len(); + if sz > self.len { + for _ in 0..sz - cmp::max(self.len, self.index) { + self.a.next_back(); + } + } + } + if B::may_have_side_effect() { + let sz = self.b.len(); + if sz > self.len { + for _ in 0..sz - self.len { + self.b.next_back(); + } + } + } + if self.index < self.len { + self.len -= 1; + let i = self.len; + unsafe { + Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) + } + } else { + None + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for Zip + where A: ExactSizeIterator, B: ExactSizeIterator {} + +#[doc(hidden)] +unsafe impl TrustedRandomAccess for Zip + where A: TrustedRandomAccess, + B: TrustedRandomAccess, +{ + unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) { + (self.a.get_unchecked(i), self.b.get_unchecked(i)) + } + + fn may_have_side_effect() -> bool { + A::may_have_side_effect() || B::may_have_side_effect() + } +} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for Zip + where A: FusedIterator, B: FusedIterator, {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for Zip + where A: TrustedLen, B: TrustedLen, +{} + +/// An iterator whose items are random-accessible efficiently +/// +/// # Safety +/// +/// The iterator's .len() and size_hint() must be exact. +/// `.len()` must be cheap to call. +/// +/// .get_unchecked() must return distinct mutable references for distinct +/// indices (if applicable), and must return a valid reference if index is in +/// 0..self.len(). +pub(crate) unsafe trait TrustedRandomAccess : ExactSizeIterator { + unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item; + /// Returns `true` if getting an iterator element may have + /// side effects. Remember to take inner iterators into account. + fn may_have_side_effect() -> bool; +} diff --git a/src/libcore/iter/mod.rs b/src/libcore/iter/mod.rs index aa23d49672a0b..e6a616b680228 100644 --- a/src/libcore/iter/mod.rs +++ b/src/libcore/iter/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Composable external iteration. //! //! If you've found yourself with a collection of some kind, and needed to @@ -111,7 +101,7 @@ //! type Item = usize; //! //! 
// next() is the only required method -//! fn next(&mut self) -> Option { +//! fn next(&mut self) -> Option { //! // Increment our count. This is why we started at zero. //! self.count += 1; //! @@ -253,7 +243,7 @@ //! using it. The compiler will warn us about this kind of behavior: //! //! ```text -//! warning: unused result that must be used: iterator adaptors are lazy and +//! warning: unused result that must be used: iterators are lazy and //! do nothing unless consumed //! ``` //! @@ -316,15 +306,10 @@ #![stable(feature = "rust1", since = "1.0.0")] -use cmp; -use fmt; -use iter_private::TrustedRandomAccess; use ops::Try; -use usize; -use intrinsics; #[stable(feature = "rust1", since = "1.0.0")] -pub use self::iterator::Iterator; +pub use self::traits::Iterator; #[unstable(feature = "step_trait", reason = "likely to be replaced by finer-grained traits", @@ -339,8 +324,12 @@ pub use self::sources::{RepeatWith, repeat_with}; pub use self::sources::{Empty, empty}; #[stable(feature = "iter_once", since = "1.2.0")] pub use self::sources::{Once, once}; -#[unstable(feature = "iter_unfold", issue = "55977")] -pub use self::sources::{Unfold, unfold, Successors, successors}; +#[unstable(feature = "iter_once_with", issue = "57581")] +pub use self::sources::{OnceWith, once_with}; +#[stable(feature = "iter_from_fn", since = "1.34.0")] +pub use self::sources::{FromFn, from_fn}; +#[stable(feature = "iter_successors", since = "1.34.0")] +pub use self::sources::{Successors, successors}; #[stable(feature = "rust1", since = "1.0.0")] pub use self::traits::{FromIterator, IntoIterator, DoubleEndedIterator, Extend}; @@ -351,10 +340,27 @@ pub use self::traits::FusedIterator; #[unstable(feature = "trusted_len", issue = "37572")] pub use self::traits::TrustedLen; -mod iterator; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::adapters::{Rev, Cycle, Chain, Zip, Map, Filter, FilterMap, Enumerate}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::adapters::{Peekable, SkipWhile, TakeWhile, Skip, Take, Scan, FlatMap}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::adapters::{Fuse, Inspect}; +#[stable(feature = "iter_cloned", since = "1.1.0")] +pub use self::adapters::Cloned; +#[stable(feature = "iterator_step_by", since = "1.28.0")] +pub use self::adapters::StepBy; +#[stable(feature = "iterator_flatten", since = "1.29.0")] +pub use self::adapters::Flatten; +#[unstable(feature = "iter_copied", issue = "57127")] +pub use self::adapters::Copied; + +pub(crate) use self::adapters::TrustedRandomAccess; + mod range; mod sources; mod traits; +mod adapters; /// Used to make try_fold closures more like normal loops #[derive(PartialEq)] @@ -405,2661 +411,3 @@ impl LoopState { } } } - -/// A double-ended iterator with the direction inverted. -/// -/// This `struct` is created by the [`rev`] method on [`Iterator`]. See its -/// documentation for more. 
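For context (illustrative only, not part of this patch), a minimal sketch of the user-visible behavior of the `Rev` adapter through the stable `Iterator::rev` method, which simply answers `next()` with the inner iterator's `next_back()`:

```rust
fn main() {
    let v = vec![1, 2, 3];
    // rev() delegates next() to the inner iterator's next_back().
    let reversed: Vec<i32> = v.into_iter().rev().collect();
    assert_eq!(reversed, [3, 2, 1]);
}
```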
-/// -/// [`rev`]: trait.Iterator.html#method.rev -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Rev { - iter: T -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Rev where I: DoubleEndedIterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { self.iter.next_back() } - #[inline] - fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } - - fn try_fold(&mut self, init: B, f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - self.iter.try_rfold(init, f) - } - - fn fold(self, init: Acc, f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, f) - } - - #[inline] - fn find
<P>
(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool - { - self.iter.rfind(predicate) - } - - #[inline] - fn rposition
<P>
(&mut self, predicate: P) -> Option where - P: FnMut(Self::Item) -> bool - { - self.iter.position(predicate) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Rev where I: DoubleEndedIterator { - #[inline] - fn next_back(&mut self) -> Option<::Item> { self.iter.next() } - - fn try_rfold(&mut self, init: B, f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - self.iter.try_fold(init, f) - } - - fn rfold(self, init: Acc, f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, f) - } - - fn rfind
<P>
(&mut self, predicate: P) -> Option - where P: FnMut(&Self::Item) -> bool - { - self.iter.find(predicate) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Rev - where I: ExactSizeIterator + DoubleEndedIterator -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Rev - where I: FusedIterator + DoubleEndedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Rev - where I: TrustedLen + DoubleEndedIterator {} - -/// An iterator that clones the elements of an underlying iterator. -/// -/// This `struct` is created by the [`cloned`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`cloned`]: trait.Iterator.html#method.cloned -/// [`Iterator`]: trait.Iterator.html -#[stable(feature = "iter_cloned", since = "1.1.0")] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[derive(Clone, Debug)] -pub struct Cloned { - it: I, -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> Iterator for Cloned - where I: Iterator, T: Clone -{ - type Item = T; - - fn next(&mut self) -> Option { - self.it.next().cloned() - } - - fn size_hint(&self) -> (usize, Option) { - self.it.size_hint() - } - - fn try_fold(&mut self, init: B, mut f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - self.it.try_fold(init, move |acc, elt| f(acc, elt.clone())) - } - - fn fold(self, init: Acc, mut f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.fold(init, move |acc, elt| f(acc, elt.clone())) - } -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> DoubleEndedIterator for Cloned - where I: DoubleEndedIterator, T: Clone -{ - fn next_back(&mut self) -> Option { - self.it.next_back().cloned() - } - - fn try_rfold(&mut self, init: B, mut f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - self.it.try_rfold(init, move |acc, elt| f(acc, elt.clone())) - } - - fn rfold(self, init: Acc, mut f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - self.it.rfold(init, move |acc, elt| f(acc, elt.clone())) - } -} - -#[stable(feature = "iter_cloned", since = "1.1.0")] -impl<'a, I, T: 'a> ExactSizeIterator for Cloned - where I: ExactSizeIterator, T: Clone -{ - fn len(&self) -> usize { - self.it.len() - } - - fn is_empty(&self) -> bool { - self.it.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl<'a, I, T: 'a> FusedIterator for Cloned - where I: FusedIterator, T: Clone -{} - -#[doc(hidden)] -unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned - where I: TrustedRandomAccess, T: Clone -{ - default unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { - self.it.get_unchecked(i).clone() - } - - #[inline] - default fn may_have_side_effect() -> bool { true } -} - -#[doc(hidden)] -unsafe impl<'a, I, T: 'a> TrustedRandomAccess for Cloned - where I: TrustedRandomAccess, T: Copy -{ - unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { - *self.it.get_unchecked(i) - } - - #[inline] - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, I, T: 'a> TrustedLen for Cloned - where I: TrustedLen, - T: Clone -{} - -/// An iterator that repeats endlessly. -/// -/// This `struct` is created by the [`cycle`] method on [`Iterator`]. 
See its -/// documentation for more. -/// -/// [`cycle`]: trait.Iterator.html#method.cycle -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Cycle { - orig: I, - iter: I, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Cycle where I: Clone + Iterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - match self.iter.next() { - None => { self.iter = self.orig.clone(); self.iter.next() } - y => y - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - // the cycle iterator is either empty or infinite - match self.orig.size_hint() { - sz @ (0, Some(0)) => sz, - (0, _) => (0, None), - _ => (usize::MAX, None) - } - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Cycle where I: Clone + Iterator {} - -/// An iterator for stepping iterators by a custom amount. -/// -/// This `struct` is created by the [`step_by`] method on [`Iterator`]. See -/// its documentation for more. -/// -/// [`step_by`]: trait.Iterator.html#method.step_by -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "iterator_step_by", since = "1.28.0")] -#[derive(Clone, Debug)] -pub struct StepBy { - iter: I, - step: usize, - first_take: bool, -} - -#[stable(feature = "iterator_step_by", since = "1.28.0")] -impl Iterator for StepBy where I: Iterator { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.first_take { - self.first_take = false; - self.iter.next() - } else { - self.iter.nth(self.step) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let inner_hint = self.iter.size_hint(); - - if self.first_take { - let f = |n| if n == 0 { 0 } else { 1 + (n-1)/(self.step+1) }; - (f(inner_hint.0), inner_hint.1.map(f)) - } else { - let f = |n| n / (self.step+1); - (f(inner_hint.0), inner_hint.1.map(f)) - } - } - - #[inline] - fn nth(&mut self, mut n: usize) -> Option { - if self.first_take { - self.first_take = false; - let first = self.iter.next(); - if n == 0 { - return first; - } - n -= 1; - } - // n and self.step are indices, we need to add 1 to get the amount of elements - // When calling `.nth`, we need to subtract 1 again to convert back to an index - // step + 1 can't overflow because `.step_by` sets `self.step` to `step - 1` - let mut step = self.step + 1; - // n + 1 could overflow - // thus, if n is usize::MAX, instead of adding one, we call .nth(step) - if n == usize::MAX { - self.iter.nth(step - 1); - } else { - n += 1; - } - - // overflow handling - loop { - let mul = n.checked_mul(step); - if unsafe { intrinsics::likely(mul.is_some()) } { - return self.iter.nth(mul.unwrap() - 1); - } - let div_n = usize::MAX / n; - let div_step = usize::MAX / step; - let nth_n = div_n * n; - let nth_step = div_step * step; - let nth = if nth_n > nth_step { - step -= div_n; - nth_n - } else { - n -= div_step; - nth_step - }; - self.iter.nth(nth - 1); - } - } -} - -// StepBy can only make the iterator shorter, so the len will still fit. -#[stable(feature = "iterator_step_by", since = "1.28.0")] -impl ExactSizeIterator for StepBy where I: ExactSizeIterator {} - -/// An iterator that strings two iterators together. -/// -/// This `struct` is created by the [`chain`] method on [`Iterator`]. See its -/// documentation for more. 
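As an aside for readers of the `Chain` state machine below, a small usage sketch of the stable `chain` API (illustrative only, not part of this diff):

```rust
fn main() {
    let a = [1, 2];
    let b = [3, 4];
    // Chain yields all of `a` first, then all of `b`.
    let chained: Vec<i32> = a.iter().chain(b.iter()).cloned().collect();
    assert_eq!(chained, [1, 2, 3, 4]);
    // A double-ended chain pulls from the back of `b` first.
    assert_eq!(a.iter().chain(b.iter()).next_back(), Some(&4));
}
```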
-/// -/// [`chain`]: trait.Iterator.html#method.chain -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Chain { - a: A, - b: B, - state: ChainState, -} - -// The iterator protocol specifies that iteration ends with the return value -// `None` from `.next()` (or `.next_back()`) and it is unspecified what -// further calls return. The chain adaptor must account for this since it uses -// two subiterators. -// -// It uses three states: -// -// - Both: `a` and `b` are remaining -// - Front: `a` remaining -// - Back: `b` remaining -// -// The fourth state (neither iterator is remaining) only occurs after Chain has -// returned None once, so we don't need to store this state. -#[derive(Clone, Debug)] -enum ChainState { - // both front and back iterator are remaining - Both, - // only front is remaining - Front, - // only back is remaining - Back, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Chain where - A: Iterator, - B: Iterator -{ - type Item = A::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.state { - ChainState::Both => match self.a.next() { - elt @ Some(..) => elt, - None => { - self.state = ChainState::Back; - self.b.next() - } - }, - ChainState::Front => self.a.next(), - ChainState::Back => self.b.next(), - } - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn count(self) -> usize { - match self.state { - ChainState::Both => self.a.count() + self.b.count(), - ChainState::Front => self.a.count(), - ChainState::Back => self.b.count(), - } - } - - fn try_fold(&mut self, init: Acc, mut f: F) -> R where - Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try - { - let mut accum = init; - match self.state { - ChainState::Both | ChainState::Front => { - accum = self.a.try_fold(accum, &mut f)?; - if let ChainState::Both = self.state { - self.state = ChainState::Back; - } - } - _ => { } - } - if let ChainState::Back = self.state { - accum = self.b.try_fold(accum, &mut f)?; - } - Try::from_ok(accum) - } - - fn fold(self, init: Acc, mut f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - let mut accum = init; - match self.state { - ChainState::Both | ChainState::Front => { - accum = self.a.fold(accum, &mut f); - } - _ => { } - } - match self.state { - ChainState::Both | ChainState::Back => { - accum = self.b.fold(accum, &mut f); - } - _ => { } - } - accum - } - - #[inline] - fn nth(&mut self, mut n: usize) -> Option { - match self.state { - ChainState::Both | ChainState::Front => { - for x in self.a.by_ref() { - if n == 0 { - return Some(x) - } - n -= 1; - } - if let ChainState::Both = self.state { - self.state = ChainState::Back; - } - } - ChainState::Back => {} - } - if let ChainState::Back = self.state { - self.b.nth(n) - } else { - None - } - } - - #[inline] - fn find
<P>
(&mut self, mut predicate: P) -> Option where - P: FnMut(&Self::Item) -> bool, - { - match self.state { - ChainState::Both => match self.a.find(&mut predicate) { - None => { - self.state = ChainState::Back; - self.b.find(predicate) - } - v => v - }, - ChainState::Front => self.a.find(predicate), - ChainState::Back => self.b.find(predicate), - } - } - - #[inline] - fn last(self) -> Option { - match self.state { - ChainState::Both => { - // Must exhaust a before b. - let a_last = self.a.last(); - let b_last = self.b.last(); - b_last.or(a_last) - }, - ChainState::Front => self.a.last(), - ChainState::Back => self.b.last() - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let lower = a_lower.saturating_add(b_lower); - - let upper = match (a_upper, b_upper) { - (Some(x), Some(y)) => x.checked_add(y), - _ => None - }; - - (lower, upper) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Chain where - A: DoubleEndedIterator, - B: DoubleEndedIterator, -{ - #[inline] - fn next_back(&mut self) -> Option { - match self.state { - ChainState::Both => match self.b.next_back() { - elt @ Some(..) => elt, - None => { - self.state = ChainState::Front; - self.a.next_back() - } - }, - ChainState::Front => self.a.next_back(), - ChainState::Back => self.b.next_back(), - } - } - - fn try_rfold(&mut self, init: Acc, mut f: F) -> R where - Self: Sized, F: FnMut(Acc, Self::Item) -> R, R: Try - { - let mut accum = init; - match self.state { - ChainState::Both | ChainState::Back => { - accum = self.b.try_rfold(accum, &mut f)?; - if let ChainState::Both = self.state { - self.state = ChainState::Front; - } - } - _ => { } - } - if let ChainState::Front = self.state { - accum = self.a.try_rfold(accum, &mut f)?; - } - Try::from_ok(accum) - } - - fn rfold(self, init: Acc, mut f: F) -> Acc - where F: FnMut(Acc, Self::Item) -> Acc, - { - let mut accum = init; - match self.state { - ChainState::Both | ChainState::Back => { - accum = self.b.rfold(accum, &mut f); - } - _ => { } - } - match self.state { - ChainState::Both | ChainState::Front => { - accum = self.a.rfold(accum, &mut f); - } - _ => { } - } - accum - } - -} - -// Note: *both* must be fused to handle double-ended iterators. -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Chain - where A: FusedIterator, - B: FusedIterator, -{} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Chain - where A: TrustedLen, B: TrustedLen, -{} - -/// An iterator that iterates two other iterators simultaneously. -/// -/// This `struct` is created by the [`zip`] method on [`Iterator`]. See its -/// documentation for more. 
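Both the general `ZipImpl` path and the `TrustedRandomAccess`-specialized path below implement the same observable truncate-to-shorter semantics; a brief sketch of that behavior (illustrative, not part of this patch):

```rust
fn main() {
    let xs = [1, 2, 3];
    let ys = ['a', 'b'];
    // Zip stops as soon as either input is exhausted, so there are
    // min(xs.len(), ys.len()) == 2 pairs.
    let pairs: Vec<(i32, char)> = xs.iter().cloned().zip(ys.iter().cloned()).collect();
    assert_eq!(pairs, [(1, 'a'), (2, 'b')]);
}
```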
-/// -/// [`zip`]: trait.Iterator.html#method.zip -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Zip { - a: A, - b: B, - // index and len are only used by the specialized version of zip - index: usize, - len: usize, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Zip where A: Iterator, B: Iterator -{ - type Item = (A::Item, B::Item); - - #[inline] - fn next(&mut self) -> Option { - ZipImpl::next(self) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - ZipImpl::size_hint(self) - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - ZipImpl::nth(self, n) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Zip where - A: DoubleEndedIterator + ExactSizeIterator, - B: DoubleEndedIterator + ExactSizeIterator, -{ - #[inline] - fn next_back(&mut self) -> Option<(A::Item, B::Item)> { - ZipImpl::next_back(self) - } -} - -// Zip specialization trait -#[doc(hidden)] -trait ZipImpl { - type Item; - fn new(a: A, b: B) -> Self; - fn next(&mut self) -> Option; - fn size_hint(&self) -> (usize, Option); - fn nth(&mut self, n: usize) -> Option; - fn super_nth(&mut self, mut n: usize) -> Option { - while let Some(x) = self.next() { - if n == 0 { return Some(x) } - n -= 1; - } - None - } - fn next_back(&mut self) -> Option - where A: DoubleEndedIterator + ExactSizeIterator, - B: DoubleEndedIterator + ExactSizeIterator; -} - -// General Zip impl -#[doc(hidden)] -impl ZipImpl for Zip - where A: Iterator, B: Iterator -{ - type Item = (A::Item, B::Item); - default fn new(a: A, b: B) -> Self { - Zip { - a, - b, - index: 0, // unused - len: 0, // unused - } - } - - #[inline] - default fn next(&mut self) -> Option<(A::Item, B::Item)> { - self.a.next().and_then(|x| { - self.b.next().and_then(|y| { - Some((x, y)) - }) - }) - } - - #[inline] - default fn nth(&mut self, n: usize) -> Option { - self.super_nth(n) - } - - #[inline] - default fn next_back(&mut self) -> Option<(A::Item, B::Item)> - where A: DoubleEndedIterator + ExactSizeIterator, - B: DoubleEndedIterator + ExactSizeIterator - { - let a_sz = self.a.len(); - let b_sz = self.b.len(); - if a_sz != b_sz { - // Adjust a, b to equal length - if a_sz > b_sz { - for _ in 0..a_sz - b_sz { self.a.next_back(); } - } else { - for _ in 0..b_sz - a_sz { self.b.next_back(); } - } - } - match (self.a.next_back(), self.b.next_back()) { - (Some(x), Some(y)) => Some((x, y)), - (None, None) => None, - _ => unreachable!(), - } - } - - #[inline] - default fn size_hint(&self) -> (usize, Option) { - let (a_lower, a_upper) = self.a.size_hint(); - let (b_lower, b_upper) = self.b.size_hint(); - - let lower = cmp::min(a_lower, b_lower); - - let upper = match (a_upper, b_upper) { - (Some(x), Some(y)) => Some(cmp::min(x,y)), - (Some(x), None) => Some(x), - (None, Some(y)) => Some(y), - (None, None) => None - }; - - (lower, upper) - } -} - -#[doc(hidden)] -impl ZipImpl for Zip - where A: TrustedRandomAccess, B: TrustedRandomAccess -{ - fn new(a: A, b: B) -> Self { - let len = cmp::min(a.len(), b.len()); - Zip { - a, - b, - index: 0, - len, - } - } - - #[inline] - fn next(&mut self) -> Option<(A::Item, B::Item)> { - if self.index < self.len { - let i = self.index; - self.index += 1; - unsafe { - Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) - } - } else if A::may_have_side_effect() && self.index < self.a.len() { - // match the base implementation's potential 
side effects - unsafe { - self.a.get_unchecked(self.index); - } - self.index += 1; - None - } else { - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let len = self.len - self.index; - (len, Some(len)) - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - let delta = cmp::min(n, self.len - self.index); - let end = self.index + delta; - while self.index < end { - let i = self.index; - self.index += 1; - if A::may_have_side_effect() { - unsafe { self.a.get_unchecked(i); } - } - if B::may_have_side_effect() { - unsafe { self.b.get_unchecked(i); } - } - } - - self.super_nth(n - delta) - } - - #[inline] - fn next_back(&mut self) -> Option<(A::Item, B::Item)> - where A: DoubleEndedIterator + ExactSizeIterator, - B: DoubleEndedIterator + ExactSizeIterator - { - // Adjust a, b to equal length - if A::may_have_side_effect() { - let sz = self.a.len(); - if sz > self.len { - for _ in 0..sz - cmp::max(self.len, self.index) { - self.a.next_back(); - } - } - } - if B::may_have_side_effect() { - let sz = self.b.len(); - if sz > self.len { - for _ in 0..sz - self.len { - self.b.next_back(); - } - } - } - if self.index < self.len { - self.len -= 1; - let i = self.len; - unsafe { - Some((self.a.get_unchecked(i), self.b.get_unchecked(i))) - } - } else { - None - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Zip - where A: ExactSizeIterator, B: ExactSizeIterator {} - -#[doc(hidden)] -unsafe impl TrustedRandomAccess for Zip - where A: TrustedRandomAccess, - B: TrustedRandomAccess, -{ - unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) { - (self.a.get_unchecked(i), self.b.get_unchecked(i)) - } - - fn may_have_side_effect() -> bool { - A::may_have_side_effect() || B::may_have_side_effect() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Zip - where A: FusedIterator, B: FusedIterator, {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Zip - where A: TrustedLen, B: TrustedLen, -{} - -/// An iterator that maps the values of `iter` with `f`. -/// -/// This `struct` is created by the [`map`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`map`]: trait.Iterator.html#method.map -/// [`Iterator`]: trait.Iterator.html -/// -/// # Notes about side effects -/// -/// The [`map`] iterator implements [`DoubleEndedIterator`], meaning that -/// you can also [`map`] backwards: -/// -/// ```rust -/// let v: Vec = vec![1, 2, 3].into_iter().map(|x| x + 1).rev().collect(); -/// -/// assert_eq!(v, [4, 3, 2]); -/// ``` -/// -/// [`DoubleEndedIterator`]: trait.DoubleEndedIterator.html -/// -/// But if your closure has state, iterating backwards may act in a way you do -/// not expect. Let's go through an example. First, in the forward direction: -/// -/// ```rust -/// let mut c = 0; -/// -/// for pair in vec!['a', 'b', 'c'].into_iter() -/// .map(|letter| { c += 1; (letter, c) }) { -/// println!("{:?}", pair); -/// } -/// ``` -/// -/// This will print "('a', 1), ('b', 2), ('c', 3)". -/// -/// Now consider this twist where we add a call to `rev`. This version will -/// print `('c', 1), ('b', 2), ('a', 3)`. Note that the letters are reversed, -/// but the values of the counter still go in order. This is because `map()` is -/// still being called lazily on each item, but we are popping items off the -/// back of the vector now, instead of shifting them from the front. 
-/// -/// ```rust -/// let mut c = 0; -/// -/// for pair in vec!['a', 'b', 'c'].into_iter() -/// .map(|letter| { c += 1; (letter, c) }) -/// .rev() { -/// println!("{:?}", pair); -/// } -/// ``` -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Map { - iter: I, - f: F, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Map { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Map") - .field("iter", &self.iter) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Map where F: FnMut(I::Item) -> B { - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().map(&mut self.f) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - fn try_fold(&mut self, init: Acc, mut g: G) -> R where - Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_fold(init, move |acc, elt| g(acc, f(elt))) - } - - fn fold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.fold(init, move |acc, elt| g(acc, f(elt))) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Map where - F: FnMut(I::Item) -> B, -{ - #[inline] - fn next_back(&mut self) -> Option { - self.iter.next_back().map(&mut self.f) - } - - fn try_rfold(&mut self, init: Acc, mut g: G) -> R where - Self: Sized, G: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_rfold(init, move |acc, elt| g(acc, f(elt))) - } - - fn rfold(self, init: Acc, mut g: G) -> Acc - where G: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.rfold(init, move |acc, elt| g(acc, f(elt))) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Map - where F: FnMut(I::Item) -> B -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Map - where F: FnMut(I::Item) -> B {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Map - where I: TrustedLen, - F: FnMut(I::Item) -> B {} - -#[doc(hidden)] -unsafe impl TrustedRandomAccess for Map - where I: TrustedRandomAccess, - F: FnMut(I::Item) -> B, -{ - unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item { - (self.f)(self.iter.get_unchecked(i)) - } - #[inline] - fn may_have_side_effect() -> bool { true } -} - -/// An iterator that filters the elements of `iter` with `predicate`. -/// -/// This `struct` is created by the [`filter`] method on [`Iterator`]. See its -/// documentation for more. 
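A short usage sketch of the stable `filter` API, including the `.filter(p).count()` pattern that the specialized `count` below keeps branchless (illustrative only, not part of this diff):

```rust
fn main() {
    // Keep only the items for which the predicate returns true.
    let evens: Vec<i32> = (1..=10).filter(|n| n % 2 == 0).collect();
    assert_eq!(evens, [2, 4, 6, 8, 10]);
    // The .filter(p).count() idiom discussed in the specialized count().
    assert_eq!((1..=10).filter(|n| n % 2 == 0).count(), 5);
}
```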
-/// -/// [`filter`]: trait.Iterator.html#method.filter -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Filter { - iter: I, - predicate: P, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Filter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Filter") - .field("iter", &self.iter) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Filter where P: FnMut(&I::Item) -> bool { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - for x in &mut self.iter { - if (self.predicate)(&x) { - return Some(x); - } - } - None - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - // this special case allows the compiler to make `.filter(_).count()` - // branchless. Barring perfect branch prediction (which is unattainable in - // the general case), this will be much faster in >90% of cases (containing - // virtually all real workloads) and only a tiny bit slower in the rest. - // - // Having this specialization thus allows us to write `.filter(p).count()` - // where we would otherwise write `.map(|x| p(x) as usize).sum()`, which is - // less readable and also less backwards-compatible to Rust before 1.10. - // - // Using the branchless version will also simplify the LLVM byte code, thus - // leaving more budget for LLVM optimizations. - #[inline] - fn count(mut self) -> usize { - let mut count = 0; - for x in &mut self.iter { - count += (self.predicate)(&x) as usize; - } - count - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let predicate = &mut self.predicate; - self.iter.try_fold(init, move |acc, item| if predicate(&item) { - fold(acc, item) - } else { - Try::from_ok(acc) - }) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut predicate = self.predicate; - self.iter.fold(init, move |acc, item| if predicate(&item) { - fold(acc, item) - } else { - acc - }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Filter - where P: FnMut(&I::Item) -> bool, -{ - #[inline] - fn next_back(&mut self) -> Option { - for x in self.iter.by_ref().rev() { - if (self.predicate)(&x) { - return Some(x); - } - } - None - } - - #[inline] - fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let predicate = &mut self.predicate; - self.iter.try_rfold(init, move |acc, item| if predicate(&item) { - fold(acc, item) - } else { - Try::from_ok(acc) - }) - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut predicate = self.predicate; - self.iter.rfold(init, move |acc, item| if predicate(&item) { - fold(acc, item) - } else { - acc - }) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Filter - where P: FnMut(&I::Item) -> bool {} - -/// An iterator that uses `f` to both filter and map elements from `iter`. -/// -/// This `struct` is created by the [`filter_map`] method on [`Iterator`]. See its -/// documentation for more. 
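A small sketch of the stable `filter_map` API introduced just above (illustrative, not part of this patch):

```rust
fn main() {
    let words = ["1", "two", "3", "four"];
    // filter_map keeps only the items for which the closure returns Some.
    let numbers: Vec<i32> = words.iter().filter_map(|w| w.parse().ok()).collect();
    assert_eq!(numbers, [1, 3]);
}
```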
-/// -/// [`filter_map`]: trait.Iterator.html#method.filter_map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct FilterMap { - iter: I, - f: F, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for FilterMap { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("FilterMap") - .field("iter", &self.iter) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for FilterMap - where F: FnMut(I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - for x in self.iter.by_ref() { - if let Some(y) = (self.f)(x) { - return Some(y); - } - } - None - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_fold(init, move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => Try::from_ok(acc), - }) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.fold(init, move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => acc, - }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FilterMap - where F: FnMut(I::Item) -> Option, -{ - #[inline] - fn next_back(&mut self) -> Option { - for x in self.iter.by_ref().rev() { - if let Some(y) = (self.f)(x) { - return Some(y); - } - } - None - } - - #[inline] - fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_rfold(init, move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => Try::from_ok(acc), - }) - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.rfold(init, move |acc, item| match f(item) { - Some(x) => fold(acc, x), - None => acc, - }) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for FilterMap - where F: FnMut(I::Item) -> Option {} - -/// An iterator that yields the current count and the element during iteration. -/// -/// This `struct` is created by the [`enumerate`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`enumerate`]: trait.Iterator.html#method.enumerate -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Enumerate { - iter: I, - count: usize, -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Enumerate where I: Iterator { - type Item = (usize, ::Item); - - /// # Overflow Behavior - /// - /// The method does no guarding against overflows, so enumerating more than - /// `usize::MAX` elements either produces the wrong result or panics. If - /// debug assertions are enabled, a panic is guaranteed. - /// - /// # Panics - /// - /// Might panic if the index of the element overflows a `usize`. 
- #[inline] - #[rustc_inherit_overflow_checks] - fn next(&mut self) -> Option<(usize, ::Item)> { - self.iter.next().map(|a| { - let ret = (self.count, a); - // Possible undefined overflow. - self.count += 1; - ret - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> { - self.iter.nth(n).map(|a| { - let i = self.count + n; - self.count = i + 1; - (i, a) - }) - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let count = &mut self.count; - self.iter.try_fold(init, move |acc, item| { - let acc = fold(acc, (*count, item)); - *count += 1; - acc - }) - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut count = self.count; - self.iter.fold(init, move |acc, item| { - let acc = fold(acc, (count, item)); - count += 1; - acc - }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Enumerate where - I: ExactSizeIterator + DoubleEndedIterator -{ - #[inline] - fn next_back(&mut self) -> Option<(usize, ::Item)> { - self.iter.next_back().map(|a| { - let len = self.iter.len(); - // Can safely add, `ExactSizeIterator` promises that the number of - // elements fits into a `usize`. - (self.count + len, a) - }) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - // Can safely add and subtract the count, as `ExactSizeIterator` promises - // that the number of elements fits into a `usize`. - let mut count = self.count + self.iter.len(); - self.iter.try_rfold(init, move |acc, item| { - count -= 1; - fold(acc, (count, item)) - }) - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - // Can safely add and subtract the count, as `ExactSizeIterator` promises - // that the number of elements fits into a `usize`. - let mut count = self.count + self.iter.len(); - self.iter.rfold(init, move |acc, item| { - count -= 1; - fold(acc, (count, item)) - }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Enumerate where I: ExactSizeIterator { - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[doc(hidden)] -unsafe impl TrustedRandomAccess for Enumerate - where I: TrustedRandomAccess -{ - unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) { - (self.count + i, self.iter.get_unchecked(i)) - } - - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Enumerate where I: FusedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Enumerate - where I: TrustedLen, -{} - - -/// An iterator with a `peek()` that returns an optional reference to the next -/// element. -/// -/// This `struct` is created by the [`peekable`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`peekable`]: trait.Iterator.html#method.peekable -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Peekable { - iter: I, - /// Remember a peeked value, even if it was None. - peeked: Option>, -} - -// Peekable must remember if a None has been seen in the `.peek()` method. -// It ensures that `.peek(); .peek();` or `.peek(); .next();` only advances the -// underlying iterator at most once. This does not by itself make the iterator -// fused. -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Peekable { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - match self.peeked.take() { - Some(v) => v, - None => self.iter.next(), - } - } - - #[inline] - #[rustc_inherit_overflow_checks] - fn count(mut self) -> usize { - match self.peeked.take() { - Some(None) => 0, - Some(Some(_)) => 1 + self.iter.count(), - None => self.iter.count(), - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - match self.peeked.take() { - Some(None) => None, - Some(v @ Some(_)) if n == 0 => v, - Some(Some(_)) => self.iter.nth(n - 1), - None => self.iter.nth(n), - } - } - - #[inline] - fn last(mut self) -> Option { - let peek_opt = match self.peeked.take() { - Some(None) => return None, - Some(v) => v, - None => None, - }; - self.iter.last().or(peek_opt) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let peek_len = match self.peeked { - Some(None) => return (0, Some(0)), - Some(Some(_)) => 1, - None => 0, - }; - let (lo, hi) = self.iter.size_hint(); - let lo = lo.saturating_add(peek_len); - let hi = hi.and_then(|x| x.checked_add(peek_len)); - (lo, hi) - } - - #[inline] - fn try_fold(&mut self, init: B, mut f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - let acc = match self.peeked.take() { - Some(None) => return Try::from_ok(init), - Some(Some(v)) => f(init, v)?, - None => init, - }; - self.iter.try_fold(acc, f) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let acc = match self.peeked { - Some(None) => return init, - Some(Some(v)) => fold(init, v), - None => init, - }; - self.iter.fold(acc, fold) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Peekable {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Peekable {} - -impl Peekable { - /// Returns a reference to the next() value without advancing the iterator. - /// - /// Like [`next`], if there is a value, it is wrapped in a `Some(T)`. - /// But if the iteration is over, `None` is returned. - /// - /// [`next`]: trait.Iterator.html#tymethod.next - /// - /// Because `peek()` returns a reference, and many iterators iterate over - /// references, there can be a possibly confusing situation where the - /// return value is a double reference. You can see this effect in the - /// examples below. 
- /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let xs = [1, 2, 3]; - /// - /// let mut iter = xs.iter().peekable(); - /// - /// // peek() lets us see into the future - /// assert_eq!(iter.peek(), Some(&&1)); - /// assert_eq!(iter.next(), Some(&1)); - /// - /// assert_eq!(iter.next(), Some(&2)); - /// - /// // The iterator does not advance even if we `peek` multiple times - /// assert_eq!(iter.peek(), Some(&&3)); - /// assert_eq!(iter.peek(), Some(&&3)); - /// - /// assert_eq!(iter.next(), Some(&3)); - /// - /// // After the iterator is finished, so is `peek()` - /// assert_eq!(iter.peek(), None); - /// assert_eq!(iter.next(), None); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - pub fn peek(&mut self) -> Option<&I::Item> { - let iter = &mut self.iter; - self.peeked.get_or_insert_with(|| iter.next()).as_ref() - } -} - -/// An iterator that rejects elements while `predicate` is true. -/// -/// This `struct` is created by the [`skip_while`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`skip_while`]: trait.Iterator.html#method.skip_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct SkipWhile { - iter: I, - flag: bool, - predicate: P, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for SkipWhile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("SkipWhile") - .field("iter", &self.iter) - .field("flag", &self.flag) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for SkipWhile - where P: FnMut(&I::Item) -> bool -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - let flag = &mut self.flag; - let pred = &mut self.predicate; - self.iter.find(move |x| { - if *flag || !pred(x) { - *flag = true; - true - } else { - false - } - }) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - - #[inline] - fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if !self.flag { - match self.next() { - Some(v) => init = fold(init, v)?, - None => return Try::from_ok(init), - } - } - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(mut self, mut init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - if !self.flag { - match self.next() { - Some(v) => init = fold(init, v), - None => return init, - } - } - self.iter.fold(init, fold) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for SkipWhile - where I: FusedIterator, P: FnMut(&I::Item) -> bool {} - -/// An iterator that only accepts elements while `predicate` is true. -/// -/// This `struct` is created by the [`take_while`] method on [`Iterator`]. See its -/// documentation for more. 
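Illustrative only (not part of this diff): how the `take_while` adapter introduced above, and its `skip_while` counterpart, split a sequence at the first element that fails the predicate:

```rust
fn main() {
    let xs = [1, 2, 3, 10, 4];
    // take_while stops at the first failing element and never yields
    // anything after it, even if later elements would pass again.
    let prefix: Vec<i32> = xs.iter().cloned().take_while(|&n| n < 5).collect();
    assert_eq!(prefix, [1, 2, 3]);
    // skip_while drops that prefix and yields everything after it.
    let rest: Vec<i32> = xs.iter().cloned().skip_while(|&n| n < 5).collect();
    assert_eq!(rest, [10, 4]);
}
```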
-/// -/// [`take_while`]: trait.Iterator.html#method.take_while -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct TakeWhile { - iter: I, - flag: bool, - predicate: P, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for TakeWhile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("TakeWhile") - .field("iter", &self.iter) - .field("flag", &self.flag) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for TakeWhile - where P: FnMut(&I::Item) -> bool -{ - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.flag { - None - } else { - self.iter.next().and_then(|x| { - if (self.predicate)(&x) { - Some(x) - } else { - self.flag = true; - None - } - }) - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.flag { - (0, Some(0)) - } else { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the predicate - } - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if self.flag { - Try::from_ok(init) - } else { - let flag = &mut self.flag; - let p = &mut self.predicate; - self.iter.try_fold(init, move |acc, x|{ - if p(&x) { - LoopState::from_try(fold(acc, x)) - } else { - *flag = true; - LoopState::Break(Try::from_ok(acc)) - } - }).into_try() - } - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for TakeWhile - where I: FusedIterator, P: FnMut(&I::Item) -> bool {} - -/// An iterator that skips over `n` elements of `iter`. -/// -/// This `struct` is created by the [`skip`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`skip`]: trait.Iterator.html#method.skip -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Skip { - iter: I, - n: usize -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Skip where I: Iterator { - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option { - if self.n == 0 { - self.iter.next() - } else { - let old_n = self.n; - self.n = 0; - self.iter.nth(old_n) - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - // Can't just add n + self.n due to overflow. - if self.n == 0 { - self.iter.nth(n) - } else { - let to_skip = self.n; - self.n = 0; - // nth(n) skips n+1 - if self.iter.nth(to_skip-1).is_none() { - return None; - } - self.iter.nth(n) - } - } - - #[inline] - fn count(self) -> usize { - self.iter.count().saturating_sub(self.n) - } - - #[inline] - fn last(mut self) -> Option { - if self.n == 0 { - self.iter.last() - } else { - let next = self.next(); - if next.is_some() { - // recurse. n should be 0. 
- self.last().or(next) - } else { - None - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (lower, upper) = self.iter.size_hint(); - - let lower = lower.saturating_sub(self.n); - let upper = upper.map(|x| x.saturating_sub(self.n)); - - (lower, upper) - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let n = self.n; - self.n = 0; - if n > 0 { - // nth(n) skips n+1 - if self.iter.nth(n - 1).is_none() { - return Try::from_ok(init); - } - } - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(mut self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.n > 0 { - // nth(n) skips n+1 - if self.iter.nth(self.n - 1).is_none() { - return init; - } - } - self.iter.fold(init, fold) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Skip where I: ExactSizeIterator {} - -#[stable(feature = "double_ended_skip_iterator", since = "1.9.0")] -impl DoubleEndedIterator for Skip where I: DoubleEndedIterator + ExactSizeIterator { - fn next_back(&mut self) -> Option { - if self.len() > 0 { - self.iter.next_back() - } else { - None - } - } - - fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let mut n = self.len(); - if n == 0 { - Try::from_ok(init) - } else { - self.iter.try_rfold(init, move |acc, x| { - n -= 1; - let r = fold(acc, x); - if n == 0 { LoopState::Break(r) } - else { LoopState::from_try(r) } - }).into_try() - } - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Skip where I: FusedIterator {} - -/// An iterator that only iterates over the first `n` iterations of `iter`. -/// -/// This `struct` is created by the [`take`] method on [`Iterator`]. See its -/// documentation for more. 
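For context, a minimal sketch combining the stable `skip` and `take` adapters covered in this stretch of the diff (illustrative only, not part of this patch):

```rust
fn main() {
    // skip(n) discards the first n items; take(n) yields at most n items.
    let page: Vec<i32> = (0..100).skip(10).take(5).collect();
    assert_eq!(page, [10, 11, 12, 13, 14]);
}
```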
-/// -/// [`take`]: trait.Iterator.html#method.take -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Take { - iter: I, - n: usize -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Take where I: Iterator{ - type Item = ::Item; - - #[inline] - fn next(&mut self) -> Option<::Item> { - if self.n != 0 { - self.n -= 1; - self.iter.next() - } else { - None - } - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - if self.n > n { - self.n -= n + 1; - self.iter.nth(n) - } else { - if self.n > 0 { - self.iter.nth(self.n - 1); - self.n = 0; - } - None - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - if self.n == 0 { - return (0, Some(0)); - } - - let (lower, upper) = self.iter.size_hint(); - - let lower = cmp::min(lower, self.n); - - let upper = match upper { - Some(x) if x < self.n => Some(x), - _ => Some(self.n) - }; - - (lower, upper) - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if self.n == 0 { - Try::from_ok(init) - } else { - let n = &mut self.n; - self.iter.try_fold(init, move |acc, x| { - *n -= 1; - let r = fold(acc, x); - if *n == 0 { LoopState::Break(r) } - else { LoopState::from_try(r) } - }).into_try() - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Take where I: ExactSizeIterator {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Take where I: FusedIterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for Take {} - -/// An iterator to maintain state while iterating another iterator. -/// -/// This `struct` is created by the [`scan`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`scan`]: trait.Iterator.html#method.scan -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Scan { - iter: I, - f: F, - state: St, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Scan { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Scan") - .field("iter", &self.iter) - .field("state", &self.state) - .finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Scan where - I: Iterator, - F: FnMut(&mut St, I::Item) -> Option, -{ - type Item = B; - - #[inline] - fn next(&mut self) -> Option { - self.iter.next().and_then(|a| (self.f)(&mut self.state, a)) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (_, upper) = self.iter.size_hint(); - (0, upper) // can't know a lower bound, due to the scan function - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let state = &mut self.state; - let f = &mut self.f; - self.iter.try_fold(init, move |acc, x| { - match f(state, x) { - None => LoopState::Break(Try::from_ok(acc)), - Some(x) => LoopState::from_try(fold(acc, x)), - } - }).into_try() - } -} - -/// An iterator that maps each element to an iterator, and yields the elements -/// of the produced iterators. -/// -/// This `struct` is created by the [`flat_map`] method on [`Iterator`]. See its -/// documentation for more. 
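A brief usage sketch of the stable `flat_map` API introduced above (illustrative, not part of this patch):

```rust
fn main() {
    let words = ["hello", "world"];
    // flat_map maps each item to an iterator and yields those iterators'
    // items in sequence.
    let letters: String = words.iter().flat_map(|w| w.chars()).collect();
    assert_eq!(letters, "helloworld");
}
```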
-/// -/// [`flat_map`]: trait.Iterator.html#method.flat_map -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct FlatMap { - inner: FlattenCompat, ::IntoIter> -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Clone for FlatMap - where ::IntoIter: Clone -{ - fn clone(&self) -> Self { FlatMap { inner: self.inner.clone() } } -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for FlatMap - where U::IntoIter: fmt::Debug -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("FlatMap").field("inner", &self.inner).finish() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for FlatMap - where F: FnMut(I::Item) -> U, -{ - type Item = U::Item; - - #[inline] - fn next(&mut self) -> Option { self.inner.next() } - - #[inline] - fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.inner.try_fold(init, fold) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.inner.fold(init, fold) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for FlatMap - where F: FnMut(I::Item) -> U, - U: IntoIterator, - U::IntoIter: DoubleEndedIterator -{ - #[inline] - fn next_back(&mut self) -> Option { self.inner.next_back() } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.inner.try_rfold(init, fold) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.inner.rfold(init, fold) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for FlatMap - where I: FusedIterator, U: IntoIterator, F: FnMut(I::Item) -> U {} - -/// An iterator that flattens one level of nesting in an iterator of things -/// that can be turned into iterators. -/// -/// This `struct` is created by the [`flatten`] method on [`Iterator`]. See its -/// documentation for more. 
-/// -/// [`flatten`]: trait.Iterator.html#method.flatten -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "iterator_flatten", since = "1.29.0")] -pub struct Flatten -where I::Item: IntoIterator { - inner: FlattenCompat::IntoIter>, -} - -#[stable(feature = "iterator_flatten", since = "1.29.0")] -impl fmt::Debug for Flatten - where I: Iterator + fmt::Debug, U: Iterator + fmt::Debug, - I::Item: IntoIterator, -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Flatten").field("inner", &self.inner).finish() - } -} - -#[stable(feature = "iterator_flatten", since = "1.29.0")] -impl Clone for Flatten - where I: Iterator + Clone, U: Iterator + Clone, - I::Item: IntoIterator, -{ - fn clone(&self) -> Self { Flatten { inner: self.inner.clone() } } -} - -#[stable(feature = "iterator_flatten", since = "1.29.0")] -impl Iterator for Flatten - where I: Iterator, U: Iterator, - I::Item: IntoIterator -{ - type Item = U::Item; - - #[inline] - fn next(&mut self) -> Option { self.inner.next() } - - #[inline] - fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.inner.try_fold(init, fold) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.inner.fold(init, fold) - } -} - -#[stable(feature = "iterator_flatten", since = "1.29.0")] -impl DoubleEndedIterator for Flatten - where I: DoubleEndedIterator, U: DoubleEndedIterator, - I::Item: IntoIterator -{ - #[inline] - fn next_back(&mut self) -> Option { self.inner.next_back() } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.inner.try_rfold(init, fold) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.inner.rfold(init, fold) - } -} - -#[stable(feature = "iterator_flatten", since = "1.29.0")] -impl FusedIterator for Flatten - where I: FusedIterator, U: Iterator, - I::Item: IntoIterator {} - -/// Adapts an iterator by flattening it, for use in `flatten()` and `flat_map()`. -fn flatten_compat(iter: I) -> FlattenCompat { - FlattenCompat { iter, frontiter: None, backiter: None } -} - -/// Real logic of both `Flatten` and `FlatMap` which simply delegate to -/// this type. 
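Since `FlattenCompat` backs both adapters, `iter.flatten()` and `iter.flat_map(f)` observe the same behavior when `f` is a plain conversion into an iterator; a small sketch of that equivalence (illustrative only, not part of this diff):

```rust
fn main() {
    let nested = vec![vec![1, 2], vec![3], vec![]];
    // Flattening one level of nesting...
    let flat: Vec<i32> = nested.clone().into_iter().flatten().collect();
    // ...matches flat_map with an identity-style conversion.
    let flat2: Vec<i32> = nested.into_iter().flat_map(|v| v).collect();
    assert_eq!(flat, flat2);
    assert_eq!(flat, [1, 2, 3]);
}
```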
-#[derive(Clone, Debug)] -struct FlattenCompat { - iter: I, - frontiter: Option, - backiter: Option, -} - -impl Iterator for FlattenCompat - where I: Iterator, U: Iterator, - I::Item: IntoIterator -{ - type Item = U::Item; - - #[inline] - fn next(&mut self) -> Option { - loop { - if let Some(ref mut inner) = self.frontiter { - if let elt@Some(_) = inner.next() { return elt } - } - match self.iter.next() { - None => return self.backiter.as_mut().and_then(|it| it.next()), - Some(inner) => self.frontiter = Some(inner.into_iter()), - } - } - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - let (flo, fhi) = self.frontiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); - let (blo, bhi) = self.backiter.as_ref().map_or((0, Some(0)), |it| it.size_hint()); - let lo = flo.saturating_add(blo); - match (self.iter.size_hint(), fhi, bhi) { - ((0, Some(0)), Some(a), Some(b)) => (lo, a.checked_add(b)), - _ => (lo, None) - } - } - - #[inline] - fn try_fold(&mut self, mut init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if let Some(ref mut front) = self.frontiter { - init = front.try_fold(init, &mut fold)?; - } - self.frontiter = None; - - { - let frontiter = &mut self.frontiter; - init = self.iter.try_fold(init, |acc, x| { - let mut mid = x.into_iter(); - let r = mid.try_fold(acc, &mut fold); - *frontiter = Some(mid); - r - })?; - } - self.frontiter = None; - - if let Some(ref mut back) = self.backiter { - init = back.try_fold(init, &mut fold)?; - } - self.backiter = None; - - Try::from_ok(init) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.frontiter.into_iter() - .chain(self.iter.map(IntoIterator::into_iter)) - .chain(self.backiter) - .fold(init, |acc, iter| iter.fold(acc, &mut fold)) - } -} - -impl DoubleEndedIterator for FlattenCompat - where I: DoubleEndedIterator, U: DoubleEndedIterator, - I::Item: IntoIterator -{ - #[inline] - fn next_back(&mut self) -> Option { - loop { - if let Some(ref mut inner) = self.backiter { - if let elt@Some(_) = inner.next_back() { return elt } - } - match self.iter.next_back() { - None => return self.frontiter.as_mut().and_then(|it| it.next_back()), - next => self.backiter = next.map(IntoIterator::into_iter), - } - } - } - - #[inline] - fn try_rfold(&mut self, mut init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if let Some(ref mut back) = self.backiter { - init = back.try_rfold(init, &mut fold)?; - } - self.backiter = None; - - { - let backiter = &mut self.backiter; - init = self.iter.try_rfold(init, |acc, x| { - let mut mid = x.into_iter(); - let r = mid.try_rfold(acc, &mut fold); - *backiter = Some(mid); - r - })?; - } - self.backiter = None; - - if let Some(ref mut front) = self.frontiter { - init = front.try_rfold(init, &mut fold)?; - } - self.frontiter = None; - - Try::from_ok(init) - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.frontiter.into_iter() - .chain(self.iter.map(IntoIterator::into_iter)) - .chain(self.backiter) - .rfold(init, |acc, iter| iter.rfold(acc, &mut fold)) - } -} - -/// An iterator that yields `None` forever after the underlying iterator -/// yields `None` once. -/// -/// This `struct` is created by the [`fuse`] method on [`Iterator`]. See its -/// documentation for more. 
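Illustrative only (not part of this patch): the guarantee the `Fuse` adapter introduced above provides, shown against a deliberately non-fused iterator:

```rust
fn main() {
    // An iterator that, contrary to the protocol's recommendation, resumes
    // yielding items after having returned None once.
    struct Alternate { state: u32 }
    impl Iterator for Alternate {
        type Item = u32;
        fn next(&mut self) -> Option<u32> {
            self.state += 1;
            if self.state % 2 == 1 { Some(self.state) } else { None }
        }
    }

    let mut it = Alternate { state: 0 };
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), None);
    assert_eq!(it.next(), Some(3)); // not fused: it "comes back to life"

    // fuse() guarantees None forever after the first None.
    let mut fused = Alternate { state: 0 }.fuse();
    assert_eq!(fused.next(), Some(1));
    assert_eq!(fused.next(), None);
    assert_eq!(fused.next(), None);
}
```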
-/// -/// [`fuse`]: trait.Iterator.html#method.fuse -/// [`Iterator`]: trait.Iterator.html -#[derive(Clone, Debug)] -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -pub struct Fuse { - iter: I, - done: bool -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Fuse where I: Iterator {} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Fuse where I: Iterator { - type Item = ::Item; - - #[inline] - default fn next(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next(); - self.done = next.is_none(); - next - } - } - - #[inline] - default fn nth(&mut self, n: usize) -> Option { - if self.done { - None - } else { - let nth = self.iter.nth(n); - self.done = nth.is_none(); - nth - } - } - - #[inline] - default fn last(self) -> Option { - if self.done { - None - } else { - self.iter.last() - } - } - - #[inline] - default fn count(self) -> usize { - if self.done { - 0 - } else { - self.iter.count() - } - } - - #[inline] - default fn size_hint(&self) -> (usize, Option) { - if self.done { - (0, Some(0)) - } else { - self.iter.size_hint() - } - } - - #[inline] - default fn try_fold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if self.done { - Try::from_ok(init) - } else { - let acc = self.iter.try_fold(init, fold)?; - self.done = true; - Try::from_ok(acc) - } - } - - #[inline] - default fn fold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.done { - init - } else { - self.iter.fold(init, fold) - } - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Fuse where I: DoubleEndedIterator { - #[inline] - default fn next_back(&mut self) -> Option<::Item> { - if self.done { - None - } else { - let next = self.iter.next_back(); - self.done = next.is_none(); - next - } - } - - #[inline] - default fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - if self.done { - Try::from_ok(init) - } else { - let acc = self.iter.try_rfold(init, fold)?; - self.done = true; - Try::from_ok(acc) - } - } - - #[inline] - default fn rfold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - if self.done { - init - } else { - self.iter.rfold(init, fold) - } - } -} - -unsafe impl TrustedRandomAccess for Fuse - where I: TrustedRandomAccess, -{ - unsafe fn get_unchecked(&mut self, i: usize) -> I::Item { - self.iter.get_unchecked(i) - } - - fn may_have_side_effect() -> bool { - I::may_have_side_effect() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl Iterator for Fuse where I: FusedIterator { - #[inline] - fn next(&mut self) -> Option<::Item> { - self.iter.next() - } - - #[inline] - fn nth(&mut self, n: usize) -> Option { - self.iter.nth(n) - } - - #[inline] - fn last(self) -> Option { - self.iter.last() - } - - #[inline] - fn count(self) -> usize { - self.iter.count() - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn try_fold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.iter.try_fold(init, fold) - } - - #[inline] - fn fold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.fold(init, fold) - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl 
DoubleEndedIterator for Fuse - where I: DoubleEndedIterator + FusedIterator -{ - #[inline] - fn next_back(&mut self) -> Option<::Item> { - self.iter.next_back() - } - - #[inline] - fn try_rfold(&mut self, init: Acc, fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - self.iter.try_rfold(init, fold) - } - - #[inline] - fn rfold(self, init: Acc, fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - self.iter.rfold(init, fold) - } -} - - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Fuse where I: ExactSizeIterator { - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -/// An iterator that calls a function with a reference to each element before -/// yielding it. -/// -/// This `struct` is created by the [`inspect`] method on [`Iterator`]. See its -/// documentation for more. -/// -/// [`inspect`]: trait.Iterator.html#method.inspect -/// [`Iterator`]: trait.Iterator.html -#[must_use = "iterator adaptors are lazy and do nothing unless consumed"] -#[stable(feature = "rust1", since = "1.0.0")] -#[derive(Clone)] -pub struct Inspect { - iter: I, - f: F, -} - -#[stable(feature = "core_impl_debug", since = "1.9.0")] -impl fmt::Debug for Inspect { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Inspect") - .field("iter", &self.iter) - .finish() - } -} - -impl Inspect where F: FnMut(&I::Item) { - #[inline] - fn do_inspect(&mut self, elt: Option) -> Option { - if let Some(ref a) = elt { - (self.f)(a); - } - - elt - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl Iterator for Inspect where F: FnMut(&I::Item) { - type Item = I::Item; - - #[inline] - fn next(&mut self) -> Option { - let next = self.iter.next(); - self.do_inspect(next) - } - - #[inline] - fn size_hint(&self) -> (usize, Option) { - self.iter.size_hint() - } - - #[inline] - fn try_fold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_fold(init, move |acc, item| { f(&item); fold(acc, item) }) - } - - #[inline] - fn fold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.fold(init, move |acc, item| { f(&item); fold(acc, item) }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl DoubleEndedIterator for Inspect - where F: FnMut(&I::Item), -{ - #[inline] - fn next_back(&mut self) -> Option { - let next = self.iter.next_back(); - self.do_inspect(next) - } - - #[inline] - fn try_rfold(&mut self, init: Acc, mut fold: Fold) -> R where - Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try - { - let f = &mut self.f; - self.iter.try_rfold(init, move |acc, item| { f(&item); fold(acc, item) }) - } - - #[inline] - fn rfold(self, init: Acc, mut fold: Fold) -> Acc - where Fold: FnMut(Acc, Self::Item) -> Acc, - { - let mut f = self.f; - self.iter.rfold(init, move |acc, item| { f(&item); fold(acc, item) }) - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for Inspect - where F: FnMut(&I::Item) -{ - fn len(&self) -> usize { - self.iter.len() - } - - fn is_empty(&self) -> bool { - self.iter.is_empty() - } -} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for Inspect - where F: FnMut(&I::Item) {} diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs index f0fd07b43cae0..aefed1890fef8 100644 --- a/src/libcore/iter/range.rs +++ 
b/src/libcore/iter/range.rs @@ -1,16 +1,6 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use convert::TryFrom; use mem; -use ops::{self, Add, Sub}; +use ops::{self, Add, Sub, Try}; use usize; use super::{FusedIterator, TrustedLen}; @@ -30,19 +20,19 @@ pub trait Step: Clone + PartialOrd + Sized { /// without overflow. fn steps_between(start: &Self, end: &Self) -> Option; - /// Replaces this step with `1`, returning itself + /// Replaces this step with `1`, returning itself. fn replace_one(&mut self) -> Self; - /// Replaces this step with `0`, returning itself + /// Replaces this step with `0`, returning itself. fn replace_zero(&mut self) -> Self; - /// Adds one to this step, returning the result + /// Adds one to this step, returning the result. fn add_one(&self) -> Self; - /// Subtracts one to this step, returning the result + /// Subtracts one to this step, returning the result. fn sub_one(&self) -> Self; - /// Add an usize, returning None on overflow + /// Adds a `usize`, returning `None` on overflow. fn add_usize(&self, n: usize) -> Option; } @@ -78,11 +68,9 @@ macro_rules! step_impl_unsigned { issue = "42168")] impl Step for $t { #[inline] - #[allow(trivial_numeric_casts)] fn steps_between(start: &$t, end: &$t) -> Option { if *start < *end { - // Note: We assume $t <= usize here - Some((*end - *start) as usize) + usize::try_from(*end - *start).ok() } else { Some(0) } @@ -108,13 +96,11 @@ macro_rules! step_impl_signed { issue = "42168")] impl Step for $t { #[inline] - #[allow(trivial_numeric_casts)] fn steps_between(start: &$t, end: &$t) -> Option { if *start < *end { - // Note: We assume $t <= isize here - // Use .wrapping_sub and cast to usize to compute the - // difference that may not fit inside the range of isize. - Some((*end as isize).wrapping_sub(*start as isize) as usize) + // Use .wrapping_sub and cast to unsigned to compute the + // difference that may not fit inside the range of $t. + usize::try_from(end.wrapping_sub(*start) as $unsigned).ok() } else { Some(0) } @@ -144,46 +130,9 @@ macro_rules! step_impl_signed { )*) } -macro_rules! step_impl_no_between { - ($($t:ty)*) => ($( - #[unstable(feature = "step_trait", - reason = "likely to be replaced by finer-grained traits", - issue = "42168")] - impl Step for $t { - #[inline] - fn steps_between(_start: &Self, _end: &Self) -> Option { - None - } - - #[inline] - fn add_usize(&self, n: usize) -> Option { - self.checked_add(n as $t) - } - - step_identical_methods!(); - } - )*) -} - -step_impl_unsigned!(usize u8 u16); -#[cfg(not(target_pointer_width = "16"))] -step_impl_unsigned!(u32); -#[cfg(target_pointer_width = "16")] -step_impl_no_between!(u32); +step_impl_unsigned!(usize u8 u16 u32 u64 u128); step_impl_signed!([isize: usize] [i8: u8] [i16: u16]); -#[cfg(not(target_pointer_width = "16"))] -step_impl_signed!([i32: u32]); -#[cfg(target_pointer_width = "16")] -step_impl_no_between!(i32); -#[cfg(target_pointer_width = "64")] -step_impl_unsigned!(u64); -#[cfg(target_pointer_width = "64")] -step_impl_signed!([i64: u64]); -// If the target pointer width is not 64-bits, we -// assume here that it is less than 64-bits. 
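A small standalone sketch (illustrative only) of the quantity `Step::steps_between` computes, observed through range iteration; the signed case relies on the same wrapping subtraction used above:

```
fn main() {
    // Unsigned: the number of forward steps from start to end.
    assert_eq!((3u8..10).count(), 7);

    // Signed: the difference is computed with wrapping_sub and reinterpreted
    // as unsigned, so ranges spanning most of the type still report an
    // accurate count.
    assert_eq!((-3i32..4).count(), 7);
    assert_eq!((i8::min_value()..i8::max_value()).count(), 255);
}
```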
-#[cfg(not(target_pointer_width = "64"))] -step_impl_no_between!(u64 i64); -step_impl_no_between!(u128 i128); +step_impl_signed!([i32: u32] [i64: u64] [i128: u128]); macro_rules! range_exact_iter_impl { ($($t:ty)*) => ($( @@ -239,7 +188,7 @@ impl Iterator for ops::Range { fn size_hint(&self) -> (usize, Option) { match Step::steps_between(&self.start, &self.end) { Some(hint) => (hint, Some(hint)), - None => (0, None) + None => (usize::MAX, None) } } @@ -283,8 +232,8 @@ range_incl_exact_iter_impl!(u8 u16 i8 i16); // // They need to guarantee that .size_hint() is either exact, or that // the upper bound is None when it does not fit the type limits. -range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 i64 u64); -range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 i64 u64); +range_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); +range_incl_trusted_len_impl!(usize isize u8 i8 u16 i16 u32 i32 u64 i64 u128 i128); #[stable(feature = "rust1", since = "1.0.0")] impl DoubleEndedIterator for ops::Range { @@ -360,7 +309,7 @@ impl Iterator for ops::RangeInclusive { match Step::steps_between(&self.start, &self.end) { Some(hint) => (hint.saturating_add(1), hint.checked_add(1)), - None => (0, None), + None => (usize::MAX, None), } } @@ -378,11 +327,11 @@ impl Iterator for ops::RangeInclusive { Some(Less) => { self.is_empty = Some(false); self.start = plus_n.add_one(); - return Some(plus_n) + return Some(plus_n); } Some(Equal) => { self.is_empty = Some(true); - return Some(plus_n) + return Some(plus_n); } _ => {} } @@ -392,6 +341,34 @@ impl Iterator for ops::RangeInclusive { None } + #[inline] + fn try_fold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.compute_is_empty(); + + if self.is_empty() { + return Try::from_ok(init); + } + + let mut accum = init; + + while self.start < self.end { + let n = self.start.add_one(); + let n = mem::replace(&mut self.start, n); + accum = f(accum, n)?; + } + + self.is_empty = Some(true); + + if self.start == self.end { + accum = f(accum, self.start.clone())?; + } + + Try::from_ok(accum) + } + #[inline] fn last(mut self) -> Option { self.next_back() @@ -425,6 +402,33 @@ impl DoubleEndedIterator for ops::RangeInclusive { self.end.clone() }) } + + #[inline] + fn try_rfold(&mut self, init: B, mut f: F) -> R where + Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try + { + self.compute_is_empty(); + + if self.is_empty() { + return Try::from_ok(init); + } + + let mut accum = init; + + while self.start < self.end { + let n = self.end.sub_one(); + let n = mem::replace(&mut self.end, n); + accum = f(accum, n)?; + } + + self.is_empty = Some(true); + + if self.start == self.end { + accum = f(accum, self.start.clone())?; + } + + Try::from_ok(accum) + } } #[stable(feature = "fused", since = "1.26.0")] diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index f6a4a7a6fa80a..7934e5880d7d2 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
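A standalone sketch (illustrative only) of the behavior the new `RangeInclusive::try_fold` and the saturating `size_hint` provide; the exact hint for the huge range assumes a 64-bit target:

```
fn main() {
    // `try_fold` visits every element of the inclusive range, including the
    // final `start == end` element, and short-circuits on the first failure.
    let sum = (1u32..=5).try_fold(0u32, |acc, x| acc.checked_add(x));
    assert_eq!(sum, Some(15));

    let capped = (1u8..=255).try_fold(0u8, |acc, x| acc.checked_add(x));
    assert_eq!(capped, None); // overflowed, so the fold stopped early

    // A range whose length does not fit in usize can no longer report an
    // exact hint, so it saturates instead of claiming to be empty.
    let hint = (0u64..=u64::max_value()).size_hint();
    assert_eq!(hint, (usize::max_value(), None));
}
```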
- use fmt; use marker; use usize; @@ -49,8 +39,7 @@ unsafe impl TrustedLen for Repeat {} /// Creates a new iterator that endlessly repeats a single element. /// -/// The `repeat()` function repeats a single value over and over and over and -/// over and over and 🔁. +/// The `repeat()` function repeats a single value over and over again. /// /// Infinite iterators like `repeat()` are often used with adapters like /// [`take`], in order to make them finite. @@ -138,8 +127,7 @@ unsafe impl A> TrustedLen for RepeatWith {} /// Creates a new iterator that repeats elements of type `A` endlessly by /// applying the provided closure, the repeater, `F: FnMut() -> A`. /// -/// The `repeat_with()` function calls the repeater over and over and over and -/// over and over and 🔁. +/// The `repeat_with()` function calls the repeater over and over again. /// /// Infinite iterators like `repeat_with()` are often used with adapters like /// [`take`], in order to make them finite. @@ -387,25 +375,136 @@ pub fn once(value: T) -> Once { Once { inner: Some(value).into_iter() } } +/// An iterator that yields a single element of type `A` by +/// applying the provided closure `F: FnOnce() -> A`. +/// +/// This `struct` is created by the [`once_with`] function. +/// See its documentation for more. +/// +/// [`once_with`]: fn.once_with.html +#[derive(Copy, Clone, Debug)] +#[unstable(feature = "iter_once_with", issue = "57581")] +pub struct OnceWith { + gen: Option, +} + +#[unstable(feature = "iter_once_with", issue = "57581")] +impl A> Iterator for OnceWith { + type Item = A; + + #[inline] + fn next(&mut self) -> Option { + self.gen.take().map(|f| f()) + } + + #[inline] + fn size_hint(&self) -> (usize, Option) { + self.gen.iter().size_hint() + } +} + +#[unstable(feature = "iter_once_with", issue = "57581")] +impl A> DoubleEndedIterator for OnceWith { + fn next_back(&mut self) -> Option { + self.next() + } +} + +#[unstable(feature = "iter_once_with", issue = "57581")] +impl A> ExactSizeIterator for OnceWith { + fn len(&self) -> usize { + self.gen.iter().len() + } +} + +#[unstable(feature = "iter_once_with", issue = "57581")] +impl A> FusedIterator for OnceWith {} + +#[unstable(feature = "iter_once_with", issue = "57581")] +unsafe impl A> TrustedLen for OnceWith {} + +/// Creates an iterator that lazily generates a value exactly once by invoking +/// the provided closure. +/// +/// This is commonly used to adapt a single value generator into a [`chain`] of +/// other kinds of iteration. Maybe you have an iterator that covers almost +/// everything, but you need an extra special case. Maybe you have a function +/// which works on iterators, but you only need to process one value. +/// +/// Unlike [`once`], this function will lazily generate the value on request. +/// +/// [`once`]: fn.once.html +/// [`chain`]: trait.Iterator.html#method.chain +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// #![feature(iter_once_with)] +/// +/// use std::iter; +/// +/// // one is the loneliest number +/// let mut one = iter::once_with(|| 1); +/// +/// assert_eq!(Some(1), one.next()); +/// +/// // just one, that's all we get +/// assert_eq!(None, one.next()); +/// ``` +/// +/// Chaining together with another iterator. 
Let's say that we want to iterate +/// over each file of the `.foo` directory, but also a configuration file, +/// `.foorc`: +/// +/// ```no_run +/// #![feature(iter_once_with)] +/// +/// use std::iter; +/// use std::fs; +/// use std::path::PathBuf; +/// +/// let dirs = fs::read_dir(".foo").unwrap(); +/// +/// // we need to convert from an iterator of DirEntry-s to an iterator of +/// // PathBufs, so we use map +/// let dirs = dirs.map(|file| file.unwrap().path()); +/// +/// // now, our iterator just for our config file +/// let config = iter::once_with(|| PathBuf::from(".foorc")); +/// +/// // chain the two iterators together into one big iterator +/// let files = dirs.chain(config); +/// +/// // this will give us all of the files in .foo as well as .foorc +/// for f in files { +/// println!("{:?}", f); +/// } +/// ``` +#[inline] +#[unstable(feature = "iter_once_with", issue = "57581")] +pub fn once_with A>(gen: F) -> OnceWith { + OnceWith { gen: Some(gen) } +} + /// Creates a new iterator where each iteration calls the provided closure -/// `F: FnMut(&mut St) -> Option`. +/// `F: FnMut() -> Option`. /// /// This allows creating a custom iterator with any behavior /// without using the more verbose syntax of creating a dedicated type /// and implementing the `Iterator` trait for it. /// -/// In addition to its captures and environment, -/// the closure is given a mutable reference to some state -/// that is preserved across iterations. -/// That state starts as the given `initial_state` value. -/// -/// Note that the `Unfold` iterator doesn’t make assumptions about the behavior of the closure, +/// Note that the `FromFn` iterator doesn’t make assumptions about the behavior of the closure, /// and therefore conservatively does not implement [`FusedIterator`], /// or override [`Iterator::size_hint`] from its default `(0, None)`. /// /// [`FusedIterator`]: trait.FusedIterator.html /// [`Iterator::size_hint`]: trait.Iterator.html#method.size_hint /// +/// The closure can use captures and its environment to track state across iterations. Depending on +/// how the iterator is used, this may require specifying the `move` keyword on the closure. +/// /// # Examples /// /// Let’s re-implement the counter iterator from [module-level documentation]: @@ -413,14 +512,14 @@ pub fn once(value: T) -> Once { /// [module-level documentation]: index.html /// /// ``` -/// #![feature(iter_unfold)] -/// let counter = std::iter::unfold(0, |count| { +/// let mut count = 0; +/// let counter = std::iter::from_fn(move || { /// // Increment our count. This is why we started at zero. -/// *count += 1; +/// count += 1; /// /// // Check to see if we've finished counting or not. -/// if *count < 6 { -/// Some(*count) +/// if count < 6 { +/// Some(count) /// } else { /// None /// } @@ -428,47 +527,39 @@ pub fn once(value: T) -> Once { /// assert_eq!(counter.collect::>(), &[1, 2, 3, 4, 5]); /// ``` #[inline] -#[unstable(feature = "iter_unfold", issue = "55977")] -pub fn unfold(initial_state: St, f: F) -> Unfold - where F: FnMut(&mut St) -> Option +#[stable(feature = "iter_from_fn", since = "1.34.0")] +pub fn from_fn(f: F) -> FromFn + where F: FnMut() -> Option { - Unfold { - state: initial_state, - f, - } + FromFn(f) } -/// An iterator where each iteration calls the provided closure `F: FnMut(&mut St) -> Option`. +/// An iterator where each iteration calls the provided closure `F: FnMut() -> Option`. /// -/// This `struct` is created by the [`unfold`] function. 
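Another small standalone usage sketch for `iter::from_fn` (illustrative only), showing state tracked through a `move` closure's captures as described above:

```
fn main() {
    // A Fibonacci stream: all state lives in the captured variables.
    let (mut a, mut b) = (0u64, 1u64);
    let fib = std::iter::from_fn(move || {
        let next = a;
        let sum = a + b;
        a = b;
        b = sum;
        Some(next)
    });

    let first: Vec<u64> = fib.take(8).collect();
    assert_eq!(first, [0, 1, 1, 2, 3, 5, 8, 13]);
}
```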
+/// This `struct` is created by the [`iter::from_fn`] function. /// See its documentation for more. /// -/// [`unfold`]: fn.unfold.html +/// [`iter::from_fn`]: fn.from_fn.html #[derive(Clone)] -#[unstable(feature = "iter_unfold", issue = "55977")] -pub struct Unfold { - state: St, - f: F, -} +#[stable(feature = "iter_from_fn", since = "1.34.0")] +pub struct FromFn(F); -#[unstable(feature = "iter_unfold", issue = "55977")] -impl Iterator for Unfold - where F: FnMut(&mut St) -> Option +#[stable(feature = "iter_from_fn", since = "1.34.0")] +impl Iterator for FromFn + where F: FnMut() -> Option { type Item = T; #[inline] fn next(&mut self) -> Option { - (self.f)(&mut self.state) + (self.0)() } } -#[unstable(feature = "iter_unfold", issue = "55977")] -impl fmt::Debug for Unfold { +#[stable(feature = "iter_from_fn", since = "1.34.0")] +impl fmt::Debug for FromFn { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Unfold") - .field("state", &self.state) - .finish() + f.debug_struct("FromFn").finish() } } @@ -478,13 +569,12 @@ impl fmt::Debug for Unfold { /// and calls the given `FnMut(&T) -> Option` closure to compute each item’s successor. /// /// ``` -/// #![feature(iter_unfold)] /// use std::iter::successors; /// /// let powers_of_10 = successors(Some(1_u16), |n| n.checked_mul(10)); /// assert_eq!(powers_of_10.collect::>(), &[1, 10, 100, 1_000, 10_000]); /// ``` -#[unstable(feature = "iter_unfold", issue = "55977")] +#[stable(feature = "iter_successors", since = "1.34.0")] pub fn successors(first: Option, succ: F) -> Successors where F: FnMut(&T) -> Option { @@ -504,13 +594,13 @@ pub fn successors(first: Option, succ: F) -> Successors /// /// [`successors`]: fn.successors.html #[derive(Clone)] -#[unstable(feature = "iter_unfold", issue = "55977")] +#[stable(feature = "iter_successors", since = "1.34.0")] pub struct Successors { next: Option, succ: F, } -#[unstable(feature = "iter_unfold", issue = "55977")] +#[stable(feature = "iter_successors", since = "1.34.0")] impl Iterator for Successors where F: FnMut(&T) -> Option { @@ -534,12 +624,12 @@ impl Iterator for Successors } } -#[unstable(feature = "iter_unfold", issue = "55977")] +#[stable(feature = "iter_successors", since = "1.34.0")] impl FusedIterator for Successors where F: FnMut(&T) -> Option {} -#[unstable(feature = "iter_unfold", issue = "55977")] +#[stable(feature = "iter_successors", since = "1.34.0")] impl fmt::Debug for Successors { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Successors") diff --git a/src/libcore/iter/traits.rs b/src/libcore/iter/traits.rs deleted file mode 100644 index 45e5b614db3e0..0000000000000 --- a/src/libcore/iter/traits.rs +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -use ops::{Mul, Add, Try}; -use num::Wrapping; - -use super::LoopState; - -/// Conversion from an `Iterator`. -/// -/// By implementing `FromIterator` for a type, you define how it will be -/// created from an iterator. This is common for types which describe a -/// collection of some kind. -/// -/// `FromIterator`'s [`from_iter`] is rarely called explicitly, and is instead -/// used through [`Iterator`]'s [`collect`] method. 
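Another small standalone sketch for `iter::successors` (illustrative only), where iteration ends as soon as the closure returns `None`:

```
fn main() {
    // Repeatedly halve a number; the sequence stops once it reaches zero.
    let halves: Vec<u32> = std::iter::successors(Some(100u32), |&n| {
        if n == 0 { None } else { Some(n / 2) }
    })
    .collect();

    assert_eq!(halves, [100, 50, 25, 12, 6, 3, 1, 0]);
}
```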
See [`collect`]'s -/// documentation for more examples. -/// -/// [`from_iter`]: #tymethod.from_iter -/// [`Iterator`]: trait.Iterator.html -/// [`collect`]: trait.Iterator.html#method.collect -/// -/// See also: [`IntoIterator`]. -/// -/// [`IntoIterator`]: trait.IntoIterator.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// use std::iter::FromIterator; -/// -/// let five_fives = std::iter::repeat(5).take(5); -/// -/// let v = Vec::from_iter(five_fives); -/// -/// assert_eq!(v, vec![5, 5, 5, 5, 5]); -/// ``` -/// -/// Using [`collect`] to implicitly use `FromIterator`: -/// -/// ``` -/// let five_fives = std::iter::repeat(5).take(5); -/// -/// let v: Vec = five_fives.collect(); -/// -/// assert_eq!(v, vec![5, 5, 5, 5, 5]); -/// ``` -/// -/// Implementing `FromIterator` for your type: -/// -/// ``` -/// use std::iter::FromIterator; -/// -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. -/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // and we'll implement FromIterator -/// impl FromIterator for MyCollection { -/// fn from_iter>(iter: I) -> Self { -/// let mut c = MyCollection::new(); -/// -/// for i in iter { -/// c.add(i); -/// } -/// -/// c -/// } -/// } -/// -/// // Now we can make a new iterator... -/// let iter = (0..5).into_iter(); -/// -/// // ... and make a MyCollection out of it -/// let c = MyCollection::from_iter(iter); -/// -/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); -/// -/// // collect works too! -/// -/// let iter = (0..5).into_iter(); -/// let c: MyCollection = iter.collect(); -/// -/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented( - message="a collection of type `{Self}` cannot be built from an iterator \ - over elements of type `{A}`", - label="a collection of type `{Self}` cannot be built from `std::iter::Iterator`", -)] -pub trait FromIterator: Sized { - /// Creates a value from an iterator. - /// - /// See the [module-level documentation] for more. - /// - /// [module-level documentation]: index.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use std::iter::FromIterator; - /// - /// let five_fives = std::iter::repeat(5).take(5); - /// - /// let v = Vec::from_iter(five_fives); - /// - /// assert_eq!(v, vec![5, 5, 5, 5, 5]); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn from_iter>(iter: T) -> Self; -} - -/// Conversion into an `Iterator`. -/// -/// By implementing `IntoIterator` for a type, you define how it will be -/// converted to an iterator. This is common for types which describe a -/// collection of some kind. -/// -/// One benefit of implementing `IntoIterator` is that your type will [work -/// with Rust's `for` loop syntax](index.html#for-loops-and-intoiterator). -/// -/// See also: [`FromIterator`]. 
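For context, a standalone sketch (illustrative only) of one common way `FromIterator` is consumed through `collect`, using the standard library's implementation for `Result`:

```
fn main() {
    // Collecting an iterator of `Result`s into a `Result` of a collection
    // stops at the first error and returns it.
    let ok: Result<Vec<i32>, String> =
        vec![Ok(1), Ok(2), Ok(3)].into_iter().collect();
    assert_eq!(ok, Ok(vec![1, 2, 3]));

    let err: Result<Vec<i32>, String> =
        vec![Ok(1), Err("boom".to_string()), Ok(3)].into_iter().collect();
    assert_eq!(err, Err("boom".to_string()));
}
```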
-/// -/// [`FromIterator`]: trait.FromIterator.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// let v = vec![1, 2, 3]; -/// let mut iter = v.into_iter(); -/// -/// assert_eq!(Some(1), iter.next()); -/// assert_eq!(Some(2), iter.next()); -/// assert_eq!(Some(3), iter.next()); -/// assert_eq!(None, iter.next()); -/// ``` -/// Implementing `IntoIterator` for your type: -/// -/// ``` -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. -/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // and we'll implement IntoIterator -/// impl IntoIterator for MyCollection { -/// type Item = i32; -/// type IntoIter = ::std::vec::IntoIter; -/// -/// fn into_iter(self) -> Self::IntoIter { -/// self.0.into_iter() -/// } -/// } -/// -/// // Now we can make a new collection... -/// let mut c = MyCollection::new(); -/// -/// // ... add some stuff to it ... -/// c.add(0); -/// c.add(1); -/// c.add(2); -/// -/// // ... and then turn it into an Iterator: -/// for (i, n) in c.into_iter().enumerate() { -/// assert_eq!(i as i32, n); -/// } -/// ``` -/// -/// It is common to use `IntoIterator` as a trait bound. This allows -/// the input collection type to change, so long as it is still an -/// iterator. Additional bounds can be specified by restricting on -/// `Item`: -/// -/// ```rust -/// fn collect_as_strings(collection: T) -> Vec -/// where T: IntoIterator, -/// T::Item : std::fmt::Debug, -/// { -/// collection -/// .into_iter() -/// .map(|item| format!("{:?}", item)) -/// .collect() -/// } -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait IntoIterator { - /// The type of the elements being iterated over. - #[stable(feature = "rust1", since = "1.0.0")] - type Item; - - /// Which kind of iterator are we turning this into? - #[stable(feature = "rust1", since = "1.0.0")] - type IntoIter: Iterator; - - /// Creates an iterator from a value. - /// - /// See the [module-level documentation] for more. - /// - /// [module-level documentation]: index.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let v = vec![1, 2, 3]; - /// let mut iter = v.into_iter(); - /// - /// assert_eq!(Some(1), iter.next()); - /// assert_eq!(Some(2), iter.next()); - /// assert_eq!(Some(3), iter.next()); - /// assert_eq!(None, iter.next()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn into_iter(self) -> Self::IntoIter; -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl IntoIterator for I { - type Item = I::Item; - type IntoIter = I; - - fn into_iter(self) -> I { - self - } -} - -/// Extend a collection with the contents of an iterator. -/// -/// Iterators produce a series of values, and collections can also be thought -/// of as a series of values. The `Extend` trait bridges this gap, allowing you -/// to extend a collection by including the contents of that iterator. When -/// extending a collection with an already existing key, that entry is updated -/// or, in the case of collections that permit multiple entries with equal -/// keys, that entry is inserted. 
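A standalone sketch (illustrative only) of the `for`-loop connection mentioned above: the loop is essentially sugar over `IntoIterator::into_iter` plus repeated `next` calls:

```
fn main() {
    let v = vec![1, 2, 3];

    // Roughly what `for x in v { sum += x; }` expands to.
    let mut sum = 0;
    let mut iter = IntoIterator::into_iter(v);
    while let Some(x) = iter.next() {
        sum += x;
    }
    assert_eq!(sum, 6);
}
```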
-/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// // You can extend a String with some chars: -/// let mut message = String::from("The first three letters are: "); -/// -/// message.extend(&['a', 'b', 'c']); -/// -/// assert_eq!("abc", &message[29..32]); -/// ``` -/// -/// Implementing `Extend`: -/// -/// ``` -/// // A sample collection, that's just a wrapper over Vec -/// #[derive(Debug)] -/// struct MyCollection(Vec); -/// -/// // Let's give it some methods so we can create one and add things -/// // to it. -/// impl MyCollection { -/// fn new() -> MyCollection { -/// MyCollection(Vec::new()) -/// } -/// -/// fn add(&mut self, elem: i32) { -/// self.0.push(elem); -/// } -/// } -/// -/// // since MyCollection has a list of i32s, we implement Extend for i32 -/// impl Extend for MyCollection { -/// -/// // This is a bit simpler with the concrete type signature: we can call -/// // extend on anything which can be turned into an Iterator which gives -/// // us i32s. Because we need i32s to put into MyCollection. -/// fn extend>(&mut self, iter: T) { -/// -/// // The implementation is very straightforward: loop through the -/// // iterator, and add() each element to ourselves. -/// for elem in iter { -/// self.add(elem); -/// } -/// } -/// } -/// -/// let mut c = MyCollection::new(); -/// -/// c.add(5); -/// c.add(6); -/// c.add(7); -/// -/// // let's extend our collection with three more numbers -/// c.extend(vec![1, 2, 3]); -/// -/// // we've added these elements onto the end -/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c)); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait Extend { - /// Extends a collection with the contents of an iterator. - /// - /// As this is the only method for this trait, the [trait-level] docs - /// contain more details. - /// - /// [trait-level]: trait.Extend.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// // You can extend a String with some chars: - /// let mut message = String::from("abc"); - /// - /// message.extend(['d', 'e', 'f'].iter()); - /// - /// assert_eq!("abcdef", &message); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn extend>(&mut self, iter: T); -} - -#[stable(feature = "extend_for_unit", since = "1.28.0")] -impl Extend<()> for () { - fn extend>(&mut self, iter: T) { - iter.into_iter().for_each(drop) - } -} - -/// An iterator able to yield elements from both ends. -/// -/// Something that implements `DoubleEndedIterator` has one extra capability -/// over something that implements [`Iterator`]: the ability to also take -/// `Item`s from the back, as well as the front. -/// -/// It is important to note that both back and forth work on the same range, -/// and do not cross: iteration is over when they meet in the middle. -/// -/// In a similar fashion to the [`Iterator`] protocol, once a -/// `DoubleEndedIterator` returns `None` from a `next_back()`, calling it again -/// may or may not ever return `Some` again. `next()` and `next_back()` are -/// interchangeable for this purpose. 
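A standalone sketch (illustrative only, all names made up) of a custom `DoubleEndedIterator` whose two ends walk the same range and never cross, as described above:

```
// Two cursors over the half-open range front..back.
struct TwoEnded {
    front: u32,
    back: u32, // exclusive
}

impl Iterator for TwoEnded {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        if self.front < self.back {
            let n = self.front;
            self.front += 1;
            Some(n)
        } else {
            None
        }
    }
}

impl DoubleEndedIterator for TwoEnded {
    fn next_back(&mut self) -> Option<u32> {
        if self.front < self.back {
            self.back -= 1;
            Some(self.back)
        } else {
            None
        }
    }
}

fn main() {
    let mut it = TwoEnded { front: 0, back: 4 };
    assert_eq!(it.next(), Some(0));
    assert_eq!(it.next_back(), Some(3));
    assert_eq!(it.next_back(), Some(2));
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), None);      // the ends have met
    assert_eq!(it.next_back(), None);
}
```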
-/// -/// [`Iterator`]: trait.Iterator.html -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// let numbers = vec![1, 2, 3, 4, 5, 6]; -/// -/// let mut iter = numbers.iter(); -/// -/// assert_eq!(Some(&1), iter.next()); -/// assert_eq!(Some(&6), iter.next_back()); -/// assert_eq!(Some(&5), iter.next_back()); -/// assert_eq!(Some(&2), iter.next()); -/// assert_eq!(Some(&3), iter.next()); -/// assert_eq!(Some(&4), iter.next()); -/// assert_eq!(None, iter.next()); -/// assert_eq!(None, iter.next_back()); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait DoubleEndedIterator: Iterator { - /// Removes and returns an element from the end of the iterator. - /// - /// Returns `None` when there are no more elements. - /// - /// The [trait-level] docs contain more details. - /// - /// [trait-level]: trait.DoubleEndedIterator.html - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let numbers = vec![1, 2, 3, 4, 5, 6]; - /// - /// let mut iter = numbers.iter(); - /// - /// assert_eq!(Some(&1), iter.next()); - /// assert_eq!(Some(&6), iter.next_back()); - /// assert_eq!(Some(&5), iter.next_back()); - /// assert_eq!(Some(&2), iter.next()); - /// assert_eq!(Some(&3), iter.next()); - /// assert_eq!(Some(&4), iter.next()); - /// assert_eq!(None, iter.next()); - /// assert_eq!(None, iter.next_back()); - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - fn next_back(&mut self) -> Option; - - /// This is the reverse version of [`try_fold()`]: it takes elements - /// starting from the back of the iterator. - /// - /// [`try_fold()`]: trait.Iterator.html#method.try_fold - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = ["1", "2", "3"]; - /// let sum = a.iter() - /// .map(|&s| s.parse::()) - /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y))); - /// assert_eq!(sum, Ok(6)); - /// ``` - /// - /// Short-circuiting: - /// - /// ``` - /// let a = ["1", "rust", "3"]; - /// let mut it = a.iter(); - /// let sum = it - /// .by_ref() - /// .map(|&s| s.parse::()) - /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y))); - /// assert!(sum.is_err()); - /// - /// // Because it short-circuited, the remaining elements are still - /// // available through the iterator. - /// assert_eq!(it.next_back(), Some(&"1")); - /// ``` - #[inline] - #[stable(feature = "iterator_try_fold", since = "1.27.0")] - fn try_rfold(&mut self, init: B, mut f: F) -> R where - Self: Sized, F: FnMut(B, Self::Item) -> R, R: Try - { - let mut accum = init; - while let Some(x) = self.next_back() { - accum = f(accum, x)?; - } - Try::from_ok(accum) - } - - /// An iterator method that reduces the iterator's elements to a single, - /// final value, starting from the back. - /// - /// This is the reverse version of [`fold()`]: it takes elements starting from - /// the back of the iterator. - /// - /// `rfold()` takes two arguments: an initial value, and a closure with two - /// arguments: an 'accumulator', and an element. The closure returns the value that - /// the accumulator should have for the next iteration. - /// - /// The initial value is the value the accumulator will have on the first - /// call. - /// - /// After applying this closure to every element of the iterator, `rfold()` - /// returns the accumulator. - /// - /// This operation is sometimes called 'reduce' or 'inject'. - /// - /// Folding is useful whenever you have a collection of something, and want - /// to produce a single value from it. 
- /// - /// [`fold()`]: trait.Iterator.html#method.fold - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// // the sum of all of the elements of a - /// let sum = a.iter() - /// .rfold(0, |acc, &x| acc + x); - /// - /// assert_eq!(sum, 6); - /// ``` - /// - /// This example builds a string, starting with an initial value - /// and continuing with each element from the back until the front: - /// - /// ``` - /// let numbers = [1, 2, 3, 4, 5]; - /// - /// let zero = "0".to_string(); - /// - /// let result = numbers.iter().rfold(zero, |acc, &x| { - /// format!("({} + {})", x, acc) - /// }); - /// - /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))"); - /// ``` - #[inline] - #[stable(feature = "iter_rfold", since = "1.27.0")] - fn rfold(mut self, accum: B, mut f: F) -> B where - Self: Sized, F: FnMut(B, Self::Item) -> B, - { - self.try_rfold(accum, move |acc, x| Ok::(f(acc, x))).unwrap() - } - - /// Searches for an element of an iterator from the back that satisfies a predicate. - /// - /// `rfind()` takes a closure that returns `true` or `false`. It applies - /// this closure to each element of the iterator, starting at the end, and if any - /// of them return `true`, then `rfind()` returns [`Some(element)`]. If they all return - /// `false`, it returns [`None`]. - /// - /// `rfind()` is short-circuiting; in other words, it will stop processing - /// as soon as the closure returns `true`. - /// - /// Because `rfind()` takes a reference, and many iterators iterate over - /// references, this leads to a possibly confusing situation where the - /// argument is a double reference. You can see this effect in the - /// examples below, with `&&x`. - /// - /// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some - /// [`None`]: ../../std/option/enum.Option.html#variant.None - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2)); - /// - /// assert_eq!(a.iter().rfind(|&&x| x == 5), None); - /// ``` - /// - /// Stopping at the first `true`: - /// - /// ``` - /// let a = [1, 2, 3]; - /// - /// let mut iter = a.iter(); - /// - /// assert_eq!(iter.rfind(|&&x| x == 2), Some(&2)); - /// - /// // we can still use `iter`, as there are more elements. - /// assert_eq!(iter.next_back(), Some(&1)); - /// ``` - #[inline] - #[stable(feature = "iter_rfind", since = "1.27.0")] - fn rfind
(&mut self, mut predicate: P) -> Option where - Self: Sized, - P: FnMut(&Self::Item) -> bool - { - self.try_rfold((), move |(), x| { - if predicate(&x) { LoopState::Break(x) } - else { LoopState::Continue(()) } - }).break_value() - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I { - fn next_back(&mut self) -> Option { (**self).next_back() } -} - -/// An iterator that knows its exact length. -/// -/// Many [`Iterator`]s don't know how many times they will iterate, but some do. -/// If an iterator knows how many times it can iterate, providing access to -/// that information can be useful. For example, if you want to iterate -/// backwards, a good start is to know where the end is. -/// -/// When implementing an `ExactSizeIterator`, you must also implement -/// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must* -/// return the exact size of the iterator. -/// -/// [`Iterator`]: trait.Iterator.html -/// [`size_hint`]: trait.Iterator.html#method.size_hint -/// -/// The [`len`] method has a default implementation, so you usually shouldn't -/// implement it. However, you may be able to provide a more performant -/// implementation than the default, so overriding it in this case makes sense. -/// -/// [`len`]: #method.len -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// // a finite range knows exactly how many times it will iterate -/// let five = 0..5; -/// -/// assert_eq!(5, five.len()); -/// ``` -/// -/// In the [module level docs][moddocs], we implemented an [`Iterator`], -/// `Counter`. Let's implement `ExactSizeIterator` for it as well: -/// -/// [moddocs]: index.html -/// -/// ``` -/// # struct Counter { -/// # count: usize, -/// # } -/// # impl Counter { -/// # fn new() -> Counter { -/// # Counter { count: 0 } -/// # } -/// # } -/// # impl Iterator for Counter { -/// # type Item = usize; -/// # fn next(&mut self) -> Option { -/// # self.count += 1; -/// # if self.count < 6 { -/// # Some(self.count) -/// # } else { -/// # None -/// # } -/// # } -/// # } -/// impl ExactSizeIterator for Counter { -/// // We can easily calculate the remaining number of iterations. -/// fn len(&self) -> usize { -/// 5 - self.count -/// } -/// } -/// -/// // And now we can use it! -/// -/// let counter = Counter::new(); -/// -/// assert_eq!(5, counter.len()); -/// ``` -#[stable(feature = "rust1", since = "1.0.0")] -pub trait ExactSizeIterator: Iterator { - /// Returns the exact number of times the iterator will iterate. - /// - /// This method has a default implementation, so you usually should not - /// implement it directly. However, if you can provide a more efficient - /// implementation, you can do so. See the [trait-level] docs for an - /// example. - /// - /// This function has the same safety guarantees as the [`size_hint`] - /// function. - /// - /// [trait-level]: trait.ExactSizeIterator.html - /// [`size_hint`]: trait.Iterator.html#method.size_hint - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// // a finite range knows exactly how many times it will iterate - /// let five = 0..5; - /// - /// assert_eq!(5, five.len()); - /// ``` - #[inline] - #[stable(feature = "rust1", since = "1.0.0")] - fn len(&self) -> usize { - let (lower, upper) = self.size_hint(); - // Note: This assertion is overly defensive, but it checks the invariant - // guaranteed by the trait. If this trait were rust-internal, - // we could use debug_assert!; assert_eq! 
will check all Rust user - // implementations too. - assert_eq!(upper, Some(lower)); - lower - } - - /// Returns whether the iterator is empty. - /// - /// This method has a default implementation using `self.len()`, so you - /// don't need to implement it yourself. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// #![feature(exact_size_is_empty)] - /// - /// let mut one_element = std::iter::once(0); - /// assert!(!one_element.is_empty()); - /// - /// assert_eq!(one_element.next(), Some(0)); - /// assert!(one_element.is_empty()); - /// - /// assert_eq!(one_element.next(), None); - /// ``` - #[inline] - #[unstable(feature = "exact_size_is_empty", issue = "35428")] - fn is_empty(&self) -> bool { - self.len() == 0 - } -} - -#[stable(feature = "rust1", since = "1.0.0")] -impl ExactSizeIterator for &mut I { - fn len(&self) -> usize { - (**self).len() - } - fn is_empty(&self) -> bool { - (**self).is_empty() - } -} - -/// Trait to represent types that can be created by summing up an iterator. -/// -/// This trait is used to implement the [`sum`] method on iterators. Types which -/// implement the trait can be generated by the [`sum`] method. Like -/// [`FromIterator`] this trait should rarely be called directly and instead -/// interacted with through [`Iterator::sum`]. -/// -/// [`sum`]: ../../std/iter/trait.Sum.html#tymethod.sum -/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html -/// [`Iterator::sum`]: ../../std/iter/trait.Iterator.html#method.sum -#[stable(feature = "iter_arith_traits", since = "1.12.0")] -pub trait Sum: Sized { - /// Method which takes an iterator and generates `Self` from the elements by - /// "summing up" the items. - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - fn sum>(iter: I) -> Self; -} - -/// Trait to represent types that can be created by multiplying elements of an -/// iterator. -/// -/// This trait is used to implement the [`product`] method on iterators. Types -/// which implement the trait can be generated by the [`product`] method. Like -/// [`FromIterator`] this trait should rarely be called directly and instead -/// interacted with through [`Iterator::product`]. -/// -/// [`product`]: ../../std/iter/trait.Product.html#tymethod.product -/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html -/// [`Iterator::product`]: ../../std/iter/trait.Iterator.html#method.product -#[stable(feature = "iter_arith_traits", since = "1.12.0")] -pub trait Product: Sized { - /// Method which takes an iterator and generates `Self` from the elements by - /// multiplying the items. - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - fn product>(iter: I) -> Self; -} - -// N.B., explicitly use Add and Mul here to inherit overflow checks -macro_rules! 
integer_sum_product { - (@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($( - #[$attr] - impl Sum for $a { - fn sum>(iter: I) -> $a { - iter.fold($zero, Add::add) - } - } - - #[$attr] - impl Product for $a { - fn product>(iter: I) -> $a { - iter.fold($one, Mul::mul) - } - } - - #[$attr] - impl<'a> Sum<&'a $a> for $a { - fn sum>(iter: I) -> $a { - iter.fold($zero, Add::add) - } - } - - #[$attr] - impl<'a> Product<&'a $a> for $a { - fn product>(iter: I) -> $a { - iter.fold($one, Mul::mul) - } - } - )*); - ($($a:ty)*) => ( - integer_sum_product!(@impls 0, 1, - #[stable(feature = "iter_arith_traits", since = "1.12.0")], - $($a)+); - integer_sum_product!(@impls Wrapping(0), Wrapping(1), - #[stable(feature = "wrapping_iter_arith", since = "1.14.0")], - $(Wrapping<$a>)+); - ); -} - -macro_rules! float_sum_product { - ($($a:ident)*) => ($( - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - impl Sum for $a { - fn sum>(iter: I) -> $a { - iter.fold(0.0, |a, b| a + b) - } - } - - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - impl Product for $a { - fn product>(iter: I) -> $a { - iter.fold(1.0, |a, b| a * b) - } - } - - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - impl<'a> Sum<&'a $a> for $a { - fn sum>(iter: I) -> $a { - iter.fold(0.0, |a, b| a + *b) - } - } - - #[stable(feature = "iter_arith_traits", since = "1.12.0")] - impl<'a> Product<&'a $a> for $a { - fn product>(iter: I) -> $a { - iter.fold(1.0, |a, b| a * *b) - } - } - )*) -} - -integer_sum_product! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize } -float_sum_product! { f32 f64 } - -/// An iterator adapter that produces output as long as the underlying -/// iterator produces `Result::Ok` values. -/// -/// If an error is encountered, the iterator stops and the error is -/// stored. The error may be recovered later via `reconstruct`. -struct ResultShunt { - iter: I, - error: Option, -} - -impl ResultShunt - where I: Iterator> -{ - /// Process the given iterator as if it yielded a `T` instead of a - /// `Result`. Any errors will stop the inner iterator and - /// the overall result will be an error. - pub fn process(iter: I, mut f: F) -> Result - where F: FnMut(&mut Self) -> U - { - let mut shunt = ResultShunt::new(iter); - let value = f(shunt.by_ref()); - shunt.reconstruct(value) - } - - fn new(iter: I) -> Self { - ResultShunt { - iter, - error: None, - } - } - - /// Consume the adapter and rebuild a `Result` value. This should - /// *always* be called, otherwise any potential error would be - /// lost. - fn reconstruct(self, val: U) -> Result { - match self.error { - None => Ok(val), - Some(e) => Err(e), - } - } -} - -impl Iterator for ResultShunt - where I: Iterator> -{ - type Item = T; - - fn next(&mut self) -> Option { - match self.iter.next() { - Some(Ok(v)) => Some(v), - Some(Err(e)) => { - self.error = Some(e); - None - } - None => None, - } - } - - fn size_hint(&self) -> (usize, Option) { - if self.error.is_some() { - (0, Some(0)) - } else { - let (_, upper) = self.iter.size_hint(); - (0, upper) - } - } -} - -#[stable(feature = "iter_arith_traits_result", since="1.16.0")] -impl Sum> for Result - where T: Sum, -{ - /// Takes each element in the `Iterator`: if it is an `Err`, no further - /// elements are taken, and the `Err` is returned. Should no `Err` occur, - /// the sum of all elements is returned. 
- /// - /// # Examples - /// - /// This sums up every integer in a vector, rejecting the sum if a negative - /// element is encountered: - /// - /// ``` - /// let v = vec![1, 2]; - /// let res: Result = v.iter().map(|&x: &i32| - /// if x < 0 { Err("Negative element found") } - /// else { Ok(x) } - /// ).sum(); - /// assert_eq!(res, Ok(3)); - /// ``` - fn sum(iter: I) -> Result - where I: Iterator>, - { - ResultShunt::process(iter, |i| i.sum()) - } -} - -#[stable(feature = "iter_arith_traits_result", since="1.16.0")] -impl Product> for Result - where T: Product, -{ - /// Takes each element in the `Iterator`: if it is an `Err`, no further - /// elements are taken, and the `Err` is returned. Should no `Err` occur, - /// the product of all elements is returned. - fn product(iter: I) -> Result - where I: Iterator>, - { - ResultShunt::process(iter, |i| i.product()) - } -} - -/// An iterator that always continues to yield `None` when exhausted. -/// -/// Calling next on a fused iterator that has returned `None` once is guaranteed -/// to return [`None`] again. This trait should be implemented by all iterators -/// that behave this way because it allows optimizing [`Iterator::fuse`]. -/// -/// Note: In general, you should not use `FusedIterator` in generic bounds if -/// you need a fused iterator. Instead, you should just call [`Iterator::fuse`] -/// on the iterator. If the iterator is already fused, the additional [`Fuse`] -/// wrapper will be a no-op with no performance penalty. -/// -/// [`None`]: ../../std/option/enum.Option.html#variant.None -/// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse -/// [`Fuse`]: ../../std/iter/struct.Fuse.html -#[stable(feature = "fused", since = "1.26.0")] -pub trait FusedIterator: Iterator {} - -#[stable(feature = "fused", since = "1.26.0")] -impl FusedIterator for &mut I {} - -/// An iterator that reports an accurate length using size_hint. -/// -/// The iterator reports a size hint where it is either exact -/// (lower bound is equal to upper bound), or the upper bound is [`None`]. -/// The upper bound must only be [`None`] if the actual iterator length is -/// larger than [`usize::MAX`]. In that case, the lower bound must be -/// [`usize::MAX`], resulting in a [`.size_hint`] of `(usize::MAX, None)`. -/// -/// The iterator must produce exactly the number of elements it reported -/// or diverge before reaching the end. -/// -/// # Safety -/// -/// This trait must only be implemented when the contract is upheld. -/// Consumers of this trait must inspect [`.size_hint`]’s upper bound. -/// -/// [`None`]: ../../std/option/enum.Option.html#variant.None -/// [`usize::MAX`]: ../../std/usize/constant.MAX.html -/// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint -#[unstable(feature = "trusted_len", issue = "37572")] -pub unsafe trait TrustedLen : Iterator {} - -#[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl TrustedLen for &mut I {} diff --git a/src/libcore/iter/traits/accum.rs b/src/libcore/iter/traits/accum.rs new file mode 100644 index 0000000000000..dfe1d2a1006d7 --- /dev/null +++ b/src/libcore/iter/traits/accum.rs @@ -0,0 +1,225 @@ +use ops::{Mul, Add}; +use num::Wrapping; + +/// Trait to represent types that can be created by summing up an iterator. +/// +/// This trait is used to implement the [`sum`] method on iterators. Types which +/// implement the trait can be generated by the [`sum`] method. 
Like +/// [`FromIterator`] this trait should rarely be called directly and instead +/// interacted with through [`Iterator::sum`]. +/// +/// [`sum`]: ../../std/iter/trait.Sum.html#tymethod.sum +/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html +/// [`Iterator::sum`]: ../../std/iter/trait.Iterator.html#method.sum +#[stable(feature = "iter_arith_traits", since = "1.12.0")] +pub trait Sum: Sized { + /// Method which takes an iterator and generates `Self` from the elements by + /// "summing up" the items. + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + fn sum>(iter: I) -> Self; +} + +/// Trait to represent types that can be created by multiplying elements of an +/// iterator. +/// +/// This trait is used to implement the [`product`] method on iterators. Types +/// which implement the trait can be generated by the [`product`] method. Like +/// [`FromIterator`] this trait should rarely be called directly and instead +/// interacted with through [`Iterator::product`]. +/// +/// [`product`]: ../../std/iter/trait.Product.html#tymethod.product +/// [`FromIterator`]: ../../std/iter/trait.FromIterator.html +/// [`Iterator::product`]: ../../std/iter/trait.Iterator.html#method.product +#[stable(feature = "iter_arith_traits", since = "1.12.0")] +pub trait Product: Sized { + /// Method which takes an iterator and generates `Self` from the elements by + /// multiplying the items. + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + fn product>(iter: I) -> Self; +} + +// N.B., explicitly use Add and Mul here to inherit overflow checks +macro_rules! integer_sum_product { + (@impls $zero:expr, $one:expr, #[$attr:meta], $($a:ty)*) => ($( + #[$attr] + impl Sum for $a { + fn sum>(iter: I) -> $a { + iter.fold($zero, Add::add) + } + } + + #[$attr] + impl Product for $a { + fn product>(iter: I) -> $a { + iter.fold($one, Mul::mul) + } + } + + #[$attr] + impl<'a> Sum<&'a $a> for $a { + fn sum>(iter: I) -> $a { + iter.fold($zero, Add::add) + } + } + + #[$attr] + impl<'a> Product<&'a $a> for $a { + fn product>(iter: I) -> $a { + iter.fold($one, Mul::mul) + } + } + )*); + ($($a:ty)*) => ( + integer_sum_product!(@impls 0, 1, + #[stable(feature = "iter_arith_traits", since = "1.12.0")], + $($a)+); + integer_sum_product!(@impls Wrapping(0), Wrapping(1), + #[stable(feature = "wrapping_iter_arith", since = "1.14.0")], + $(Wrapping<$a>)+); + ); +} + +macro_rules! float_sum_product { + ($($a:ident)*) => ($( + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Sum for $a { + fn sum>(iter: I) -> $a { + iter.fold(0.0, |a, b| a + b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl Product for $a { + fn product>(iter: I) -> $a { + iter.fold(1.0, |a, b| a * b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Sum<&'a $a> for $a { + fn sum>(iter: I) -> $a { + iter.fold(0.0, |a, b| a + *b) + } + } + + #[stable(feature = "iter_arith_traits", since = "1.12.0")] + impl<'a> Product<&'a $a> for $a { + fn product>(iter: I) -> $a { + iter.fold(1.0, |a, b| a * *b) + } + } + )*) +} + +integer_sum_product! { i8 i16 i32 i64 i128 isize u8 u16 u32 u64 u128 usize } +float_sum_product! { f32 f64 } + +/// An iterator adapter that produces output as long as the underlying +/// iterator produces `Result::Ok` values. +/// +/// If an error is encountered, the iterator stops and the error is +/// stored. The error may be recovered later via `reconstruct`. 
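A standalone sketch (illustrative only) of the behavior `ResultShunt` backs under this implementation: summing an iterator of `Result`s stops at the first error, and elements after the error are not consumed:

```
fn main() {
    let v = ["1", "2", "x", "4"];
    let mut it = v.iter().map(|s| s.parse::<i32>());

    // The sum fails at "x"; the shunt stops driving the inner iterator there.
    let sum: Result<i32, _> = it.by_ref().sum();
    assert!(sum.is_err());

    // "4" was never consumed, so it is still available afterwards.
    assert_eq!(it.next(), Some(Ok(4)));
}
```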
+struct ResultShunt { + iter: I, + error: Option, +} + +impl ResultShunt + where I: Iterator> +{ + /// Process the given iterator as if it yielded a `T` instead of a + /// `Result`. Any errors will stop the inner iterator and + /// the overall result will be an error. + pub fn process(iter: I, mut f: F) -> Result + where F: FnMut(&mut Self) -> U + { + let mut shunt = ResultShunt::new(iter); + let value = f(shunt.by_ref()); + shunt.reconstruct(value) + } + + fn new(iter: I) -> Self { + ResultShunt { + iter, + error: None, + } + } + + /// Consume the adapter and rebuild a `Result` value. This should + /// *always* be called, otherwise any potential error would be + /// lost. + fn reconstruct(self, val: U) -> Result { + match self.error { + None => Ok(val), + Some(e) => Err(e), + } + } +} + +impl Iterator for ResultShunt + where I: Iterator> +{ + type Item = T; + + fn next(&mut self) -> Option { + match self.iter.next() { + Some(Ok(v)) => Some(v), + Some(Err(e)) => { + self.error = Some(e); + None + } + None => None, + } + } + + fn size_hint(&self) -> (usize, Option) { + if self.error.is_some() { + (0, Some(0)) + } else { + let (_, upper) = self.iter.size_hint(); + (0, upper) + } + } +} + +#[stable(feature = "iter_arith_traits_result", since="1.16.0")] +impl Sum> for Result + where T: Sum, +{ + /// Takes each element in the `Iterator`: if it is an `Err`, no further + /// elements are taken, and the `Err` is returned. Should no `Err` occur, + /// the sum of all elements is returned. + /// + /// # Examples + /// + /// This sums up every integer in a vector, rejecting the sum if a negative + /// element is encountered: + /// + /// ``` + /// let v = vec![1, 2]; + /// let res: Result = v.iter().map(|&x: &i32| + /// if x < 0 { Err("Negative element found") } + /// else { Ok(x) } + /// ).sum(); + /// assert_eq!(res, Ok(3)); + /// ``` + fn sum(iter: I) -> Result + where I: Iterator>, + { + ResultShunt::process(iter, |i| i.sum()) + } +} + +#[stable(feature = "iter_arith_traits_result", since="1.16.0")] +impl Product> for Result + where T: Product, +{ + /// Takes each element in the `Iterator`: if it is an `Err`, no further + /// elements are taken, and the `Err` is returned. Should no `Err` occur, + /// the product of all elements is returned. + fn product(iter: I) -> Result + where I: Iterator>, + { + ResultShunt::process(iter, |i| i.product()) + } +} diff --git a/src/libcore/iter/traits/collect.rs b/src/libcore/iter/traits/collect.rs new file mode 100644 index 0000000000000..cd61ab5c552b6 --- /dev/null +++ b/src/libcore/iter/traits/collect.rs @@ -0,0 +1,349 @@ +/// Conversion from an `Iterator`. +/// +/// By implementing `FromIterator` for a type, you define how it will be +/// created from an iterator. This is common for types which describe a +/// collection of some kind. +/// +/// `FromIterator`'s [`from_iter`] is rarely called explicitly, and is instead +/// used through [`Iterator`]'s [`collect`] method. See [`collect`]'s +/// documentation for more examples. +/// +/// [`from_iter`]: #tymethod.from_iter +/// [`Iterator`]: trait.Iterator.html +/// [`collect`]: trait.Iterator.html#method.collect +/// +/// See also: [`IntoIterator`]. 
+/// +/// [`IntoIterator`]: trait.IntoIterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// use std::iter::FromIterator; +/// +/// let five_fives = std::iter::repeat(5).take(5); +/// +/// let v = Vec::from_iter(five_fives); +/// +/// assert_eq!(v, vec![5, 5, 5, 5, 5]); +/// ``` +/// +/// Using [`collect`] to implicitly use `FromIterator`: +/// +/// ``` +/// let five_fives = std::iter::repeat(5).take(5); +/// +/// let v: Vec = five_fives.collect(); +/// +/// assert_eq!(v, vec![5, 5, 5, 5, 5]); +/// ``` +/// +/// Implementing `FromIterator` for your type: +/// +/// ``` +/// use std::iter::FromIterator; +/// +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. +/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // and we'll implement FromIterator +/// impl FromIterator for MyCollection { +/// fn from_iter>(iter: I) -> Self { +/// let mut c = MyCollection::new(); +/// +/// for i in iter { +/// c.add(i); +/// } +/// +/// c +/// } +/// } +/// +/// // Now we can make a new iterator... +/// let iter = (0..5).into_iter(); +/// +/// // ... and make a MyCollection out of it +/// let c = MyCollection::from_iter(iter); +/// +/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); +/// +/// // collect works too! +/// +/// let iter = (0..5).into_iter(); +/// let c: MyCollection = iter.collect(); +/// +/// assert_eq!(c.0, vec![0, 1, 2, 3, 4]); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +#[rustc_on_unimplemented( + message="a collection of type `{Self}` cannot be built from an iterator \ + over elements of type `{A}`", + label="a collection of type `{Self}` cannot be built from `std::iter::Iterator`", +)] +pub trait FromIterator: Sized { + /// Creates a value from an iterator. + /// + /// See the [module-level documentation] for more. + /// + /// [module-level documentation]: index.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// use std::iter::FromIterator; + /// + /// let five_fives = std::iter::repeat(5).take(5); + /// + /// let v = Vec::from_iter(five_fives); + /// + /// assert_eq!(v, vec![5, 5, 5, 5, 5]); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn from_iter>(iter: T) -> Self; +} + +/// Conversion into an `Iterator`. +/// +/// By implementing `IntoIterator` for a type, you define how it will be +/// converted to an iterator. This is common for types which describe a +/// collection of some kind. +/// +/// One benefit of implementing `IntoIterator` is that your type will [work +/// with Rust's `for` loop syntax](index.html#for-loops-and-intoiterator). +/// +/// See also: [`FromIterator`]. +/// +/// [`FromIterator`]: trait.FromIterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let v = vec![1, 2, 3]; +/// let mut iter = v.into_iter(); +/// +/// assert_eq!(Some(1), iter.next()); +/// assert_eq!(Some(2), iter.next()); +/// assert_eq!(Some(3), iter.next()); +/// assert_eq!(None, iter.next()); +/// ``` +/// Implementing `IntoIterator` for your type: +/// +/// ``` +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. 
+/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // and we'll implement IntoIterator +/// impl IntoIterator for MyCollection { +/// type Item = i32; +/// type IntoIter = ::std::vec::IntoIter; +/// +/// fn into_iter(self) -> Self::IntoIter { +/// self.0.into_iter() +/// } +/// } +/// +/// // Now we can make a new collection... +/// let mut c = MyCollection::new(); +/// +/// // ... add some stuff to it ... +/// c.add(0); +/// c.add(1); +/// c.add(2); +/// +/// // ... and then turn it into an Iterator: +/// for (i, n) in c.into_iter().enumerate() { +/// assert_eq!(i as i32, n); +/// } +/// ``` +/// +/// It is common to use `IntoIterator` as a trait bound. This allows +/// the input collection type to change, so long as it is still an +/// iterator. Additional bounds can be specified by restricting on +/// `Item`: +/// +/// ```rust +/// fn collect_as_strings(collection: T) -> Vec +/// where T: IntoIterator, +/// T::Item : std::fmt::Debug, +/// { +/// collection +/// .into_iter() +/// .map(|item| format!("{:?}", item)) +/// .collect() +/// } +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait IntoIterator { + /// The type of the elements being iterated over. + #[stable(feature = "rust1", since = "1.0.0")] + type Item; + + /// Which kind of iterator are we turning this into? + #[stable(feature = "rust1", since = "1.0.0")] + type IntoIter: Iterator; + + /// Creates an iterator from a value. + /// + /// See the [module-level documentation] for more. + /// + /// [module-level documentation]: index.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let v = vec![1, 2, 3]; + /// let mut iter = v.into_iter(); + /// + /// assert_eq!(Some(1), iter.next()); + /// assert_eq!(Some(2), iter.next()); + /// assert_eq!(Some(3), iter.next()); + /// assert_eq!(None, iter.next()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn into_iter(self) -> Self::IntoIter; +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl IntoIterator for I { + type Item = I::Item; + type IntoIter = I; + + fn into_iter(self) -> I { + self + } +} + +/// Extend a collection with the contents of an iterator. +/// +/// Iterators produce a series of values, and collections can also be thought +/// of as a series of values. The `Extend` trait bridges this gap, allowing you +/// to extend a collection by including the contents of that iterator. When +/// extending a collection with an already existing key, that entry is updated +/// or, in the case of collections that permit multiple entries with equal +/// keys, that entry is inserted. +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // You can extend a String with some chars: +/// let mut message = String::from("The first three letters are: "); +/// +/// message.extend(&['a', 'b', 'c']); +/// +/// assert_eq!("abc", &message[29..32]); +/// ``` +/// +/// Implementing `Extend`: +/// +/// ``` +/// // A sample collection, that's just a wrapper over Vec +/// #[derive(Debug)] +/// struct MyCollection(Vec); +/// +/// // Let's give it some methods so we can create one and add things +/// // to it. 
+/// impl MyCollection { +/// fn new() -> MyCollection { +/// MyCollection(Vec::new()) +/// } +/// +/// fn add(&mut self, elem: i32) { +/// self.0.push(elem); +/// } +/// } +/// +/// // since MyCollection has a list of i32s, we implement Extend for i32 +/// impl Extend for MyCollection { +/// +/// // This is a bit simpler with the concrete type signature: we can call +/// // extend on anything which can be turned into an Iterator which gives +/// // us i32s. Because we need i32s to put into MyCollection. +/// fn extend>(&mut self, iter: T) { +/// +/// // The implementation is very straightforward: loop through the +/// // iterator, and add() each element to ourselves. +/// for elem in iter { +/// self.add(elem); +/// } +/// } +/// } +/// +/// let mut c = MyCollection::new(); +/// +/// c.add(5); +/// c.add(6); +/// c.add(7); +/// +/// // let's extend our collection with three more numbers +/// c.extend(vec![1, 2, 3]); +/// +/// // we've added these elements onto the end +/// assert_eq!("MyCollection([5, 6, 7, 1, 2, 3])", format!("{:?}", c)); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait Extend { + /// Extends a collection with the contents of an iterator. + /// + /// As this is the only method for this trait, the [trait-level] docs + /// contain more details. + /// + /// [trait-level]: trait.Extend.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // You can extend a String with some chars: + /// let mut message = String::from("abc"); + /// + /// message.extend(['d', 'e', 'f'].iter()); + /// + /// assert_eq!("abcdef", &message); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn extend>(&mut self, iter: T); +} + +#[stable(feature = "extend_for_unit", since = "1.28.0")] +impl Extend<()> for () { + fn extend>(&mut self, iter: T) { + iter.into_iter().for_each(drop) + } +} diff --git a/src/libcore/iter/traits/double_ended.rs b/src/libcore/iter/traits/double_ended.rs new file mode 100644 index 0000000000000..2976afc0b4f81 --- /dev/null +++ b/src/libcore/iter/traits/double_ended.rs @@ -0,0 +1,297 @@ +use ops::Try; +use iter::LoopState; + +/// An iterator able to yield elements from both ends. +/// +/// Something that implements `DoubleEndedIterator` has one extra capability +/// over something that implements [`Iterator`]: the ability to also take +/// `Item`s from the back, as well as the front. +/// +/// It is important to note that both back and forth work on the same range, +/// and do not cross: iteration is over when they meet in the middle. +/// +/// In a similar fashion to the [`Iterator`] protocol, once a +/// `DoubleEndedIterator` returns `None` from a `next_back()`, calling it again +/// may or may not ever return `Some` again. `next()` and `next_back()` are +/// interchangeable for this purpose. +/// +/// [`Iterator`]: trait.Iterator.html +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// let numbers = vec![1, 2, 3, 4, 5, 6]; +/// +/// let mut iter = numbers.iter(); +/// +/// assert_eq!(Some(&1), iter.next()); +/// assert_eq!(Some(&6), iter.next_back()); +/// assert_eq!(Some(&5), iter.next_back()); +/// assert_eq!(Some(&2), iter.next()); +/// assert_eq!(Some(&3), iter.next()); +/// assert_eq!(Some(&4), iter.next()); +/// assert_eq!(None, iter.next()); +/// assert_eq!(None, iter.next_back()); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait DoubleEndedIterator: Iterator { + /// Removes and returns an element from the end of the iterator. + /// + /// Returns `None` when there are no more elements. 
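Looking back at `Extend` for a moment: because `extend` accepts any `IntoIterator` with a matching `Item`, the same collection can be grown from vectors, slices, or ranges without conversion. A brief sketch (standard types only):

```rust
let mut v = vec![1, 2, 3];

// Any IntoIterator with the right Item works: another Vec...
v.extend(vec![4, 5]);
// ...a cloning iterator over an array...
v.extend([6, 7].iter().cloned());
// ...or a range.
v.extend(8..10);

assert_eq!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9]);
```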
+ /// + /// The [trait-level] docs contain more details. + /// + /// [trait-level]: trait.DoubleEndedIterator.html + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let numbers = vec![1, 2, 3, 4, 5, 6]; + /// + /// let mut iter = numbers.iter(); + /// + /// assert_eq!(Some(&1), iter.next()); + /// assert_eq!(Some(&6), iter.next_back()); + /// assert_eq!(Some(&5), iter.next_back()); + /// assert_eq!(Some(&2), iter.next()); + /// assert_eq!(Some(&3), iter.next()); + /// assert_eq!(Some(&4), iter.next()); + /// assert_eq!(None, iter.next()); + /// assert_eq!(None, iter.next_back()); + /// ``` + #[stable(feature = "rust1", since = "1.0.0")] + fn next_back(&mut self) -> Option; + + /// Returns the `n`th element from the end of the iterator. + /// + /// This is essentially the reversed version of [`nth`]. Although like most indexing + /// operations, the count starts from zero, so `nth_back(0)` returns the first value fro + /// the end, `nth_back(1)` the second, and so on. + /// + /// Note that all elements between the end and the returned element will be + /// consumed, including the returned element. This also means that calling + /// `nth_back(0)` multiple times on the same iterator will return different + /// elements. + /// + /// `nth_back()` will return [`None`] if `n` is greater than or equal to the length of the + /// iterator. + /// + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// [`nth`]: ../../std/iter/trait.Iterator.html#method.nth + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(iter_nth_back)] + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().nth_back(2), Some(&1)); + /// ``` + /// + /// Calling `nth_back()` multiple times doesn't rewind the iterator: + /// + /// ``` + /// #![feature(iter_nth_back)] + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.nth_back(1), Some(&2)); + /// assert_eq!(iter.nth_back(1), None); + /// ``` + /// + /// Returning `None` if there are less than `n + 1` elements: + /// + /// ``` + /// #![feature(iter_nth_back)] + /// let a = [1, 2, 3]; + /// assert_eq!(a.iter().nth_back(10), None); + /// ``` + #[inline] + #[unstable(feature = "iter_nth_back", issue = "56995")] + fn nth_back(&mut self, mut n: usize) -> Option { + for x in self.rev() { + if n == 0 { return Some(x) } + n -= 1; + } + None + } + + /// This is the reverse version of [`try_fold()`]: it takes elements + /// starting from the back of the iterator. + /// + /// [`try_fold()`]: trait.Iterator.html#method.try_fold + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = ["1", "2", "3"]; + /// let sum = a.iter() + /// .map(|&s| s.parse::()) + /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y))); + /// assert_eq!(sum, Ok(6)); + /// ``` + /// + /// Short-circuiting: + /// + /// ``` + /// let a = ["1", "rust", "3"]; + /// let mut it = a.iter(); + /// let sum = it + /// .by_ref() + /// .map(|&s| s.parse::()) + /// .try_rfold(0, |acc, x| x.and_then(|y| Ok(acc + y))); + /// assert!(sum.is_err()); + /// + /// // Because it short-circuited, the remaining elements are still + /// // available through the iterator. 
+ /// assert_eq!(it.next_back(), Some(&"1")); + /// ``` + #[inline] + #[stable(feature = "iterator_try_fold", since = "1.27.0")] + fn try_rfold(&mut self, init: B, mut f: F) -> R + where + Self: Sized, + F: FnMut(B, Self::Item) -> R, + R: Try + { + let mut accum = init; + while let Some(x) = self.next_back() { + accum = f(accum, x)?; + } + Try::from_ok(accum) + } + + /// An iterator method that reduces the iterator's elements to a single, + /// final value, starting from the back. + /// + /// This is the reverse version of [`fold()`]: it takes elements starting from + /// the back of the iterator. + /// + /// `rfold()` takes two arguments: an initial value, and a closure with two + /// arguments: an 'accumulator', and an element. The closure returns the value that + /// the accumulator should have for the next iteration. + /// + /// The initial value is the value the accumulator will have on the first + /// call. + /// + /// After applying this closure to every element of the iterator, `rfold()` + /// returns the accumulator. + /// + /// This operation is sometimes called 'reduce' or 'inject'. + /// + /// Folding is useful whenever you have a collection of something, and want + /// to produce a single value from it. + /// + /// [`fold()`]: trait.Iterator.html#method.fold + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// // the sum of all of the elements of a + /// let sum = a.iter() + /// .rfold(0, |acc, &x| acc + x); + /// + /// assert_eq!(sum, 6); + /// ``` + /// + /// This example builds a string, starting with an initial value + /// and continuing with each element from the back until the front: + /// + /// ``` + /// let numbers = [1, 2, 3, 4, 5]; + /// + /// let zero = "0".to_string(); + /// + /// let result = numbers.iter().rfold(zero, |acc, &x| { + /// format!("({} + {})", x, acc) + /// }); + /// + /// assert_eq!(result, "(1 + (2 + (3 + (4 + (5 + 0)))))"); + /// ``` + #[inline] + #[stable(feature = "iter_rfold", since = "1.27.0")] + fn rfold(mut self, accum: B, mut f: F) -> B + where + Self: Sized, + F: FnMut(B, Self::Item) -> B, + { + self.try_rfold(accum, move |acc, x| Ok::(f(acc, x))).unwrap() + } + + /// Searches for an element of an iterator from the back that satisfies a predicate. + /// + /// `rfind()` takes a closure that returns `true` or `false`. It applies + /// this closure to each element of the iterator, starting at the end, and if any + /// of them return `true`, then `rfind()` returns [`Some(element)`]. If they all return + /// `false`, it returns [`None`]. + /// + /// `rfind()` is short-circuiting; in other words, it will stop processing + /// as soon as the closure returns `true`. + /// + /// Because `rfind()` takes a reference, and many iterators iterate over + /// references, this leads to a possibly confusing situation where the + /// argument is a double reference. You can see this effect in the + /// examples below, with `&&x`. 
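As the default implementation of `rfold` above suggests, `rfold` is simply the infallible case of `try_rfold`: the closure's result is wrapped in `Ok` and unwrapped at the end. A small sketch of that correspondence (standard library only):

```rust
let a = [1, 2, 3];

// rfold folds from the back...
let direct = a.iter().rfold(0, |acc, &x| acc * 10 + x);

// ...and wrapping the same closure's result in Ok, then unwrapping,
// gives the same answer via try_rfold.
let via_try: Result<i32, ()> = a.iter().try_rfold(0, |acc, &x| Ok(acc * 10 + x));

assert_eq!(direct, 321);
assert_eq!(via_try, Ok(direct));
```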
+ /// + /// [`Some(element)`]: ../../std/option/enum.Option.html#variant.Some + /// [`None`]: ../../std/option/enum.Option.html#variant.None + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// assert_eq!(a.iter().rfind(|&&x| x == 2), Some(&2)); + /// + /// assert_eq!(a.iter().rfind(|&&x| x == 5), None); + /// ``` + /// + /// Stopping at the first `true`: + /// + /// ``` + /// let a = [1, 2, 3]; + /// + /// let mut iter = a.iter(); + /// + /// assert_eq!(iter.rfind(|&&x| x == 2), Some(&2)); + /// + /// // we can still use `iter`, as there are more elements. + /// assert_eq!(iter.next_back(), Some(&1)); + /// ``` + #[inline] + #[stable(feature = "iter_rfind", since = "1.27.0")] + fn rfind
<P>
(&mut self, mut predicate: P) -> Option + where + Self: Sized, + P: FnMut(&Self::Item) -> bool + { + self.try_rfold((), move |(), x| { + if predicate(&x) { LoopState::Break(x) } + else { LoopState::Continue(()) } + }).break_value() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, I: DoubleEndedIterator + ?Sized> DoubleEndedIterator for &'a mut I { + fn next_back(&mut self) -> Option { + (**self).next_back() + } + fn nth_back(&mut self, n: usize) -> Option { + (**self).nth_back(n) + } +} diff --git a/src/libcore/iter/traits/exact_size.rs b/src/libcore/iter/traits/exact_size.rs new file mode 100644 index 0000000000000..8fc4ac93daa68 --- /dev/null +++ b/src/libcore/iter/traits/exact_size.rs @@ -0,0 +1,143 @@ +/// An iterator that knows its exact length. +/// +/// Many [`Iterator`]s don't know how many times they will iterate, but some do. +/// If an iterator knows how many times it can iterate, providing access to +/// that information can be useful. For example, if you want to iterate +/// backwards, a good start is to know where the end is. +/// +/// When implementing an `ExactSizeIterator`, you must also implement +/// [`Iterator`]. When doing so, the implementation of [`size_hint`] *must* +/// return the exact size of the iterator. +/// +/// [`Iterator`]: trait.Iterator.html +/// [`size_hint`]: trait.Iterator.html#method.size_hint +/// +/// The [`len`] method has a default implementation, so you usually shouldn't +/// implement it. However, you may be able to provide a more performant +/// implementation than the default, so overriding it in this case makes sense. +/// +/// [`len`]: #method.len +/// +/// # Examples +/// +/// Basic usage: +/// +/// ``` +/// // a finite range knows exactly how many times it will iterate +/// let five = 0..5; +/// +/// assert_eq!(5, five.len()); +/// ``` +/// +/// In the [module level docs][moddocs], we implemented an [`Iterator`], +/// `Counter`. Let's implement `ExactSizeIterator` for it as well: +/// +/// [moddocs]: index.html +/// +/// ``` +/// # struct Counter { +/// # count: usize, +/// # } +/// # impl Counter { +/// # fn new() -> Counter { +/// # Counter { count: 0 } +/// # } +/// # } +/// # impl Iterator for Counter { +/// # type Item = usize; +/// # fn next(&mut self) -> Option { +/// # self.count += 1; +/// # if self.count < 6 { +/// # Some(self.count) +/// # } else { +/// # None +/// # } +/// # } +/// # } +/// impl ExactSizeIterator for Counter { +/// // We can easily calculate the remaining number of iterations. +/// fn len(&self) -> usize { +/// 5 - self.count +/// } +/// } +/// +/// // And now we can use it! +/// +/// let counter = Counter::new(); +/// +/// assert_eq!(5, counter.len()); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub trait ExactSizeIterator: Iterator { + /// Returns the exact number of times the iterator will iterate. + /// + /// This method has a default implementation, so you usually should not + /// implement it directly. However, if you can provide a more efficient + /// implementation, you can do so. See the [trait-level] docs for an + /// example. + /// + /// This function has the same safety guarantees as the [`size_hint`] + /// function. 
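Because `len` is derived from `size_hint`, it is only available on iterators and adaptors that keep the size exact. A brief sketch (standard types only):

```rust
// Ranges know their exact length, and `map` preserves it...
let it = (0..10).map(|x| x * 2);
assert_eq!(it.len(), 10);

// ...but `filter` cannot know how many items will pass, so `Filter`
// does not implement ExactSizeIterator and has no `len` method:
// let n = (0..10).filter(|x| x % 2 == 0).len(); // would not compile
```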
+ /// + /// [trait-level]: trait.ExactSizeIterator.html + /// [`size_hint`]: trait.Iterator.html#method.size_hint + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// // a finite range knows exactly how many times it will iterate + /// let five = 0..5; + /// + /// assert_eq!(5, five.len()); + /// ``` + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + fn len(&self) -> usize { + let (lower, upper) = self.size_hint(); + // Note: This assertion is overly defensive, but it checks the invariant + // guaranteed by the trait. If this trait were rust-internal, + // we could use debug_assert!; assert_eq! will check all Rust user + // implementations too. + assert_eq!(upper, Some(lower)); + lower + } + + /// Returns `true` if the iterator is empty. + /// + /// This method has a default implementation using `self.len()`, so you + /// don't need to implement it yourself. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_size_is_empty)] + /// + /// let mut one_element = std::iter::once(0); + /// assert!(!one_element.is_empty()); + /// + /// assert_eq!(one_element.next(), Some(0)); + /// assert!(one_element.is_empty()); + /// + /// assert_eq!(one_element.next(), None); + /// ``` + #[inline] + #[unstable(feature = "exact_size_is_empty", issue = "35428")] + fn is_empty(&self) -> bool { + self.len() == 0 + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl ExactSizeIterator for &mut I { + fn len(&self) -> usize { + (**self).len() + } + fn is_empty(&self) -> bool { + (**self).is_empty() + } +} + diff --git a/src/libcore/iter/iterator.rs b/src/libcore/iter/traits/iterator.rs similarity index 91% rename from src/libcore/iter/iterator.rs rename to src/libcore/iter/traits/iterator.rs index c0b83a6868b38..6df4a457655c5 100644 --- a/src/libcore/iter/iterator.rs +++ b/src/libcore/iter/traits/iterator.rs @@ -1,22 +1,11 @@ -// Copyright 2013-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use cmp::Ordering; use ops::Try; -use super::LoopState; -use super::{Chain, Cycle, Cloned, Enumerate, Filter, FilterMap, Fuse}; -use super::{Flatten, FlatMap, flatten_compat}; -use super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev}; -use super::{Zip, Sum, Product}; -use super::{ChainState, FromIterator, ZipImpl}; +use super::super::LoopState; +use super::super::{Chain, Cycle, Copied, Cloned, Enumerate, Filter, FilterMap, Fuse}; +use super::super::{Flatten, FlatMap}; +use super::super::{Inspect, Map, Peekable, Scan, Skip, SkipWhile, StepBy, Take, TakeWhile, Rev}; +use super::super::{Zip, Sum, Product, FromIterator}; fn _assert_is_object_safe(_: &dyn Iterator) {} @@ -87,7 +76,7 @@ fn _assert_is_object_safe(_: &dyn Iterator) {} on( _Self="[]", label="borrow the array with `&` or call `.iter()` on it to iterate over it", - note="arrays are not an iterators, but slices like the following are: `&[1, 2, 3]`" + note="arrays are not iterators, but slices like the following are: `&[1, 2, 3]`" ), on( _Self="{integral}", @@ -98,7 +87,7 @@ fn _assert_is_object_safe(_: &dyn Iterator) {} message="`{Self}` is not an iterator" )] #[doc(spotlight)] -#[must_use] +#[must_use = "iterators are lazy and do nothing unless consumed"] pub trait Iterator { /// The type of the elements being iterated over. #[stable(feature = "rust1", since = "1.0.0")] @@ -131,7 +120,7 @@ pub trait Iterator { /// // ... and then None once it's over. /// assert_eq!(None, iter.next()); /// - /// // More calls may or may not return None. Here, they always will. + /// // More calls may or may not return `None`. Here, they always will. /// assert_eq!(None, iter.next()); /// assert_eq!(None, iter.next()); /// ``` @@ -162,7 +151,7 @@ pub trait Iterator { /// That said, the implementation should provide a correct estimation, /// because otherwise it would be a violation of the trait's protocol. /// - /// The default implementation returns `(0, None)` which is correct for any + /// The default implementation returns `(0, `[`None`]`)` which is correct for any /// iterator. /// /// [`usize`]: ../../std/primitive.usize.html @@ -377,8 +366,7 @@ pub trait Iterator { #[inline] #[stable(feature = "iterator_step_by", since = "1.28.0")] fn step_by(self, step: usize) -> StepBy where Self: Sized { - assert!(step != 0); - StepBy{iter: self, step: step - 1, first_take: true} + StepBy::new(self, step) } /// Takes two iterators and creates a new iterator over both in sequence. @@ -435,7 +423,7 @@ pub trait Iterator { fn chain(self, other: U) -> Chain where Self: Sized, U: IntoIterator, { - Chain{a: self, b: other.into_iter(), state: ChainState::Both} + Chain::new(self, other.into_iter()) } /// 'Zips up' two iterators into a single iterator of pairs. @@ -570,15 +558,15 @@ pub trait Iterator { fn map(self, f: F) -> Map where Self: Sized, F: FnMut(Self::Item) -> B, { - Map { iter: self, f } + Map::new(self, f) } /// Calls a closure on each element of an iterator. /// /// This is equivalent to using a [`for`] loop on the iterator, although - /// `break` and `continue` are not possible from a closure. It's generally + /// `break` and `continue` are not possible from a closure. It's generally /// more idiomatic to use a `for` loop, but `for_each` may be more legible - /// when processing items at the end of longer iterator chains. In some + /// when processing items at the end of longer iterator chains. 
In some /// cases `for_each` may also be faster than a loop, because it will use /// internal iteration on adaptors like `Chain`. /// @@ -681,7 +669,7 @@ pub trait Iterator { fn filter
<P>
(self, predicate: P) -> Filter where Self: Sized, P: FnMut(&Self::Item) -> bool, { - Filter {iter: self, predicate } + Filter::new(self, predicate) } /// Creates an iterator that both filters and maps. @@ -738,7 +726,7 @@ pub trait Iterator { fn filter_map(self, f: F) -> FilterMap where Self: Sized, F: FnMut(Self::Item) -> Option, { - FilterMap { iter: self, f } + FilterMap::new(self, f) } /// Creates an iterator which gives the current iteration count as well as @@ -782,7 +770,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn enumerate(self) -> Enumerate where Self: Sized { - Enumerate { iter: self, count: 0 } + Enumerate::new(self) } /// Creates an iterator which can use `peek` to look at the next element of @@ -828,7 +816,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn peekable(self) -> Peekable where Self: Sized { - Peekable{iter: self, peeked: None} + Peekable::new(self) } /// Creates an iterator that [`skip`]s elements based on a predicate. @@ -891,7 +879,7 @@ pub trait Iterator { fn skip_while
<P>
(self, predicate: P) -> SkipWhile where Self: Sized, P: FnMut(&Self::Item) -> bool, { - SkipWhile { iter: self, flag: false, predicate } + SkipWhile::new(self, predicate) } /// Creates an iterator that yields elements based on a predicate. @@ -964,14 +952,13 @@ pub trait Iterator { /// ``` /// /// The `3` is no longer there, because it was consumed in order to see if - /// the iteration should stop, but wasn't placed back into the iterator or - /// some similar thing. + /// the iteration should stop, but wasn't placed back into the iterator. #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take_while
<P>
(self, predicate: P) -> TakeWhile where Self: Sized, P: FnMut(&Self::Item) -> bool, { - TakeWhile { iter: self, flag: false, predicate } + TakeWhile::new(self, predicate) } /// Creates an iterator that skips the first `n` elements. @@ -993,7 +980,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn skip(self, n: usize) -> Skip where Self: Sized { - Skip { iter: self, n } + Skip::new(self, n) } /// Creates an iterator that yields its first `n` elements. @@ -1025,7 +1012,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn take(self, n: usize) -> Take where Self: Sized, { - Take { iter: self, n } + Take::new(self, n) } /// An iterator adaptor similar to [`fold`] that holds internal state and @@ -1070,7 +1057,7 @@ pub trait Iterator { fn scan(self, initial_state: St, f: F) -> Scan where Self: Sized, F: FnMut(&mut St, Self::Item) -> Option, { - Scan { iter: self, f, state: initial_state } + Scan::new(self, initial_state, f) } /// Creates an iterator that works like map, but flattens nested structure. @@ -1108,7 +1095,7 @@ pub trait Iterator { fn flat_map(self, f: F) -> FlatMap where Self: Sized, U: IntoIterator, F: FnMut(Self::Item) -> U, { - FlatMap { inner: flatten_compat(self.map(f)) } + FlatMap::new(self, f) } /// Creates an iterator that flattens nested structure. @@ -1176,7 +1163,7 @@ pub trait Iterator { #[stable(feature = "iterator_flatten", since = "1.29.0")] fn flatten(self) -> Flatten where Self: Sized, Self::Item: IntoIterator { - Flatten { inner: flatten_compat(self) } + Flatten::new(self) } /// Creates an iterator which ends after the first [`None`]. @@ -1228,7 +1215,7 @@ pub trait Iterator { /// assert_eq!(iter.next(), Some(4)); /// assert_eq!(iter.next(), None); /// - /// // it will always return None after the first time. + /// // it will always return `None` after the first time. /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); @@ -1236,7 +1223,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn fuse(self) -> Fuse where Self: Sized { - Fuse{iter: self, done: false} + Fuse::new(self) } /// Do something with each element of an iterator, passing the value on. @@ -1319,7 +1306,7 @@ pub trait Iterator { fn inspect(self, f: F) -> Inspect where Self: Sized, F: FnMut(&Self::Item), { - Inspect { iter: self, f } + Inspect::new(self, f) } /// Borrows an iterator, rather than consuming it. @@ -1528,7 +1515,7 @@ pub trait Iterator { /// is propagated back to the caller immediately (short-circuiting). /// /// The initial value is the value the accumulator will have on the first - /// call. If applying the closure succeeded against every element of the + /// call. If applying the closure succeeded against every element of the /// iterator, `try_fold()` returns the final accumulator as success. /// /// Folding is useful whenever you have a collection of something, and want @@ -1541,10 +1528,10 @@ pub trait Iterator { /// do something better than the default `for` loop implementation. /// /// In particular, try to have this call `try_fold()` on the internal parts - /// from which this iterator is composed. If multiple calls are needed, + /// from which this iterator is composed. If multiple calls are needed, /// the `?` operator may be convenient for chaining the accumulator value /// along, but beware any invariants that need to be upheld before those - /// early returns. 
This is a `&mut self` method, so iteration needs to be + /// early returns. This is a `&mut self` method, so iteration needs to be /// resumable after hitting an error here. /// /// # Examples @@ -2021,12 +2008,7 @@ pub trait Iterator { #[stable(feature = "rust1", since = "1.0.0")] fn max(self) -> Option where Self: Sized, Self::Item: Ord { - select_fold1(self, - |_| (), - // switch to y even if it is only equal, to preserve - // stability. - |_, x, _, y| *x <= *y) - .map(|(_, x)| x) + self.max_by(Ord::cmp) } /// Returns the minimum element of an iterator. @@ -2051,12 +2033,7 @@ pub trait Iterator { #[stable(feature = "rust1", since = "1.0.0")] fn min(self) -> Option where Self: Sized, Self::Item: Ord { - select_fold1(self, - |_| (), - // only switch to y if it is strictly smaller, to - // preserve stability. - |_, x, _, y| *x > *y) - .map(|(_, x)| x) + self.min_by(Ord::cmp) } /// Returns the element that gives the maximum value from the @@ -2075,15 +2052,11 @@ pub trait Iterator { /// ``` #[inline] #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] - fn max_by_key(self, f: F) -> Option + fn max_by_key(self, mut f: F) -> Option where Self: Sized, F: FnMut(&Self::Item) -> B, { - select_fold1(self, - f, - // switch to y even if it is only equal, to preserve - // stability. - |x_p, _, y_p, _| x_p <= y_p) - .map(|(_, x)| x) + // switch to y even if it is only equal, to preserve stability. + select_fold1(self.map(|x| (f(&x), x)), |(x_p, _), (y_p, _)| x_p <= y_p).map(|(_, x)| x) } /// Returns the element that gives the maximum value with respect to the @@ -2105,12 +2078,8 @@ pub trait Iterator { fn max_by(self, mut compare: F) -> Option where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { - select_fold1(self, - |_| (), - // switch to y even if it is only equal, to preserve - // stability. - |_, x, _, y| Ordering::Greater != compare(x, y)) - .map(|(_, x)| x) + // switch to y even if it is only equal, to preserve stability. + select_fold1(self, |x, y| compare(x, y) != Ordering::Greater) } /// Returns the element that gives the minimum value from the @@ -2128,15 +2097,11 @@ pub trait Iterator { /// assert_eq!(*a.iter().min_by_key(|x| x.abs()).unwrap(), 0); /// ``` #[stable(feature = "iter_cmp_by_key", since = "1.6.0")] - fn min_by_key(self, f: F) -> Option + fn min_by_key(self, mut f: F) -> Option where Self: Sized, F: FnMut(&Self::Item) -> B, { - select_fold1(self, - f, - // only switch to y if it is strictly smaller, to - // preserve stability. - |x_p, _, y_p, _| x_p > y_p) - .map(|(_, x)| x) + // only switch to y if it is strictly smaller, to preserve stability. + select_fold1(self.map(|x| (f(&x), x)), |(x_p, _), (y_p, _)| x_p > y_p).map(|(_, x)| x) } /// Returns the element that gives the minimum value with respect to the @@ -2158,12 +2123,8 @@ pub trait Iterator { fn min_by(self, mut compare: F) -> Option where Self: Sized, F: FnMut(&Self::Item, &Self::Item) -> Ordering, { - select_fold1(self, - |_| (), - // switch to y even if it is strictly smaller, to - // preserve stability. - |_, x, _, y| Ordering::Greater == compare(x, y)) - .map(|(_, x)| x) + // only switch to y if it is strictly smaller, to preserve stability. + select_fold1(self, |x, y| compare(x, y) == Ordering::Greater) } @@ -2193,7 +2154,7 @@ pub trait Iterator { #[inline] #[stable(feature = "rust1", since = "1.0.0")] fn rev(self) -> Rev where Self: Sized + DoubleEndedIterator { - Rev{iter: self} + Rev::new(self) } /// Converts an iterator of pairs into a pair of containers. 
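The comments on `select_fold1` above ("switch to y even if it is only equal" versus "only switch if it is strictly smaller") are what preserve the documented stability of the `max*` and `min*` family: equal maxima resolve to the last element, equal minima to the first. A small check (standard types only):

```rust
let pairs = [("first", 1), ("last", 1)];

// max_by_key keeps the *last* of equally-maximal elements...
assert_eq!(pairs.iter().max_by_key(|&&(_, k)| k).unwrap().0, "last");
// ...while min_by_key keeps the *first* of equally-minimal elements.
assert_eq!(pairs.iter().min_by_key(|&&(_, k)| k).unwrap().0, "first");
```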
@@ -2235,6 +2196,35 @@ pub trait Iterator { (ts, us) } + /// Creates an iterator which copies all of its elements. + /// + /// This is useful when you have an iterator over `&T`, but you need an + /// iterator over `T`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(iter_copied)] + /// + /// let a = [1, 2, 3]; + /// + /// let v_cloned: Vec<_> = a.iter().copied().collect(); + /// + /// // copied is the same as .map(|&x| x) + /// let v_map: Vec<_> = a.iter().map(|&x| x).collect(); + /// + /// assert_eq!(v_cloned, vec![1, 2, 3]); + /// assert_eq!(v_map, vec![1, 2, 3]); + /// ``` + #[unstable(feature = "iter_copied", issue = "57127")] + fn copied<'a, T: 'a>(self) -> Copied + where Self: Sized + Iterator, T: Copy + { + Copied::new(self) + } + /// Creates an iterator which [`clone`]s all of its elements. /// /// This is useful when you have an iterator over `&T`, but you need an @@ -2261,7 +2251,7 @@ pub trait Iterator { fn cloned<'a, T: 'a>(self) -> Cloned where Self: Sized + Iterator, T: Clone { - Cloned { it: self } + Cloned::new(self) } /// Repeats an iterator endlessly. @@ -2292,7 +2282,7 @@ pub trait Iterator { #[stable(feature = "rust1", since = "1.0.0")] #[inline] fn cycle(self) -> Cycle where Self: Sized + Clone { - Cycle{orig: self.clone(), iter: self} + Cycle::new(self) } /// Sums the elements of an iterator. @@ -2339,7 +2329,7 @@ pub trait Iterator { /// /// ``` /// fn factorial(n: u32) -> u32 { - /// (1..).take_while(|&i| i <= n).product() + /// (1..=n).product() /// } /// assert_eq!(factorial(0), 1); /// assert_eq!(factorial(1), 1); @@ -2445,177 +2435,171 @@ pub trait Iterator { /// Determines if the elements of this `Iterator` are unequal to those of /// another. #[stable(feature = "iter_order", since = "1.5.0")] - fn ne(mut self, other: I) -> bool where + fn ne(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialEq, Self: Sized, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => return other.next().is_some(), - Some(val) => val, - }; - - let y = match other.next() { - None => return true, - Some(val) => val, - }; - - if x != y { return true } - } + !self.eq(other) } /// Determines if the elements of this `Iterator` are lexicographically /// less than those of another. #[stable(feature = "iter_order", since = "1.5.0")] - fn lt(mut self, other: I) -> bool where + fn lt(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd, Self: Sized, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => return other.next().is_some(), - Some(val) => val, - }; - - let y = match other.next() { - None => return false, - Some(val) => val, - }; - - match x.partial_cmp(&y) { - Some(Ordering::Less) => return true, - Some(Ordering::Equal) => (), - Some(Ordering::Greater) => return false, - None => return false, - } - } + self.partial_cmp(other) == Some(Ordering::Less) } /// Determines if the elements of this `Iterator` are lexicographically /// less or equal to those of another. 
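With the rewrite above, `lt`, `le`, `gt`, and `ge` are thin wrappers over `partial_cmp` rather than hand-rolled loops. A brief sketch of what that means in practice, including the incomparable (`NaN`) case:

```rust
use std::cmp::Ordering;

let a = [1.0_f64, 2.0];
let b = [1.0_f64, 3.0];
assert_eq!(a.iter().partial_cmp(b.iter()), Some(Ordering::Less));
assert!(a.iter().lt(b.iter()));

// NaN makes the sequences incomparable, so partial_cmp is None and
// every ordering test is simply false.
let n = [std::f64::NAN];
assert_eq!(n.iter().partial_cmp(n.iter()), None);
assert!(!n.iter().lt(n.iter()));
assert!(!n.iter().gt(n.iter()));
```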
#[stable(feature = "iter_order", since = "1.5.0")] - fn le(mut self, other: I) -> bool where + fn le(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd, Self: Sized, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => { other.next(); return true; }, - Some(val) => val, - }; - - let y = match other.next() { - None => return false, - Some(val) => val, - }; - - match x.partial_cmp(&y) { - Some(Ordering::Less) => return true, - Some(Ordering::Equal) => (), - Some(Ordering::Greater) => return false, - None => return false, - } + match self.partial_cmp(other) { + Some(Ordering::Less) | Some(Ordering::Equal) => true, + _ => false, } } /// Determines if the elements of this `Iterator` are lexicographically /// greater than those of another. #[stable(feature = "iter_order", since = "1.5.0")] - fn gt(mut self, other: I) -> bool where + fn gt(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd, Self: Sized, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => { other.next(); return false; }, - Some(val) => val, - }; - - let y = match other.next() { - None => return true, - Some(val) => val, - }; - - match x.partial_cmp(&y) { - Some(Ordering::Less) => return false, - Some(Ordering::Equal) => (), - Some(Ordering::Greater) => return true, - None => return false, - } - } + self.partial_cmp(other) == Some(Ordering::Greater) } /// Determines if the elements of this `Iterator` are lexicographically /// greater than or equal to those of another. #[stable(feature = "iter_order", since = "1.5.0")] - fn ge(mut self, other: I) -> bool where + fn ge(self, other: I) -> bool where I: IntoIterator, Self::Item: PartialOrd, Self: Sized, { - let mut other = other.into_iter(); - - loop { - let x = match self.next() { - None => return other.next().is_none(), - Some(val) => val, - }; + match self.partial_cmp(other) { + Some(Ordering::Greater) | Some(Ordering::Equal) => true, + _ => false, + } + } - let y = match other.next() { - None => return true, - Some(val) => val, - }; + /// Checks if the elements of this iterator are sorted. + /// + /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the + /// iterator yields exactly zero or one element, `true` is returned. + /// + /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition + /// implies that this function returns `false` if any two consecutive items are not + /// comparable. + /// + /// # Examples + /// + /// ``` + /// #![feature(is_sorted)] + /// + /// assert!([1, 2, 2, 9].iter().is_sorted()); + /// assert!(![1, 3, 2, 4].iter().is_sorted()); + /// assert!([0].iter().is_sorted()); + /// assert!(std::iter::empty::().is_sorted()); + /// assert!(![0.0, 1.0, std::f32::NAN].iter().is_sorted()); + /// ``` + #[inline] + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + fn is_sorted(self) -> bool + where + Self: Sized, + Self::Item: PartialOrd, + { + self.is_sorted_by(|a, b| a.partial_cmp(b)) + } - match x.partial_cmp(&y) { - Some(Ordering::Less) => return false, - Some(Ordering::Equal) => (), - Some(Ordering::Greater) => return true, - None => return false, + /// Checks if the elements of this iterator are sorted using the given comparator function. + /// + /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare` + /// function to determine the ordering of two elements. 
Apart from that, it's equivalent to + /// [`is_sorted`]; see its documentation for more information. + /// + /// [`is_sorted`]: trait.Iterator.html#method.is_sorted + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + fn is_sorted_by(mut self, mut compare: F) -> bool + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Option + { + let mut last = match self.next() { + Some(e) => e, + None => return true, + }; + + while let Some(curr) = self.next() { + if compare(&last, &curr) + .map(|o| o == Ordering::Greater) + .unwrap_or(true) + { + return false; } + last = curr; } + + true + } + + /// Checks if the elements of this iterator are sorted using the given key extraction + /// function. + /// + /// Instead of comparing the iterator's elements directly, this function compares the keys of + /// the elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see + /// its documentation for more information. + /// + /// [`is_sorted`]: trait.Iterator.html#method.is_sorted + /// + /// # Examples + /// + /// ``` + /// #![feature(is_sorted)] + /// + /// assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len())); + /// assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs())); + /// ``` + #[inline] + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + fn is_sorted_by_key(self, mut f: F) -> bool + where + Self: Sized, + F: FnMut(&Self::Item) -> K, + K: PartialOrd + { + self.is_sorted_by(|a, b| f(a).partial_cmp(&f(b))) } } -/// Select an element from an iterator based on the given "projection" -/// and "comparison" function. +/// Select an element from an iterator based on the given "comparison" +/// function. /// /// This is an idiosyncratic helper to try to factor out the /// commonalities of {max,min}{,_by}. In particular, this avoids /// having to implement optimizations several times. #[inline] -fn select_fold1(mut it: I, - mut f_proj: FProj, - mut f_cmp: FCmp) -> Option<(B, I::Item)> - where I: Iterator, - FProj: FnMut(&I::Item) -> B, - FCmp: FnMut(&B, &I::Item, &B, &I::Item) -> bool +fn select_fold1(mut it: I, mut f: F) -> Option + where + I: Iterator, + F: FnMut(&I::Item, &I::Item) -> bool, { // start with the first element as our selection. This avoids // having to use `Option`s inside the loop, translating to a // sizeable performance gain (6x in one case). it.next().map(|first| { - let first_p = f_proj(&first); - - it.fold((first_p, first), |(sel_p, sel), x| { - let x_p = f_proj(&x); - if f_cmp(&sel_p, &sel, &x_p, &x) { - (x_p, x) - } else { - (sel_p, sel) - } - }) + it.fold(first, |sel, x| if f(&sel, &x) { x } else { sel }) }) } diff --git a/src/libcore/iter/traits/marker.rs b/src/libcore/iter/traits/marker.rs new file mode 100644 index 0000000000000..602619bce5a96 --- /dev/null +++ b/src/libcore/iter/traits/marker.rs @@ -0,0 +1,44 @@ +/// An iterator that always continues to yield `None` when exhausted. +/// +/// Calling next on a fused iterator that has returned `None` once is guaranteed +/// to return [`None`] again. This trait should be implemented by all iterators +/// that behave this way because it allows optimizing [`Iterator::fuse`]. +/// +/// Note: In general, you should not use `FusedIterator` in generic bounds if +/// you need a fused iterator. Instead, you should just call [`Iterator::fuse`] +/// on the iterator. If the iterator is already fused, the additional [`Fuse`] +/// wrapper will be a no-op with no performance penalty. 
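A short sketch of the guarantee that `FusedIterator` and [`Iterator::fuse`] encode, using a deliberately ill-behaved iterator (`Flicker` is a made-up type for illustration; everything else is the standard library):

```rust
// An iterator that alternates between Some and None forever.
struct Flicker(bool);

impl Iterator for Flicker {
    type Item = u32;
    fn next(&mut self) -> Option<u32> {
        self.0 = !self.0;
        if self.0 { Some(1) } else { None }
    }
}

let mut raw = Flicker(false);
assert_eq!(raw.next(), Some(1));
assert_eq!(raw.next(), None);
assert_eq!(raw.next(), Some(1)); // not fused: None was not sticky

let mut fused = Flicker(false).fuse();
assert_eq!(fused.next(), Some(1));
assert_eq!(fused.next(), None);
assert_eq!(fused.next(), None); // Fuse keeps returning None from here on
```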
+/// +/// [`None`]: ../../std/option/enum.Option.html#variant.None +/// [`Iterator::fuse`]: ../../std/iter/trait.Iterator.html#method.fuse +/// [`Fuse`]: ../../std/iter/struct.Fuse.html +#[stable(feature = "fused", since = "1.26.0")] +pub trait FusedIterator: Iterator {} + +#[stable(feature = "fused", since = "1.26.0")] +impl FusedIterator for &mut I {} + +/// An iterator that reports an accurate length using size_hint. +/// +/// The iterator reports a size hint where it is either exact +/// (lower bound is equal to upper bound), or the upper bound is [`None`]. +/// The upper bound must only be [`None`] if the actual iterator length is +/// larger than [`usize::MAX`]. In that case, the lower bound must be +/// [`usize::MAX`], resulting in a [`.size_hint`] of `(usize::MAX, None)`. +/// +/// The iterator must produce exactly the number of elements it reported +/// or diverge before reaching the end. +/// +/// # Safety +/// +/// This trait must only be implemented when the contract is upheld. +/// Consumers of this trait must inspect [`.size_hint`]’s upper bound. +/// +/// [`None`]: ../../std/option/enum.Option.html#variant.None +/// [`usize::MAX`]: ../../std/usize/constant.MAX.html +/// [`.size_hint`]: ../../std/iter/trait.Iterator.html#method.size_hint +#[unstable(feature = "trusted_len", issue = "37572")] +pub unsafe trait TrustedLen : Iterator {} + +#[unstable(feature = "trusted_len", issue = "37572")] +unsafe impl TrustedLen for &mut I {} diff --git a/src/libcore/iter/traits/mod.rs b/src/libcore/iter/traits/mod.rs new file mode 100644 index 0000000000000..cf3013f423c94 --- /dev/null +++ b/src/libcore/iter/traits/mod.rs @@ -0,0 +1,15 @@ +mod iterator; +mod double_ended; +mod exact_size; +mod collect; +mod accum; +mod marker; + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::iterator::Iterator; +pub use self::double_ended::DoubleEndedIterator; +pub use self::exact_size::ExactSizeIterator; +pub use self::collect::{FromIterator, IntoIterator, Extend}; +pub use self::accum::{Sum, Product}; +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::marker::{FusedIterator, TrustedLen}; diff --git a/src/libcore/iter_private.rs b/src/libcore/iter_private.rs index c4d54d2c7b81d..890db47b19700 100644 --- a/src/libcore/iter_private.rs +++ b/src/libcore/iter_private.rs @@ -1,14 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - /// An iterator whose items are random accessible efficiently /// /// # Safety diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index a51674fbfc71a..63688e70c45cb 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # The Rust Core Library //! //! The Rust Core Library is the dependency-free[^free] foundation of [The @@ -34,7 +24,7 @@ //! often generated by LLVM. Additionally, this library can make explicit //! calls to these functions. 
Their signatures are the same as found in C. //! These functions are often provided by the system libc, but can also be -//! provided by the [rlibc crate](https://crates.io/crates/rlibc). +//! provided by the [compiler-builtins crate](https://crates.io/crates/compiler_builtins). //! //! * `rust_begin_panic` - This function takes four arguments, a //! `fmt::Arguments`, a `&'static str`, and two `u32`'s. These four arguments @@ -61,17 +51,17 @@ #![cfg(not(test))] #![stable(feature = "core", since = "1.6.0")] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/", test(no_crate_inject, attr(deny(warnings))), test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))))] - #![no_core] -#![deny(missing_docs)] -#![deny(missing_debug_implementations)] + +#![warn(deprecated_in_future)] +#![warn(missing_docs)] +#![warn(intra_doc_link_resolution_failure)] +#![warn(missing_debug_implementations)] #![feature(allow_internal_unstable)] #![feature(arbitrary_self_types)] @@ -80,7 +70,6 @@ #![feature(cfg_target_has_atomic)] #![feature(concat_idents)] #![feature(const_fn)] -#![feature(const_int_ops)] #![feature(const_fn_union)] #![feature(custom_attribute)] #![feature(doc_cfg)] @@ -88,6 +77,8 @@ #![feature(extern_types)] #![feature(fundamental)] #![feature(intrinsics)] +#![feature(is_sorted)] +#![feature(iter_once_with)] #![feature(lang_items)] #![feature(link_llvm_intrinsics)] #![feature(never_type)] @@ -104,6 +95,7 @@ #![feature(simd_ffi)] #![feature(specialization)] #![feature(staged_api)] +#![feature(std_internals)] #![feature(stmt_expr_attributes)] #![feature(unboxed_closures)] #![feature(unsized_locals)] @@ -118,18 +110,20 @@ #![feature(mips_target_feature)] #![feature(aarch64_target_feature)] #![feature(wasm_target_feature)] +#![feature(avx512_target_feature)] +#![feature(cmpxchg16b_target_feature)] #![feature(const_slice_len)] #![feature(const_str_as_bytes)] #![feature(const_str_len)] -#![feature(const_let)] -#![feature(const_int_rotate)] -#![feature(const_int_wrapping)] -#![feature(const_int_sign)] #![feature(const_int_conversion)] #![feature(const_transmute)] #![feature(reverse_bits)] #![feature(non_exhaustive)] #![feature(structural_match)] +#![feature(abi_unadjusted)] +#![feature(adx_target_feature)] +#![feature(maybe_uninit, maybe_uninit_slice, maybe_uninit_array)] +#![feature(external_doc)] #[prelude_import] #[allow(unused)] @@ -224,34 +218,19 @@ pub mod task; pub mod alloc; // note: does not need to be public -mod iter_private; -mod nonzero; mod tuple; mod unit; -// Pull in the `coresimd` crate directly into libcore. This is where all the -// architecture-specific (and vendor-specific) intrinsics are defined. AKA -// things like SIMD and such. Note that the actual source for all this lies in a -// different repository, rust-lang-nursery/stdsimd. That's why the setup here is -// a bit wonky. -#[allow(unused_macros)] -macro_rules! test_v16 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! test_v32 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! test_v64 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! test_v128 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! 
test_v256 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! test_v512 { ($item:item) => {}; } -#[allow(unused_macros)] -macro_rules! vector_impl { ($([$f:ident, $($args:tt)*]),*) => { $($f!($($args)*);)* } } -#[path = "../stdsimd/coresimd/mod.rs"] +// Pull in the `core_arch` crate directly into libcore. The contents of +// `core_arch` are in a different repository: rust-lang-nursery/stdsimd. +// +// `core_arch` depends on libcore, but the contents of this module are +// set up in such a way that directly pulling it here works such that the +// crate uses the this crate as its libcore. +#[path = "../stdsimd/crates/core_arch/src/mod.rs"] #[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)] #[unstable(feature = "stdsimd", issue = "48556")] -mod coresimd; +mod core_arch; #[stable(feature = "simd_arch", since = "1.27.0")] -pub use coresimd::arch; +pub use core_arch::arch; diff --git a/src/libcore/macros.rs b/src/libcore/macros.rs index 5ba0e949483ae..d2ee9b11b3640 100644 --- a/src/libcore/macros.rs +++ b/src/libcore/macros.rs @@ -1,16 +1,8 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Entry point of thread panic, for details, see std::macros +/// Panics the current thread. +/// +/// For details, see `std::macros`. #[macro_export] -#[allow_internal_unstable] +#[allow_internal_unstable(core_panic, __rust_unstable_column)] #[stable(feature = "core", since = "1.6.0")] macro_rules! panic { () => ( @@ -55,9 +47,12 @@ macro_rules! assert_eq { match (&$left, &$right) { (left_val, right_val) => { if !(*left_val == *right_val) { + // The reborrows below are intentional. Without them, the stack slot for the + // borrow is initialized even before the values are compared, leading to a + // noticeable slow down. panic!(r#"assertion failed: `(left == right)` left: `{:?}`, - right: `{:?}`"#, left_val, right_val) + right: `{:?}`"#, &*left_val, &*right_val) } } } @@ -69,9 +64,12 @@ macro_rules! assert_eq { match (&($left), &($right)) { (left_val, right_val) => { if !(*left_val == *right_val) { + // The reborrows below are intentional. Without them, the stack slot for the + // borrow is initialized even before the values are compared, leading to a + // noticeable slow down. panic!(r#"assertion failed: `(left == right)` left: `{:?}`, - right: `{:?}`: {}"#, left_val, right_val, + right: `{:?}`: {}"#, &*left_val, &*right_val, format_args!($($arg)+)) } } @@ -106,9 +104,12 @@ macro_rules! assert_ne { match (&$left, &$right) { (left_val, right_val) => { if *left_val == *right_val { + // The reborrows below are intentional. Without them, the stack slot for the + // borrow is initialized even before the values are compared, leading to a + // noticeable slow down. panic!(r#"assertion failed: `(left != right)` left: `{:?}`, - right: `{:?}`"#, left_val, right_val) + right: `{:?}`"#, &*left_val, &*right_val) } } } @@ -120,9 +121,12 @@ macro_rules! assert_ne { match (&($left), &($right)) { (left_val, right_val) => { if *left_val == *right_val { + // The reborrows below are intentional. Without them, the stack slot for the + // borrow is initialized even before the values are compared, leading to a + // noticeable slow down. 
panic!(r#"assertion failed: `(left != right)` left: `{:?}`, - right: `{:?}`: {}"#, left_val, right_val, + right: `{:?}`: {}"#, &*left_val, &*right_val, format_args!($($arg)+)) } } @@ -130,7 +134,7 @@ macro_rules! assert_ne { }); } -/// Ensure that a boolean expression is `true` at runtime. +/// Asserts that a boolean expression is `true` at runtime. /// /// This will invoke the [`panic!`] macro if the provided expression cannot be /// evaluated to `true` at runtime. @@ -234,8 +238,7 @@ macro_rules! debug_assert_ne { ($($arg:tt)*) => (if cfg!(debug_assertions) { assert_ne!($($arg)*); }) } -/// Helper macro for reducing boilerplate code for matching `Result` together -/// with converting downstream errors. +/// Unwraps a result or propagates its error. /// /// The `?` operator was added to replace `try!` and should be used instead. /// Furthermore, `try` is a reserved word in Rust 2018, so if you must use @@ -310,7 +313,7 @@ macro_rules! r#try { ($expr:expr,) => (r#try!($expr)); } -/// Write formatted data into a buffer. +/// Writes formatted data into a buffer. /// /// This macro accepts a format string, a list of arguments, and a 'writer'. Arguments will be /// formatted according to the specified format string and the result will be passed to the writer. @@ -419,7 +422,7 @@ macro_rules! write { /// ``` #[macro_export] #[stable(feature = "rust1", since = "1.0.0")] -#[allow_internal_unstable] +#[allow_internal_unstable(format_args_nl)] macro_rules! writeln { ($dst:expr) => ( write!($dst, "\n") @@ -432,7 +435,7 @@ macro_rules! writeln { ); } -/// A utility macro for indicating unreachable code. +/// Indicates unreachable code. /// /// This is useful any time that the compiler can't determine that some code is unreachable. For /// example: @@ -442,7 +445,7 @@ macro_rules! writeln { /// * Iterators that dynamically terminate. /// /// If the determination that the code is unreachable proves incorrect, the -/// program immediately terminates with a [`panic!`]. The function [`unreachable_unchecked`], +/// program immediately terminates with a [`panic!`]. The function [`unreachable_unchecked`], /// which belongs to the [`std::hint`] module, informs the compiler to /// optimize the code out of the release version entirely. /// @@ -500,10 +503,10 @@ macro_rules! unreachable { }); } -/// A standardized placeholder for marking unfinished code. +/// Indicates unfinished code. /// /// This can be useful if you are prototyping and are just looking to have your -/// code typecheck, or if you're implementing a trait that requires multiple +/// code type-check, or if you're implementing a trait that requires multiple /// methods, and you're only planning on using one of them. /// /// # Panics @@ -557,6 +560,82 @@ macro_rules! unimplemented { ($($arg:tt)+) => (panic!("not yet implemented: {}", format_args!($($arg)*))); } +/// Indicates unfinished code. +/// +/// This can be useful if you are prototyping and are just looking to have your +/// code typecheck. `todo!` works exactly like `unimplemented!`. The only +/// difference between the two macros is the name. +/// +/// # Panics +/// +/// This will always [panic!](macro.panic.html) +/// +/// # Examples +/// +/// Here's an example of some in-progress code. We have a trait `Foo`: +/// +/// ``` +/// trait Foo { +/// fn bar(&self); +/// fn baz(&self); +/// } +/// ``` +/// +/// We want to implement `Foo` on one of our types, but we also want to work on +/// just `bar()` first. 
In order for our code to compile, we need to implement +/// `baz()`, so we can use `todo!`: +/// +/// ``` +/// #![feature(todo_macro)] +/// +/// # trait Foo { +/// # fn bar(&self); +/// # fn baz(&self); +/// # } +/// struct MyStruct; +/// +/// impl Foo for MyStruct { +/// fn bar(&self) { +/// // implementation goes here +/// } +/// +/// fn baz(&self) { +/// // let's not worry about implementing baz() for now +/// todo!(); +/// } +/// } +/// +/// fn main() { +/// let s = MyStruct; +/// s.bar(); +/// +/// // we aren't even using baz() yet, so this is fine. +/// } +/// ``` +#[macro_export] +#[unstable(feature = "todo_macro", issue = "59277")] +macro_rules! todo { + () => (panic!("not yet implemented")); + ($($arg:tt)+) => (panic!("not yet implemented: {}", format_args!($($arg)*))); +} + +/// Creates an array of [`MaybeUninit`]. +/// +/// This macro constructs an uninitialized array of the type `[MaybeUninit; N]`. +/// +/// [`MaybeUninit`]: mem/union.MaybeUninit.html +#[macro_export] +#[unstable(feature = "maybe_uninit_array", issue = "53491")] +macro_rules! uninitialized_array { + // This `assume_init` is safe because an array of `MaybeUninit` does not + // require initialization. + // FIXME(#49147): Could be replaced by an array initializer, once those can + // be any const expression. + ($t:ty; $size:expr) => (unsafe { + MaybeUninit::<[MaybeUninit<$t>; $size]>::uninit().assume_init() + }); +} + /// Built-in macros to the compiler itself. /// /// These macros do not have any corresponding definition with a `macro_rules!` @@ -567,7 +646,7 @@ macro_rules! unimplemented { #[cfg(rustdoc)] mod builtin { - /// Unconditionally causes compilation to fail with the given error message when encountered. + /// Causes compilation to fail with the given error message when encountered. /// /// For more information, see the documentation for [`std::compile_error!`]. /// @@ -579,7 +658,7 @@ mod builtin { ($msg:expr,) => ({ /* compiler built-in */ }); } - /// The core macro for formatted string creation & output. + /// Constructs parameters for the other string-formatting macros. /// /// For more information, see the documentation for [`std::format_args!`]. /// @@ -591,7 +670,7 @@ mod builtin { ($fmt:expr, $($args:tt)*) => ({ /* compiler built-in */ }); } - /// Inspect an environment variable at compile time. + /// Inspects an environment variable at compile time. /// /// For more information, see the documentation for [`std::env!`]. /// @@ -603,7 +682,7 @@ mod builtin { ($name:expr,) => ({ /* compiler built-in */ }); } - /// Optionally inspect an environment variable at compile time. + /// Optionally inspects an environment variable at compile time. /// /// For more information, see the documentation for [`std::option_env!`]. /// @@ -615,7 +694,7 @@ mod builtin { ($name:expr,) => ({ /* compiler built-in */ }); } - /// Concatenate identifiers into one identifier. + /// Concatenates identifiers into one identifier. /// /// For more information, see the documentation for [`std::concat_idents!`]. /// @@ -639,7 +718,7 @@ mod builtin { ($($e:expr,)*) => ({ /* compiler built-in */ }); } - /// A macro which expands to the line number on which it was invoked. + /// Expands to the line number on which it was invoked. /// /// For more information, see the documentation for [`std::line!`]. /// @@ -648,7 +727,7 @@ mod builtin { #[rustc_doc_only_macro] macro_rules! line { () => ({ /* compiler built-in */ }) } - /// A macro which expands to the column number on which it was invoked. 
+ /// Expands to the column number on which it was invoked. /// /// For more information, see the documentation for [`std::column!`]. /// @@ -657,7 +736,7 @@ mod builtin { #[rustc_doc_only_macro] macro_rules! column { () => ({ /* compiler built-in */ }) } - /// A macro which expands to the file name from which it was invoked. + /// Expands to the file name from which it was invoked. /// /// For more information, see the documentation for [`std::file!`]. /// @@ -666,7 +745,7 @@ mod builtin { #[rustc_doc_only_macro] macro_rules! file { () => ({ /* compiler built-in */ }) } - /// A macro which stringifies its arguments. + /// Stringifies its arguments. /// /// For more information, see the documentation for [`std::stringify!`]. /// @@ -708,7 +787,7 @@ mod builtin { #[rustc_doc_only_macro] macro_rules! module_path { () => ({ /* compiler built-in */ }) } - /// Boolean evaluation of configuration flags, at compile-time. + /// Evaluates boolean combinations of configuration flags, at compile-time. /// /// For more information, see the documentation for [`std::cfg!`]. /// @@ -717,7 +796,7 @@ mod builtin { #[rustc_doc_only_macro] macro_rules! cfg { ($($cfg:tt)*) => ({ /* compiler built-in */ }) } - /// Parse a file as an expression or an item according to the context. + /// Parses a file as an expression or an item according to the context. /// /// For more information, see the documentation for [`std::include!`]. /// @@ -729,7 +808,7 @@ mod builtin { ($file:expr,) => ({ /* compiler built-in */ }); } - /// Ensure that a boolean expression is `true` at runtime. + /// Asserts that a boolean expression is `true` at runtime. /// /// For more information, see the documentation for [`std::assert!`]. /// diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index d3d16127ed5fe..9b1ead7edd68b 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Primitive traits and types representing basic properties of types. //! //! Rust types can be classified in various useful ways according to @@ -88,14 +78,14 @@ impl !Send for *mut T { } /// // be made into an object /// ``` /// -/// [trait object]: ../../book/first-edition/trait-objects.html +/// [trait object]: ../../book/ch17-02-trait-objects.html #[stable(feature = "rust1", since = "1.0.0")] #[lang = "sized"] #[rustc_on_unimplemented( on(parent_trait="std::path::Path", label="borrow the `Path` instead"), message="the size for values of type `{Self}` cannot be known at compilation time", label="doesn't have a size known at compile-time", - note="to learn more, visit Freeze for &mut T {} /// Types which can be safely moved after being pinned. /// -/// Since Rust itself has no notion of immovable types, and will consider moves to always be safe, +/// Since Rust itself has no notion of immovable types, and considers moves +/// (e.g. through assignment or [`mem::replace`]) to always be safe, /// this trait cannot prevent types from moving by itself. 
/// -/// Instead it can be used to prevent moves through the type system, -/// by controlling the behavior of pointers wrapped in the [`Pin`] wrapper, +/// Instead it is used to prevent moves through the type system, +/// by controlling the behavior of pointers `P` wrapped in the [`Pin
<P>
`] wrapper, /// which "pin" the type in place by not allowing it to be moved out of them. /// See the [`pin module`] documentation for more information on pinning. /// /// Implementing this trait lifts the restrictions of pinning off a type, -/// which then allows it to move out with functions such as [`replace`]. +/// which then allows it to move out with functions such as [`mem::replace`]. +/// +/// `Unpin` has no consequence at all for non-pinned data. In particular, +/// [`mem::replace`] happily moves `!Unpin` data (it works for any `&mut T`, not +/// just when `T: Unpin`). However, you cannot use +/// [`mem::replace`] on data wrapped inside a [`Pin
<P>
`] because you cannot get the +/// `&mut T` you need for that, and *that* is what makes this system work. /// /// So this, for example, can only be done on types implementing `Unpin`: /// /// ```rust -/// #![feature(pin)] -/// use std::mem::replace; +/// use std::mem; /// use std::pin::Pin; /// /// let mut string = "this".to_string(); /// let mut pinned_string = Pin::new(&mut string); /// -/// // dereferencing the pointer mutably is only possible because String implements Unpin -/// replace(&mut *pinned_string, "other".to_string()); +/// // We need a mutable reference to call `mem::replace`. +/// // We can obtain such a reference by (implicitly) invoking `Pin::deref_mut`, +/// // but that is only possible because `String` implements `Unpin`. +/// mem::replace(&mut *pinned_string, "other".to_string()); /// ``` /// /// This trait is automatically implemented for almost every type. /// -/// [`replace`]: ../../std/mem/fn.replace.html -/// [`Pin`]: ../pin/struct.Pin.html +/// [`mem::replace`]: ../../std/mem/fn.replace.html +/// [`Pin
<P>
`]: ../pin/struct.Pin.html /// [`pin module`]: ../../std/pin/index.html -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] +#[lang = "unpin"] pub auto trait Unpin {} /// A marker type which does not implement `Unpin`. /// /// If a type contains a `PhantomPinned`, it will not implement `Unpin` by default. -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] #[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] pub struct PhantomPinned; -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl !Unpin for PhantomPinned {} -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl<'a, T: ?Sized + 'a> Unpin for &'a T {} -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl<'a, T: ?Sized + 'a> Unpin for &'a mut T {} /// Implementations of `Copy` for primitive types. diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index afd9fcb1fba84..66bcf1f7d0101 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Basic functions for dealing with memory. //! //! This module contains functions for querying the size and alignment of @@ -25,6 +15,7 @@ use ptr; use ops::{Deref, DerefMut}; #[stable(feature = "rust1", since = "1.0.0")] +#[doc(inline)] pub use intrinsics::transmute; /// Takes ownership and "forgets" about the value **without running its destructor**. @@ -304,11 +295,11 @@ pub const fn size_of() -> usize { /// Returns the size of the pointed-to value in bytes. /// /// This is usually the same as `size_of::()`. However, when `T` *has* no -/// statically known size, e.g., a slice [`[T]`][slice] or a [trait object], +/// statically-known size, e.g., a slice [`[T]`][slice] or a [trait object], /// then `size_of_val` can be used to get the dynamically-known size. /// /// [slice]: ../../std/primitive.slice.html -/// [trait object]: ../../book/first-edition/trait-objects.html +/// [trait object]: ../../book/ch17-02-trait-objects.html /// /// # Examples /// @@ -412,7 +403,7 @@ pub fn align_of_val(val: &T) -> usize { unsafe { intrinsics::min_align_of_val(val) } } -/// Returns whether dropping values of type `T` matters. +/// Returns `true` if dropping values of type `T` matters. /// /// This is purely an optimization hint, and may be implemented conservatively: /// it may return `true` for types that don't actually need to be dropped. @@ -499,9 +490,9 @@ pub const fn needs_drop() -> bool { /// assert_eq!(0, x); /// ``` #[inline] -#[rustc_deprecated(since = "2.0.0", reason = "use `mem::MaybeUninit::zeroed` instead")] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn zeroed() -> T { + intrinsics::panic_if_uninhabited::(); intrinsics::init() } @@ -530,6 +521,12 @@ pub unsafe fn zeroed() -> T { /// it goes out of scope (and therefore would be dropped). Note that this /// includes a `panic` occurring and unwinding the stack suddenly. 
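The `marker.rs` hunk above stabilizes `Unpin` and `PhantomPinned`. A brief sketch of the opt-out they provide; the `Immovable` type here is invented for illustration:

```rust
use std::marker::PhantomPinned;
use std::pin::Pin;

// Embedding `PhantomPinned` removes the automatic `Unpin` impl, so a pinned
// pointer to this type will not hand out a plain `&mut Immovable` safely.
struct Immovable {
    data: String,
    _pin: PhantomPinned,
}

fn inspect(mut boxed: Pin<Box<Immovable>>) -> usize {
    // Shared access through `Deref` is still fine; moving the value out with
    // `mem::replace` is not, because we cannot get `&mut Immovable` from the pin.
    let r: Pin<&mut Immovable> = boxed.as_mut();
    r.data.len()
}

fn main() {
    let b = Box::pin(Immovable { data: String::from("pinned"), _pin: PhantomPinned });
    assert_eq!(inspect(b), 6);
}
```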
/// +/// If you partially initialize an array, you may need to use +/// [`ptr::drop_in_place`][drop_in_place] to remove the elements you have fully +/// initialized followed by [`mem::forget`][mem_forget] to prevent drop running +/// on the array. If a partially allocated array is dropped this will lead to +/// undefined behaviour. +/// /// # Examples /// /// Here's how to safely initialize an array of [`Vec`]s. @@ -583,18 +580,52 @@ pub unsafe fn zeroed() -> T { /// println!("{:?}", &data[0]); /// ``` /// +/// This example shows how to handle partially initialized arrays, which could +/// be found in low-level datastructures. +/// +/// ``` +/// use std::mem; +/// use std::ptr; +/// +/// // Count the number of elements we have assigned. +/// let mut data_len: usize = 0; +/// let mut data: [String; 1000]; +/// +/// unsafe { +/// data = mem::uninitialized(); +/// +/// for elem in &mut data[0..500] { +/// ptr::write(elem, String::from("hello")); +/// data_len += 1; +/// } +/// +/// // For each item in the array, drop if we allocated it. +/// for i in &mut data[0..data_len] { +/// ptr::drop_in_place(i); +/// } +/// } +/// // Forget the data. If this is allowed to drop, you may see a crash such as: +/// // 'mem_uninit_test(2457,0x7fffb55dd380) malloc: *** error for object +/// // 0x7ff3b8402920: pointer being freed was not allocated' +/// mem::forget(data); +/// ``` +/// /// [`Vec`]: ../../std/vec/struct.Vec.html /// [`vec!`]: ../../std/macro.vec.html /// [`Clone`]: ../../std/clone/trait.Clone.html /// [ub]: ../../reference/behavior-considered-undefined.html /// [write]: ../ptr/fn.write.html +/// [drop_in_place]: ../ptr/fn.drop_in_place.html +/// [mem_zeroed]: fn.zeroed.html +/// [mem_forget]: fn.forget.html /// [copy]: ../intrinsics/fn.copy.html /// [copy_no]: ../intrinsics/fn.copy_nonoverlapping.html /// [`Drop`]: ../ops/trait.Drop.html #[inline] -#[rustc_deprecated(since = "2.0.0", reason = "use `mem::MaybeUninit::uninitialized` instead")] +#[rustc_deprecated(since = "2.0.0", reason = "use `mem::MaybeUninit::uninit` instead")] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn uninitialized() -> T { + intrinsics::panic_if_uninhabited::(); intrinsics::uninit() } @@ -681,8 +712,7 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// Disposes of a value. /// -/// While this does call the argument's implementation of [`Drop`][drop], -/// it will not release any borrows, as borrows are based on lexical scope. +/// This does call the argument's implementation of [`Drop`][drop]. /// /// This effectively does nothing for types which implement `Copy`, e.g. /// integers. 
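For contrast with the `mem::uninitialized` example above, here is a hedged sketch of the same partial-initialization idea written against the `MaybeUninit` API this diff introduces (on the compiler this diff targets, these methods are still unstable behind the `maybe_uninit` feature):

```rust
use std::mem::MaybeUninit;

// Fill only the first `n` slots of a fixed-size buffer, then read back just the
// slots that were written. `u32` is `Copy`, so reading via `as_ptr` cannot
// cause a double drop.
fn first_n_squares(n: usize) -> Vec<u32> {
    // An array of uninitialized slots is itself fully initialized, so this is sound.
    let mut buf: [MaybeUninit<u32>; 8] = unsafe { MaybeUninit::uninit().assume_init() };
    let n = n.min(8);
    for i in 0..n {
        buf[i].write((i * i) as u32);
    }
    buf[..n].iter().map(|slot| unsafe { slot.as_ptr().read() }).collect()
}

fn main() {
    assert_eq!(first_n_squares(3), vec![0, 1, 4]);
}
```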
Such values are copied and _then_ moved into the function, so the @@ -709,32 +739,6 @@ pub fn replace(dest: &mut T, mut src: T) -> T { /// drop(v); // explicitly drop the vector /// ``` /// -/// Borrows are based on lexical scope, so this produces an error: -/// -/// ```compile_fail,E0502 -/// let mut v = vec![1, 2, 3]; -/// let x = &v[0]; -/// -/// drop(x); // explicitly drop the reference, but the borrow still exists -/// -/// v.push(4); // error: cannot borrow `v` as mutable because it is also -/// // borrowed as immutable -/// ``` -/// -/// An inner scope is needed to fix this: -/// -/// ``` -/// let mut v = vec![1, 2, 3]; -/// -/// { -/// let x = &v[0]; -/// -/// drop(x); // this is now redundant, as `x` is going out of scope anyway -/// } -/// -/// v.push(4); // no problems -/// ``` -/// /// Since [`RefCell`] enforces the borrow rules at runtime, `drop` can /// release a [`RefCell`] borrow: /// @@ -896,10 +900,16 @@ pub fn discriminant(v: &T) -> Discriminant { } } +// FIXME: Reference `MaybeUninit` from these docs, once that is stable. /// A wrapper to inhibit compiler from automatically calling `T`’s destructor. /// /// This wrapper is 0-cost. /// +/// `ManuallyDrop` is subject to the same layout optimizations as `T`. +/// As a consequence, it has *no effect* on the assumptions that the compiler makes +/// about all values being initialized at their type. In particular, initializing +/// a `ManuallyDrop<&mut T>` with [`mem::zeroed`] is undefined behavior. +/// /// # Examples /// /// This wrapper helps with explicitly documenting the drop order dependencies between fields of @@ -931,6 +941,8 @@ pub fn discriminant(v: &T) -> Discriminant { /// } /// } /// ``` +/// +/// [`mem::zeroed`]: fn.zeroed.html #[stable(feature = "manually_drop", since = "1.20.0")] #[lang = "manually_drop"] #[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] @@ -954,7 +966,7 @@ impl ManuallyDrop { ManuallyDrop { value } } - /// Extract the value from the `ManuallyDrop` container. + /// Extracts the value from the `ManuallyDrop` container. /// /// This allows the value to be dropped again. /// @@ -984,6 +996,9 @@ impl ManuallyDrop { /// /// This function semantically moves out the contained value without preventing further usage. /// It is up to the user of this method to ensure that this container is not used again. + /// + /// [`ManuallyDrop::drop`]: #method.drop + /// [`ManuallyDrop::into_inner`]: #method.into_inner #[must_use = "if you don't need the value, you can use `ManuallyDrop::drop` instead"] #[unstable(feature = "manually_drop_take", issue = "55422")] #[inline] @@ -1028,19 +1043,98 @@ impl DerefMut for ManuallyDrop { } } -/// A newtype to construct uninitialized instances of `T` +/// A wrapper to construct uninitialized instances of `T`. +/// +/// The compiler, in general, assumes that variables are properly initialized +/// at their respective type. For example, a variable of reference type must +/// be aligned and non-NULL. This is an invariant that must *always* be upheld, +/// even in unsafe code. As a consequence, zero-initializing a variable of reference +/// type causes instantaneous undefined behavior, no matter whether that reference +/// ever gets used to access memory: +/// +/// ```rust,no_run +/// #![feature(maybe_uninit)] +/// use std::mem::{self, MaybeUninit}; +/// +/// let x: &i32 = unsafe { mem::zeroed() }; // undefined behavior! 
+/// // The equivalent code with `MaybeUninit<&i32>`: +/// let x: &i32 = unsafe { MaybeUninit::zeroed().assume_init() }; // undefined behavior! +/// ``` +/// +/// This is exploited by the compiler for various optimizations, such as eliding +/// run-time checks and optimizing `enum` layout. +/// +/// Similarly, entirely uninitialized memory may have any content, while a `bool` must +/// always be `true` or `false`. Hence, creating an uninitialized `bool` is undefined behavior: +/// +/// ```rust,no_run +/// #![feature(maybe_uninit)] +/// use std::mem::{self, MaybeUninit}; +/// +/// let b: bool = unsafe { mem::uninitialized() }; // undefined behavior! +/// // The equivalent code with `MaybeUninit`: +/// let b: bool = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! +/// ``` +/// +/// Moreover, uninitialized memory is special in that the compiler knows that +/// it does not have a fixed value. This makes it undefined behavior to have +/// uninitialized data in a variable even if that variable has an integer type, +/// which otherwise can hold any bit pattern: +/// +/// ```rust,no_run +/// #![feature(maybe_uninit)] +/// use std::mem::{self, MaybeUninit}; +/// +/// let x: i32 = unsafe { mem::uninitialized() }; // undefined behavior! +/// // The equivalent code with `MaybeUninit`: +/// let x: i32 = unsafe { MaybeUninit::uninit().assume_init() }; // undefined behavior! +/// ``` +/// (Notice that the rules around uninitialized integers are not finalized yet, but +/// until they are, it is advisable to avoid them.) +/// +/// `MaybeUninit` serves to enable unsafe code to deal with uninitialized data. +/// It is a signal to the compiler indicating that the data here might *not* +/// be initialized: +/// +/// ```rust +/// #![feature(maybe_uninit)] +/// use std::mem::MaybeUninit; +/// +/// // Create an explicitly uninitialized reference. The compiler knows that data inside +/// // a `MaybeUninit` may be invalid, and hence this is not UB: +/// let mut x = MaybeUninit::<&i32>::uninit(); +/// // Set it to a valid value. +/// x.write(&0); +/// // Extract the initialized data -- this is only allowed *after* properly +/// // initializing `x`! +/// let x = unsafe { x.assume_init() }; +/// ``` +/// +/// The compiler then knows to not make any incorrect assumptions or optimizations on this code. +// +// FIXME before stabilizing, explain how to initialize a struct field-by-field. #[allow(missing_debug_implementations)] #[unstable(feature = "maybe_uninit", issue = "53491")] -// NOTE after stabilizing `MaybeUninit` proceed to deprecate `mem::{uninitialized,zeroed}` +#[derive(Copy)] +// NOTE: after stabilizing `MaybeUninit`, proceed to deprecate `mem::uninitialized`. pub union MaybeUninit { uninit: (), value: ManuallyDrop, } +#[unstable(feature = "maybe_uninit", issue = "53491")] +impl Clone for MaybeUninit { + #[inline(always)] + fn clone(&self) -> Self { + // Not calling `T::clone()`, we cannot know if we are initialized enough for that. + *self + } +} + impl MaybeUninit { - /// Create a new `MaybeUninit` initialized with the given value. + /// Creates a new `MaybeUninit` initialized with the given value. /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. + /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. 
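One of the FIXMEs in the `MaybeUninit` documentation above asks for an example of initializing a struct field-by-field. A hedged sketch follows; it uses `ptr::addr_of_mut!`, which is a later standard-library addition not present in this diff, so that no reference to an uninitialized field is ever created:

```rust
use std::mem::MaybeUninit;
use std::ptr;

#[derive(Debug, PartialEq)]
struct Pair { a: u32, b: String }

fn main() {
    let mut slot = MaybeUninit::<Pair>::uninit();
    let p = slot.as_mut_ptr();
    let pair = unsafe {
        // Write each field through a raw pointer; only after both writes is the
        // whole struct initialized and `assume_init` allowed.
        ptr::addr_of_mut!((*p).a).write(1);
        ptr::addr_of_mut!((*p).b).write(String::from("two"));
        slot.assume_init()
    };
    assert_eq!(pair, Pair { a: 1, b: String::from("two") });
}
```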
#[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] @@ -1048,97 +1142,329 @@ impl MaybeUninit { MaybeUninit { value: ManuallyDrop::new(val) } } - /// Create a new `MaybeUninit` in an uninitialized state. + /// Creates a new `MaybeUninit` in an uninitialized state. /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. + /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] - pub const fn uninitialized() -> MaybeUninit { + pub const fn uninit() -> MaybeUninit { MaybeUninit { uninit: () } } - /// Create a new `MaybeUninit` in an uninitialized state, with the memory being - /// filled with `0` bytes. It depends on `T` whether that already makes for + /// Deprecated before stabilization. + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + // FIXME: still used by stdsimd + // #[rustc_deprecated(since = "1.35.0", reason = "use `uninit` instead")] + pub const fn uninitialized() -> MaybeUninit { + Self::uninit() + } + + /// Creates a new `MaybeUninit` in an uninitialized state, with the memory being + /// filled with `0` bytes. It depends on `T` whether that already makes for /// proper initialization. For example, `MaybeUninit::zeroed()` is initialized, /// but `MaybeUninit<&'static i32>::zeroed()` is not because references must not /// be null. /// - /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. + /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. + /// + /// # Example + /// + /// Correct usage of this function: initializing a struct with zero, where all + /// fields of the struct can hold the bit-pattern 0 as a valid value. + /// + /// ```rust + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let x = MaybeUninit::<(u8, bool)>::zeroed(); + /// let x = unsafe { x.assume_init() }; + /// assert_eq!(x, (0, false)); + /// ``` + /// + /// *Incorrect* usage of this function: initializing a struct with zero, where some fields + /// cannot hold 0 as a valid value. + /// + /// ```rust,no_run + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// enum NotZero { One = 1, Two = 2 }; + /// + /// let x = MaybeUninit::<(u8, NotZero)>::zeroed(); + /// let x = unsafe { x.assume_init() }; + /// // Inside a pair, we create a `NotZero` that does not have a valid discriminant. + /// // This is undefined behavior. + /// ``` #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline] pub fn zeroed() -> MaybeUninit { - let mut u = MaybeUninit::::uninitialized(); + let mut u = MaybeUninit::::uninit(); unsafe { u.as_mut_ptr().write_bytes(0u8, 1); } u } - /// Set the value of the `MaybeUninit`. This overwrites any previous value without dropping it. + /// Sets the value of the `MaybeUninit`. This overwrites any previous value + /// without dropping it, so be careful not to use this twice unless you want to + /// skip running the destructor. For your convenience, this also returns a mutable + /// reference to the (now safely initialized) contents of `self`. 
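The sentence above is the motivation for `write` returning `&mut T`: the slot can be initialized and then further set up through the returned reference. A minimal sketch (on this branch `write` and `assume_init` are still unstable; both are stable on later compilers):

```rust
use std::mem::MaybeUninit;

fn main() {
    let mut slot = MaybeUninit::<Vec<u8>>::uninit();
    {
        // `write` initializes the slot and hands back `&mut Vec<u8>` for further setup.
        let v = slot.write(Vec::new());
        v.push(42);
    }
    // Safe to take the value out now that the slot is initialized.
    let v = unsafe { slot.assume_init() };
    assert_eq!(v, vec![42]);
}
```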
#[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] - pub fn set(&mut self, val: T) { + pub fn write(&mut self, val: T) -> &mut T { unsafe { self.value = ManuallyDrop::new(val); + self.get_mut() } } - /// Extract the value from the `MaybeUninit` container. This is a great way + /// Deprecated before stabilization. + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + #[rustc_deprecated(since = "1.35.0", reason = "use `write` instead")] + pub fn set(&mut self, val: T) -> &mut T { + self.write(val) + } + + /// Gets a pointer to the contained value. Reading from this pointer or turning it + /// into a reference is undefined behavior unless the `MaybeUninit` is initialized. + /// + /// # Examples + /// + /// Correct usage of this method: + /// + /// ```rust + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::>::uninit(); + /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); } + /// // Create a reference into the `MaybeUninit`. This is okay because we initialized it. + /// let x_vec = unsafe { &*x.as_ptr() }; + /// assert_eq!(x_vec.len(), 3); + /// ``` + /// + /// *Incorrect* usage of this method: + /// + /// ```rust,no_run + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let x = MaybeUninit::>::uninit(); + /// let x_vec = unsafe { &*x.as_ptr() }; + /// // We have created a reference to an uninitialized vector! This is undefined behavior. + /// ``` + /// + /// (Notice that the rules around references to uninitialized data are not finalized yet, but + /// until they are, it is advisable to avoid them.) + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + pub fn as_ptr(&self) -> *const T { + unsafe { &*self.value as *const T } + } + + /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it + /// into a reference is undefined behavior unless the `MaybeUninit` is initialized. + /// + /// # Examples + /// + /// Correct usage of this method: + /// + /// ```rust + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::>::uninit(); + /// unsafe { x.as_mut_ptr().write(vec![0,1,2]); } + /// // Create a reference into the `MaybeUninit>`. + /// // This is okay because we initialized it. + /// let x_vec = unsafe { &mut *x.as_mut_ptr() }; + /// x_vec.push(3); + /// assert_eq!(x_vec.len(), 4); + /// ``` + /// + /// *Incorrect* usage of this method: + /// + /// ```rust,no_run + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::>::uninit(); + /// let x_vec = unsafe { &mut *x.as_mut_ptr() }; + /// // We have created a reference to an uninitialized vector! This is undefined behavior. + /// ``` + /// + /// (Notice that the rules around references to uninitialized data are not finalized yet, but + /// until they are, it is advisable to avoid them.) + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + pub fn as_mut_ptr(&mut self) -> *mut T { + unsafe { &mut *self.value as *mut T } + } + + /// Extracts the value from the `MaybeUninit` container. This is a great way /// to ensure that the data will get dropped, because the resulting `T` is /// subject to the usual drop handling. /// - /// # Unsafety + /// # Safety + /// + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes undefined + /// behavior. 
+ /// + /// # Examples + /// + /// Correct usage of this method: + /// + /// ```rust + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::::uninit(); + /// unsafe { x.as_mut_ptr().write(true); } + /// let x_init = unsafe { x.assume_init() }; + /// assert_eq!(x_init, true); + /// ``` + /// + /// *Incorrect* usage of this method: + /// + /// ```rust,no_run + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state, otherwise this will immediately cause undefined behavior. + /// let x = MaybeUninit::>::uninit(); + /// let x_init = unsafe { x.assume_init() }; + /// // `x` had not been initialized yet, so this last line caused undefined behavior. + /// ``` #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] - pub unsafe fn into_inner(self) -> T { + pub unsafe fn assume_init(self) -> T { + intrinsics::panic_if_uninhabited::(); ManuallyDrop::into_inner(self.value) } - /// Get a reference to the contained value. + /// Deprecated before stabilization. + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + // FIXME: still used by stdsimd + // #[rustc_deprecated(since = "1.35.0", reason = "use `assume_init` instead")] + pub unsafe fn into_initialized(self) -> T { + self.assume_init() + } + + /// Reads the value from the `MaybeUninit` container. The resulting `T` is subject + /// to the usual drop handling. + /// + /// Whenever possible, it is preferrable to use [`assume_init`] instead, which + /// prevents duplicating the content of the `MaybeUninit`. + /// + /// # Safety + /// + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes undefined + /// behavior. + /// + /// Moreover, this leaves a copy of the same data behind in the `MaybeUninit`. When using + /// multiple copies of the data (by calling `read` multiple times, or first + /// calling `read` and then [`assume_init`]), it is your responsibility + /// to ensure that that data may indeed be duplicated. + /// + /// [`assume_init`]: #method.assume_init + /// + /// # Examples + /// + /// Correct usage of this method: + /// + /// ```rust + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::::uninit(); + /// x.write(13); + /// let x1 = unsafe { x.read() }; + /// // `u32` is `Copy`, so we may read multiple times. + /// let x2 = unsafe { x.read() }; + /// assert_eq!(x1, x2); + /// + /// let mut x = MaybeUninit::>>::uninit(); + /// x.write(None); + /// let x1 = unsafe { x.read() }; + /// // Duplicating a `None` value is okay, so we may read multiple times. + /// let x2 = unsafe { x.read() }; + /// assert_eq!(x1, x2); + /// ``` /// - /// # Unsafety + /// *Incorrect* usage of this method: /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state, otherwise this will immediately cause undefined behavior. + /// ```rust,no_run + /// #![feature(maybe_uninit)] + /// use std::mem::MaybeUninit; + /// + /// let mut x = MaybeUninit::>>::uninit(); + /// x.write(Some(vec![0,1,2])); + /// let x1 = unsafe { x.read() }; + /// let x2 = unsafe { x.read() }; + /// // We now created two copies of the same vector, leading to a double-free when + /// // they both get dropped! 
+ /// ``` + #[unstable(feature = "maybe_uninit", issue = "53491")] + #[inline(always)] + pub unsafe fn read(&self) -> T { + intrinsics::panic_if_uninhabited::(); + self.as_ptr().read() + } + + /// Deprecated before stabilization. #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] + #[rustc_deprecated(since = "1.35.0", reason = "use `read` instead")] + pub unsafe fn read_initialized(&self) -> T { + self.read() + } + + /// Gets a reference to the contained value. + /// + /// # Safety + /// + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes undefined + /// behavior. + #[unstable(feature = "maybe_uninit_ref", issue = "53491")] + #[inline(always)] pub unsafe fn get_ref(&self) -> &T { &*self.value } - /// Get a mutable reference to the contained value. + /// Gets a mutable reference to the contained value. /// - /// # Unsafety + /// # Safety /// - /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized - /// state, otherwise this will immediately cause undefined behavior. + /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized + /// state. Calling this when the content is not yet fully initialized causes undefined + /// behavior. // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make // a final decision about the rules before stabilization. - #[unstable(feature = "maybe_uninit", issue = "53491")] + #[unstable(feature = "maybe_uninit_ref", issue = "53491")] #[inline(always)] pub unsafe fn get_mut(&mut self) -> &mut T { &mut *self.value } - /// Get a pointer to the contained value. Reading from this pointer will be undefined - /// behavior unless the `MaybeUninit` is initialized. - #[unstable(feature = "maybe_uninit", issue = "53491")] + /// Gets a pointer to the first element of the array. + #[unstable(feature = "maybe_uninit_slice", issue = "53491")] #[inline(always)] - pub fn as_ptr(&self) -> *const T { - unsafe { &*self.value as *const T } + pub fn first_ptr(this: &[MaybeUninit]) -> *const T { + this as *const [MaybeUninit] as *const T } - /// Get a mutable pointer to the contained value. Reading from this pointer will be undefined - /// behavior unless the `MaybeUninit` is initialized. - #[unstable(feature = "maybe_uninit", issue = "53491")] + /// Gets a mutable pointer to the first element of the array. + #[unstable(feature = "maybe_uninit_slice", issue = "53491")] #[inline(always)] - pub fn as_mut_ptr(&mut self) -> *mut T { - unsafe { &mut *self.value as *mut T } + pub fn first_ptr_mut(this: &mut [MaybeUninit]) -> *mut T { + this as *mut [MaybeUninit] as *mut T } } diff --git a/src/libcore/nonzero.rs b/src/libcore/nonzero.rs deleted file mode 100644 index a89c6ca60cbea..0000000000000 --- a/src/libcore/nonzero.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Exposes the NonZero lang item which provides optimization hints. 
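For the `first_ptr`/`first_ptr_mut` helpers added above (unstable here, and renamed in later releases), a small sketch of the intended use: obtaining a raw `*mut T` to the start of a `[MaybeUninit<T>]` buffer so it can be filled in place. This targets the API exactly as added in this diff and would need a nightly compiler of that era:

```rust
use std::mem::MaybeUninit;

// Fill every slot of the buffer with zeros through the raw pointer to its start.
fn zero_fill(buf: &mut [MaybeUninit<u32>]) {
    let p: *mut u32 = MaybeUninit::first_ptr_mut(buf);
    for i in 0..buf.len() {
        unsafe { p.add(i).write(0) };
    }
}
```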
- -use ops::{CoerceUnsized, DispatchFromDyn}; -use marker::Freeze; - -/// A wrapper type for raw pointers and integers that will never be -/// NULL or 0 that might allow certain optimizations. -#[rustc_layout_scalar_valid_range_start(1)] -#[derive(Copy, Eq, PartialEq, Ord, PartialOrd, Hash)] -#[repr(transparent)] -pub(crate) struct NonZero(pub(crate) T); - -// Do not call `T::clone` as theoretically it could turn the field into `0` -// invalidating `NonZero`'s invariant. -impl Clone for NonZero { - fn clone(&self) -> Self { - unsafe { NonZero(self.0) } - } -} - -impl + Freeze, U: Freeze> CoerceUnsized> for NonZero {} - -impl + Freeze, U: Freeze> DispatchFromDyn> for NonZero {} diff --git a/src/libcore/num/bignum.rs b/src/libcore/num/bignum.rs index 2bfb49c0682bb..c3a42a0fc0494 100644 --- a/src/libcore/num/bignum.rs +++ b/src/libcore/num/bignum.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Custom arbitrary-precision number (bignum) implementation. //! //! This is designed to avoid the heap allocation at expense of stack memory. @@ -57,24 +47,25 @@ macro_rules! impl_full_ops { $( impl FullOps for $ty { fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) { - // this cannot overflow, the output is between 0 and 2*2^nbits - 1 - // FIXME will LLVM optimize this into ADC or similar??? - let (v, carry1) = unsafe { intrinsics::add_with_overflow(self, other) }; - let (v, carry2) = unsafe { - intrinsics::add_with_overflow(v, if carry {1} else {0}) - }; + // This cannot overflow; the output is between `0` and `2 * 2^nbits - 1`. + // FIXME: will LLVM optimize this into ADC or similar? + let (v, carry1) = intrinsics::add_with_overflow(self, other); + let (v, carry2) = intrinsics::add_with_overflow(v, if carry {1} else {0}); (carry1 || carry2, v) } fn full_mul(self, other: $ty, carry: $ty) -> ($ty, $ty) { - // this cannot overflow, the output is between 0 and 2^nbits * (2^nbits - 1) + // This cannot overflow; + // the output is between `0` and `2^nbits * (2^nbits - 1)`. + // FIXME: will LLVM optimize this into ADC or similar? let nbits = mem::size_of::<$ty>() * 8; let v = (self as $bigty) * (other as $bigty) + (carry as $bigty); ((v >> nbits) as $ty, v as $ty) } fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) { - // this cannot overflow, the output is between 0 and 2^(2*nbits) - 1 + // This cannot overflow; + // the output is between `0` and `2^nbits * (2^nbits - 1)`. let nbits = mem::size_of::<$ty>() * 8; let v = (self as $bigty) * (other as $bigty) + (other2 as $bigty) + (carry as $bigty); @@ -83,7 +74,7 @@ macro_rules! impl_full_ops { fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) { debug_assert!(borrow < other); - // this cannot overflow, the dividend is between 0 and other * 2^nbits - 1 + // This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`. let nbits = mem::size_of::<$ty>() * 8; let lhs = ((borrow as $bigty) << nbits) | (self as $bigty); let rhs = other as $bigty; @@ -98,7 +89,8 @@ impl_full_ops! 
{ u8: add(intrinsics::u8_add_with_overflow), mul/div(u16); u16: add(intrinsics::u16_add_with_overflow), mul/div(u32); u32: add(intrinsics::u32_add_with_overflow), mul/div(u64); -// u64: add(intrinsics::u64_add_with_overflow), mul/div(u128); // see RFC #521 for enabling this. + // See RFC #521 for enabling this. + // u64: add(intrinsics::u64_add_with_overflow), mul/div(u128); } /// Table of powers of 5 representable in digits. Specifically, the largest {u8, u16, u32} value diff --git a/src/libcore/num/dec2flt/algorithm.rs b/src/libcore/num/dec2flt/algorithm.rs index c3a983d0f0e5b..a83134a6b2ca4 100644 --- a/src/libcore/num/dec2flt/algorithm.rs +++ b/src/libcore/num/dec2flt/algorithm.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The various algorithms from the paper. use cmp::min; @@ -71,7 +61,7 @@ mod fpu_precision { unsafe { asm!("fldcw $0" :: "m" (cw) :: "volatile") } } - /// Set the precision field of the FPU to `T` and return a `FPUControlWord` + /// Sets the precision field of the FPU to `T` and returns a `FPUControlWord`. pub fn set_precision() -> FPUControlWord { let cw = 0u16; @@ -336,7 +326,7 @@ pub fn algorithm_m(f: &Big, e: i16) -> T { round_by_remainder(v, rem, q, z) } -/// Skip over most Algorithm M iterations by checking the bit length. +/// Skips over most Algorithm M iterations by checking the bit length. fn quick_start(u: &mut Big, v: &mut Big, k: &mut i16) { // The bit length is an estimate of the base two logarithm, and log(u / v) = log(u) - log(v). // The estimate is off by at most 1, but always an under-estimate, so the error on log(u) diff --git a/src/libcore/num/dec2flt/mod.rs b/src/libcore/num/dec2flt/mod.rs index f93564c2849f5..d62cdae0688be 100644 --- a/src/libcore/num/dec2flt/mod.rs +++ b/src/libcore/num/dec2flt/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Converting decimal strings into IEEE 754 binary floating point numbers. //! //! # Problem statement @@ -47,7 +37,7 @@ //! //! In addition, there are numerous helper functions that are used in the paper but not available //! in Rust (or at least in core). Our version is additionally complicated by the need to handle -//! overflow and underflow and the desire to handle subnormal numbers. Bellerophon and +//! overflow and underflow and the desire to handle subnormal numbers. Bellerophon and //! Algorithm R have trouble with overflow, subnormals, and underflow. We conservatively switch to //! Algorithm M (with the modifications described in section 8 of the paper) well before the //! inputs get into the critical region. @@ -64,7 +54,7 @@ //! operations as well, if you want 0.5 ULP accuracy you need to do *everything* in full precision //! and round *exactly once, at the end*, by considering all truncated bits at once. //! -//! FIXME Although some code duplication is necessary, perhaps parts of the code could be shuffled +//! 
FIXME: Although some code duplication is necessary, perhaps parts of the code could be shuffled //! around such that less code is duplicated. Large parts of the algorithms are independent of the //! float type to output, or only needs access to a few constants, which could be passed in as //! parameters. @@ -122,11 +112,35 @@ macro_rules! from_str_float_impl { /// * '2.5E10', or equivalently, '2.5e10' /// * '2.5E-10' /// * '5.' - /// * '.5', or, equivalently, '0.5' + /// * '.5', or, equivalently, '0.5' /// * 'inf', '-inf', 'NaN' /// /// Leading and trailing whitespace represent an error. /// + /// # Grammar + /// + /// All strings that adhere to the following [EBNF] grammar + /// will result in an [`Ok`] being returned: + /// + /// ```txt + /// Float ::= Sign? ( 'inf' | 'NaN' | Number ) + /// Number ::= ( Digit+ | + /// Digit+ '.' Digit* | + /// Digit* '.' Digit+ ) Exp? + /// Exp ::= [eE] Sign? Digit+ + /// Sign ::= [+-] + /// Digit ::= [0-9] + /// ``` + /// + /// [EBNF]: https://www.w3.org/TR/REC-xml/#sec-notation + /// + /// # Known bugs + /// + /// In some situations, some strings that should create a valid float + /// instead return an error. See [issue #31407] for details. + /// + /// [issue #31407]: https://github.com/rust-lang/rust/issues/31407 + /// /// # Arguments /// /// * src - A string @@ -134,7 +148,7 @@ macro_rules! from_str_float_impl { /// # Return value /// /// `Err(ParseFloatError)` if the string did not represent a valid - /// number. Otherwise, `Ok(n)` where `n` is the floating-point + /// number. Otherwise, `Ok(n)` where `n` is the floating-point /// number represented by `src`. #[inline] fn from_str(src: &str) -> Result { @@ -195,7 +209,7 @@ fn pfe_invalid() -> ParseFloatError { ParseFloatError { kind: FloatErrorKind::Invalid } } -/// Split decimal string into sign and the rest, without inspecting or validating the rest. +/// Splits a decimal string into sign and the rest, without inspecting or validating the rest. fn extract_sign(s: &str) -> (Sign, &str) { match s.as_bytes()[0] { b'+' => (Sign::Positive, &s[1..]), @@ -205,7 +219,7 @@ fn extract_sign(s: &str) -> (Sign, &str) { } } -/// Convert a decimal string into a floating point number. +/// Converts a decimal string into a floating point number. fn dec2flt(s: &str) -> Result { if s.is_empty() { return Err(pfe_empty()) @@ -290,8 +304,8 @@ fn simplify(decimal: &mut Decimal) { } } -/// Quick and dirty upper bound on the size (log10) of the largest value that Algorithm R and -/// Algorithm M will compute while working on the given decimal. +/// Returns a quick-an-dirty upper bound on the size (log10) of the largest value that Algorithm R +/// and Algorithm M will compute while working on the given decimal. fn bound_intermediate_digits(decimal: &Decimal, e: i64) -> u64 { // We don't need to worry too much about overflow here thanks to trivial_cases() and the // parser, which filter out the most extreme inputs for us. @@ -310,7 +324,7 @@ fn bound_intermediate_digits(decimal: &Decimal, e: i64) -> u64 { } } -/// Detect obvious overflows and underflows without even looking at the decimal digits. +/// Detects obvious overflows and underflows without even looking at the decimal digits. 
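The grammar documented above can be exercised directly through `str::parse`; a few examples matching the forms it lists:

```rust
fn main() {
    // Inputs accepted by the grammar in the `FromStr` docs above.
    assert_eq!("2.5e10".parse::<f64>(), Ok(2.5e10));
    assert_eq!(".5".parse::<f32>(), Ok(0.5));
    assert_eq!("5.".parse::<f64>(), Ok(5.0));
    assert!("inf".parse::<f64>().unwrap().is_infinite());
    // Leading or trailing whitespace is an error.
    assert!(" 1.0".parse::<f64>().is_err());
}
```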
fn trivial_cases(decimal: &Decimal) -> Option { // There were zeros but they were stripped by simplify() if decimal.integral.is_empty() && decimal.fractional.is_empty() { diff --git a/src/libcore/num/dec2flt/num.rs b/src/libcore/num/dec2flt/num.rs index 34b41fa9decd2..126713185711b 100644 --- a/src/libcore/num/dec2flt/num.rs +++ b/src/libcore/num/dec2flt/num.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Utility functions for bignums that don't make too much sense to turn into methods. // FIXME This module's name is a bit unfortunate, since other modules also import `core::num`. @@ -37,7 +27,7 @@ pub fn compare_with_half_ulp(f: &Big, ones_place: usize) -> Ordering { Equal } -/// Convert an ASCII string containing only decimal digits to a `u64`. +/// Converts an ASCII string containing only decimal digits to a `u64`. /// /// Does not perform checks for overflow or invalid characters, so if the caller is not careful, /// the result is bogus and can panic (though it won't be `unsafe`). Additionally, empty strings @@ -54,7 +44,7 @@ pub fn from_str_unchecked<'a, T>(bytes: T) -> u64 where T : IntoIterator Big { @@ -79,7 +69,7 @@ pub fn to_u64(x: &Big) -> u64 { } -/// Extract a range of bits. +/// Extracts a range of bits. /// Index 0 is the least significant bit and the range is half-open as usual. /// Panics if asked to extract more bits than fit into the return type. diff --git a/src/libcore/num/dec2flt/parse.rs b/src/libcore/num/dec2flt/parse.rs index e7ed94d4d91c2..f970595452ec9 100644 --- a/src/libcore/num/dec2flt/parse.rs +++ b/src/libcore/num/dec2flt/parse.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Validating and decomposing a decimal string of the form: //! //! `(digits | digits? '.'? digits?) (('e' | 'E') ('+' | '-')? digits)?` @@ -52,7 +42,7 @@ pub enum ParseResult<'a> { Invalid, } -/// Check if the input string is a valid floating point number and if so, locate the integral +/// Checks if the input string is a valid floating point number and if so, locate the integral /// part, the fractional part, and the exponent in it. Does not handle signs. pub fn parse_decimal(s: &str) -> ParseResult { if s.is_empty() { @@ -88,7 +78,7 @@ pub fn parse_decimal(s: &str) -> ParseResult { } } -/// Carve off decimal digits up to the first non-digit character. +/// Carves off decimal digits up to the first non-digit character. fn eat_digits(s: &[u8]) -> (&[u8], &[u8]) { let mut i = 0; while i < s.len() && b'0' <= s[i] && s[i] <= b'9' { diff --git a/src/libcore/num/dec2flt/rawfp.rs b/src/libcore/num/dec2flt/rawfp.rs index 18c30e29c7967..b65f539b29c97 100644 --- a/src/libcore/num/dec2flt/rawfp.rs +++ b/src/libcore/num/dec2flt/rawfp.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Bit fiddling on positive IEEE 754 floats. Negative numbers aren't and needn't be handled. //! Normal floating point numbers have a canonical representation as (frac, exp) such that the //! value is 2exp * (1 + sum(frac[N-i] / 2i)) where N is the number of bits. @@ -69,10 +59,10 @@ pub trait RawFloat /// Type used by `to_bits` and `from_bits`. type Bits: Add + From + TryFrom; - /// Raw transmutation to integer. + /// Performs a raw transmutation to an integer. fn to_bits(self) -> Self::Bits; - /// Raw transmutation from integer. + /// Performs a raw transmutation from an integer. fn from_bits(v: Self::Bits) -> Self; /// Returns the category that this number falls into. @@ -81,14 +71,14 @@ pub trait RawFloat /// Returns the mantissa, exponent and sign as integers. fn integer_decode(self) -> (u64, i16, i8); - /// Decode the float. + /// Decodes the float. fn unpack(self) -> Unpacked; - /// Cast from a small integer that can be represented exactly. Panic if the integer can't be + /// Casts from a small integer that can be represented exactly. Panic if the integer can't be /// represented, the other code in this module makes sure to never let that happen. fn from_int(x: u64) -> Self; - /// Get the value 10e from a pre-computed table. + /// Gets the value 10e from a pre-computed table. /// Panics for `e >= CEIL_LOG5_OF_MAX_SIG`. fn short_fast_pow10(e: usize) -> Self; @@ -250,7 +240,7 @@ impl RawFloat for f64 { fn from_bits(v: Self::Bits) -> Self { Self::from_bits(v) } } -/// Convert an Fp to the closest machine float type. +/// Converts an `Fp` to the closest machine float type. /// Does not handle subnormal results. pub fn fp_to_float(x: Fp) -> T { let x = x.normalize(); @@ -329,7 +319,7 @@ pub fn big_to_fp(f: &Big) -> Fp { } } -/// Find the largest floating point number strictly smaller than the argument. +/// Finds the largest floating point number strictly smaller than the argument. /// Does not handle subnormals, zero, or exponent underflow. pub fn prev_float(x: T) -> T { match x.classify() { diff --git a/src/libcore/num/dec2flt/table.rs b/src/libcore/num/dec2flt/table.rs index cb8c94313d030..345ac830aaa77 100644 --- a/src/libcore/num/dec2flt/table.rs +++ b/src/libcore/num/dec2flt/table.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Tables of approximations of powers of ten. //! DO NOT MODIFY: Generated by `src/etc/dec2flt_table.py` diff --git a/src/libcore/num/diy_float.rs b/src/libcore/num/diy_float.rs index b0561da5934c0..cdf332989492f 100644 --- a/src/libcore/num/diy_float.rs +++ b/src/libcore/num/diy_float.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Extended precision "soft float", for internal use only. 
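`RawFloat::to_bits`/`from_bits` above are documented as raw transmutations; the public `f32` methods they mirror behave the same way, which a tiny round-trip check can confirm:

```rust
fn main() {
    let bits = 1.0f32.to_bits();
    // sign 0, biased exponent 127, mantissa 0
    assert_eq!(bits, 0x3f80_0000);
    assert_eq!(f32::from_bits(bits), 1.0);
}
```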
// This module is only for dec2flt and flt2dec, and only public because of coretests. diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index d6c3996971a58..dc0580764acb7 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module provides constants which are specific to the implementation //! of the `f32` floating point data type. //! @@ -154,7 +144,7 @@ pub mod consts { #[lang = "f32"] #[cfg(not(test))] impl f32 { - /// Returns `true` if this value is `NaN` and false otherwise. + /// Returns `true` if this value is `NaN`. /// /// ``` /// use std::f32; @@ -171,8 +161,16 @@ impl f32 { self != self } - /// Returns `true` if this value is positive infinity or negative infinity and - /// false otherwise. + // FIXME(#50145): `abs` is publicly unavailable in libcore due to + // concerns about portability, so this implementation is for + // private use internally. + #[inline] + fn abs_private(self) -> f32 { + f32::from_bits(self.to_bits() & 0x7fff_ffff) + } + + /// Returns `true` if this value is positive infinity or negative infinity, and + /// `false` otherwise. /// /// ``` /// use std::f32; @@ -191,7 +189,7 @@ impl f32 { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_infinite(self) -> bool { - self == INFINITY || self == NEG_INFINITY + self.abs_private() == INFINITY } /// Returns `true` if this number is neither infinite nor `NaN`. @@ -213,7 +211,9 @@ impl f32 { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_finite(self) -> bool { - !(self.is_nan() || self.is_infinite()) + // There's no need to handle NaN separately: if self is NaN, + // the comparison is not true, exactly as desired. + self.abs_private() < INFINITY } /// Returns `true` if the number is neither zero, infinite, @@ -272,7 +272,7 @@ impl f32 { } } - /// Returns `true` if and only if `self` has a positive sign, including `+0.0`, `NaN`s with + /// Returns `true` if `self` has a positive sign, including `+0.0`, `NaN`s with /// positive sign bit and positive infinity. /// /// ``` @@ -288,7 +288,7 @@ impl f32 { !self.is_sign_negative() } - /// Returns `true` if and only if `self` has a negative sign, including `-0.0`, `NaN`s with + /// Returns `true` if `self` has a negative sign, including `-0.0`, `NaN`s with /// negative sign bit and negative infinity. /// /// ``` diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs index b8e3dd6ed646c..c3677f8c8faea 100644 --- a/src/libcore/num/f64.rs +++ b/src/libcore/num/f64.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module provides constants which are specific to the implementation //! of the `f64` floating point data type. //! @@ -154,7 +144,7 @@ pub mod consts { #[lang = "f64"] #[cfg(not(test))] impl f64 { - /// Returns `true` if this value is `NaN` and false otherwise. 
+ /// Returns `true` if this value is `NaN`. /// /// ``` /// use std::f64; @@ -171,8 +161,16 @@ impl f64 { self != self } - /// Returns `true` if this value is positive infinity or negative infinity and - /// false otherwise. + // FIXME(#50145): `abs` is publicly unavailable in libcore due to + // concerns about portability, so this implementation is for + // private use internally. + #[inline] + fn abs_private(self) -> f64 { + f64::from_bits(self.to_bits() & 0x7fff_ffff_ffff_ffff) + } + + /// Returns `true` if this value is positive infinity or negative infinity, and + /// `false` otherwise. /// /// ``` /// use std::f64; @@ -191,7 +189,7 @@ impl f64 { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_infinite(self) -> bool { - self == INFINITY || self == NEG_INFINITY + self.abs_private() == INFINITY } /// Returns `true` if this number is neither infinite nor `NaN`. @@ -213,7 +211,9 @@ impl f64 { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is_finite(self) -> bool { - !(self.is_nan() || self.is_infinite()) + // There's no need to handle NaN separately: if self is NaN, + // the comparison is not true, exactly as desired. + self.abs_private() < INFINITY } /// Returns `true` if the number is neither zero, infinite, @@ -272,7 +272,7 @@ impl f64 { } } - /// Returns `true` if and only if `self` has a positive sign, including `+0.0`, `NaN`s with + /// Returns `true` if `self` has a positive sign, including `+0.0`, `NaN`s with /// positive sign bit and positive infinity. /// /// ``` @@ -296,7 +296,7 @@ impl f64 { self.is_sign_positive() } - /// Returns `true` if and only if `self` has a negative sign, including `-0.0`, `NaN`s with + /// Returns `true` if `self` has a negative sign, including `-0.0`, `NaN`s with /// negative sign bit and negative infinity. /// /// ``` diff --git a/src/libcore/num/flt2dec/decoder.rs b/src/libcore/num/flt2dec/decoder.rs index c34a56f288fd4..a8da31d3e4858 100644 --- a/src/libcore/num/flt2dec/decoder.rs +++ b/src/libcore/num/flt2dec/decoder.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Decodes a floating-point value into individual parts and error ranges. use {f32, f64}; @@ -20,7 +10,7 @@ use num::dec2flt::rawfp::RawFloat; /// /// - Any number from `(mant - minus) * 2^exp` to `(mant + plus) * 2^exp` will /// round to the original value. The range is inclusive only when -/// `inclusive` is true. +/// `inclusive` is `true`. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct Decoded { /// The scaled mantissa. diff --git a/src/libcore/num/flt2dec/estimator.rs b/src/libcore/num/flt2dec/estimator.rs index 4e33fcfd76e61..50e2f70528383 100644 --- a/src/libcore/num/flt2dec/estimator.rs +++ b/src/libcore/num/flt2dec/estimator.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The exponent estimator. /// Finds `k_0` such that `10^(k_0-1) < mant * 2^exp <= 10^(k_0+1)`. 
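The `abs_private` helper introduced for `f32` and `f64` above drives the new `is_infinite`/`is_finite` bodies. A standalone sketch of the same mask-and-compare trick (the function name here is invented for illustration):

```rust
// Clearing the sign bit of an IEEE 754 `f32` yields its absolute value, so a
// single comparison against infinity classifies the number.
fn is_finite_f32(x: f32) -> bool {
    let abs = f32::from_bits(x.to_bits() & 0x7fff_ffff);
    abs < std::f32::INFINITY
}

fn main() {
    assert!(is_finite_f32(-3.5));
    assert!(!is_finite_f32(std::f32::INFINITY));
    assert!(!is_finite_f32(std::f32::NAN)); // NaN compares false, as the diff's comment notes
}
```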
diff --git a/src/libcore/num/flt2dec/mod.rs b/src/libcore/num/flt2dec/mod.rs index 097240e58ae50..c9a9375ec590e 100644 --- a/src/libcore/num/flt2dec/mod.rs +++ b/src/libcore/num/flt2dec/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! Floating-point number to decimal conversion routines. @@ -249,10 +239,8 @@ impl<'a> Formatted<'a> { let mut written = self.sign.len(); for part in self.parts { - match part.write(&mut out[written..]) { - Some(len) => { written += len; } - None => { return None; } - } + let len = part.write(&mut out[written..])?; + written += len; } Some(written) } @@ -325,15 +313,15 @@ fn digits_to_dec_str<'a>(buf: &'a [u8], exp: i16, frac_digits: usize, } } -/// Formats given decimal digits `0.<...buf...> * 10^exp` into the exponential form -/// with at least given number of significant digits. When `upper` is true, +/// Formats the given decimal digits `0.<...buf...> * 10^exp` into the exponential +/// form with at least the given number of significant digits. When `upper` is `true`, /// the exponent will be prefixed by `E`; otherwise that's `e`. The result is /// stored to the supplied parts array and a slice of written parts is returned. /// /// `min_digits` can be less than the number of actual significant digits in `buf`; /// it will be ignored and full digits will be printed. It is only used to print -/// additional zeroes after rendered digits. Thus `min_digits` of 0 means that -/// it will only print given digits and nothing else. +/// additional zeroes after rendered digits. Thus, `min_digits == 0` means that +/// it will only print the given digits and nothing else. fn digits_to_exp_str<'a>(buf: &'a [u8], exp: i16, min_ndigits: usize, upper: bool, parts: &'a mut [Part<'a>]) -> &'a [Part<'a>] { assert!(!buf.is_empty()); @@ -394,7 +382,7 @@ fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static } } -/// Formats given floating point number into the decimal form with at least +/// Formats the given floating point number into the decimal form with at least /// given number of fractional digits. The result is stored to the supplied parts /// array while utilizing given byte buffer as a scratch. `upper` is currently /// unused but left for the future decision to change the case of non-finite values, @@ -448,7 +436,7 @@ pub fn to_shortest_str<'a, T, F>(mut format_shortest: F, v: T, } } -/// Formats given floating point number into the decimal form or +/// Formats the given floating point number into the decimal form or /// the exponential form, depending on the resulting exponent. The result is /// stored to the supplied parts array while utilizing given byte buffer /// as a scratch. `upper` is used to determine the case of non-finite values @@ -507,7 +495,7 @@ pub fn to_shortest_exp_str<'a, T, F>(mut format_shortest: F, v: T, } } -/// Returns rather crude approximation (upper bound) for the maximum buffer size +/// Returns a rather crude approximation (upper bound) for the maximum buffer size /// calculated from the given decoded exponent. 
/// /// The exact limit is: diff --git a/src/libcore/num/flt2dec/strategy/dragon.rs b/src/libcore/num/flt2dec/strategy/dragon.rs index cda0773afbd5b..582fe22f85406 100644 --- a/src/libcore/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/num/flt2dec/strategy/dragon.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Almost direct (but slightly optimized) Rust translation of Figure 3 of "Printing //! Floating-Point Numbers Quickly and Accurately"[^1]. //! diff --git a/src/libcore/num/flt2dec/strategy/grisu.rs b/src/libcore/num/flt2dec/strategy/grisu.rs index 3e76feca885bc..aa21fcffa5c61 100644 --- a/src/libcore/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/num/flt2dec/strategy/grisu.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Rust adaptation of the Grisu3 algorithm described in "Printing Floating-Point Numbers Quickly //! and Accurately with Integers"[^1]. It uses about 1KB of precomputed table, and in turn, it's //! very quick for most inputs. diff --git a/src/libcore/num/i128.rs b/src/libcore/num/i128.rs index 989376d1ac2d2..564ed598a882c 100644 --- a/src/libcore/num/i128.rs +++ b/src/libcore/num/i128.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 128-bit signed integer type. //! //! *[See also the `i128` primitive type](../../std/primitive.i128.html).* diff --git a/src/libcore/num/i16.rs b/src/libcore/num/i16.rs index 0f3a5baa2dd9e..44d6aaef25ba0 100644 --- a/src/libcore/num/i16.rs +++ b/src/libcore/num/i16.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 16-bit signed integer type. //! //! *[See also the `i16` primitive type](../../std/primitive.i16.html).* diff --git a/src/libcore/num/i32.rs b/src/libcore/num/i32.rs index ea8b3a9145c6e..90a5f89195e31 100644 --- a/src/libcore/num/i32.rs +++ b/src/libcore/num/i32.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 32-bit signed integer type. //! //! 
*[See also the `i32` primitive type](../../std/primitive.i32.html).* diff --git a/src/libcore/num/i64.rs b/src/libcore/num/i64.rs index aa21b1190aef5..04a8a9d757915 100644 --- a/src/libcore/num/i64.rs +++ b/src/libcore/num/i64.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 64-bit signed integer type. //! //! *[See also the `i64` primitive type](../../std/primitive.i64.html).* diff --git a/src/libcore/num/i8.rs b/src/libcore/num/i8.rs index 1bed4861594c9..5a52a967cf96d 100644 --- a/src/libcore/num/i8.rs +++ b/src/libcore/num/i8.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 8-bit signed integer type. //! //! *[See also the `i8` primitive type](../../std/primitive.i8.html).* diff --git a/src/libcore/num/int_macros.rs b/src/libcore/num/int_macros.rs index 3b1612a4ee29f..5c59fe25f6483 100644 --- a/src/libcore/num/int_macros.rs +++ b/src/libcore/num/int_macros.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![doc(hidden)] macro_rules! int_module { diff --git a/src/libcore/num/isize.rs b/src/libcore/num/isize.rs index e0917f79c43dc..143f8b3b272d6 100644 --- a/src/libcore/num/isize.rs +++ b/src/libcore/num/isize.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The pointer-sized signed integer type. //! //! *[See also the `isize` primitive type](../../std/primitive.isize.html).* diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index f1df1f2856ed6..01da5518868a1 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -1,29 +1,18 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Numeric traits and functions for the built-in numeric types. #![stable(feature = "rust1", since = "1.0.0")] -use convert::TryFrom; +use convert::{TryFrom, Infallible}; use fmt; use intrinsics; use mem; -use nonzero::NonZero; use ops; use str::FromStr; macro_rules! 
impl_nonzero_fmt { - ( ( $( $Trait: ident ),+ ) for $Ty: ident ) => { + ( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => { $( - #[stable(feature = "nonzero", since = "1.28.0")] + #[$stability] impl fmt::$Trait for $Ty { #[inline] fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -42,7 +31,7 @@ macro_rules! doc_comment { } macro_rules! nonzero_integers { - ( $( $Ty: ident($Int: ty); )+ ) => { + ( $( #[$stability: meta] $Ty: ident($Int: ty); )+ ) => { $( doc_comment! { concat!("An integer that is known not to equal zero. @@ -52,43 +41,44 @@ For example, `Option<", stringify!($Ty), ">` is the same size as `", stringify!( ```rust use std::mem::size_of; -assert_eq!(size_of::>(), size_of::<", stringify!($Int), +assert_eq!(size_of::>(), size_of::<", stringify!($Int), ">()); ```"), - #[stable(feature = "nonzero", since = "1.28.0")] + #[$stability] #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] #[repr(transparent)] - pub struct $Ty(NonZero<$Int>); + #[rustc_layout_scalar_valid_range_start(1)] + pub struct $Ty($Int); } impl $Ty { - /// Create a non-zero without checking the value. + /// Creates a non-zero without checking the value. /// /// # Safety /// /// The value must not be zero. - #[stable(feature = "nonzero", since = "1.28.0")] + #[$stability] #[inline] pub const unsafe fn new_unchecked(n: $Int) -> Self { - $Ty(NonZero(n)) + $Ty(n) } - /// Create a non-zero if the given value is not zero. - #[stable(feature = "nonzero", since = "1.28.0")] + /// Creates a non-zero if the given value is not zero. + #[$stability] #[inline] pub fn new(n: $Int) -> Option { if n != 0 { - Some($Ty(unsafe { NonZero(n) })) + Some(unsafe { $Ty(n) }) } else { None } } /// Returns the value as a primitive type. - #[stable(feature = "nonzero", since = "1.28.0")] + #[$stability] #[inline] - pub fn get(self) -> $Int { - self.0 .0 + pub const fn get(self) -> $Int { + self.0 } } @@ -96,26 +86,50 @@ assert_eq!(size_of::>(), size_of::<", st #[stable(feature = "from_nonzero", since = "1.31.0")] impl From<$Ty> for $Int { fn from(nonzero: $Ty) -> Self { - nonzero.0 .0 + nonzero.0 } } impl_nonzero_fmt! { - (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty + #[$stability] (Debug, Display, Binary, Octal, LowerHex, UpperHex) for $Ty } )+ } } nonzero_integers! { - NonZeroU8(u8); - NonZeroU16(u16); - NonZeroU32(u32); - NonZeroU64(u64); - NonZeroU128(u128); - NonZeroUsize(usize); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU8(u8); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU16(u16); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU32(u32); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU64(u64); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroU128(u128); + #[stable(feature = "nonzero", since = "1.28.0")] NonZeroUsize(usize); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI8(i8); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI16(i16); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI32(i32); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI64(i64); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroI128(i128); + #[stable(feature = "signed_nonzero", since = "1.34.0")] NonZeroIsize(isize); } +macro_rules! from_str_radix_nzint_impl { + ($($t:ty)*) => {$( + #[stable(feature = "nonzero_parse", since = "1.35.0")] + impl FromStr for $t { + type Err = ParseIntError; + fn from_str(src: &str) -> Result { + Self::new(from_str_radix(src, 10)?) 
+ .ok_or(ParseIntError { + kind: IntErrorKind::Zero + }) + } + } + )*} +} + +from_str_radix_nzint_impl! { NonZeroU8 NonZeroU16 NonZeroU32 NonZeroU64 NonZeroU128 NonZeroUsize + NonZeroI8 NonZeroI16 NonZeroI32 NonZeroI64 NonZeroI128 NonZeroIsize } + /// Provides intentionally-wrapped arithmetic on `T`. /// /// Operations like `+` on `u32` values is intended to never overflow, @@ -285,7 +299,6 @@ $EndFeature, " ``` "), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() } } @@ -301,7 +314,6 @@ Basic usage: ", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value().count_zeros(), 1);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn count_zeros(self) -> u32 { (!self).count_ones() @@ -322,7 +334,6 @@ assert_eq!(n.leading_zeros(), 0);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn leading_zeros(self) -> u32 { (self as $UnsignedT).leading_zeros() @@ -343,7 +354,6 @@ assert_eq!(n.trailing_zeros(), 2);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn trailing_zeros(self) -> u32 { (self as $UnsignedT).trailing_zeros() @@ -354,7 +364,7 @@ $EndFeature, " concat!("Shifts the bits to the left by a specified amount, `n`, wrapping the truncated bits to the end of the resulting integer. -Please note this isn't the same operation as `<<`! +Please note this isn't the same operation as the `<<` shifting operator! # Examples @@ -367,7 +377,6 @@ let m = ", $rot_result, "; assert_eq!(n.rotate_left(", $rot, "), m); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_left(self, n: u32) -> Self { (self as $UnsignedT).rotate_left(n) as Self @@ -379,7 +388,7 @@ assert_eq!(n.rotate_left(", $rot, "), m); wrapping the truncated bits to the beginning of the resulting integer. -Please note this isn't the same operation as `>>`! +Please note this isn't the same operation as the `>>` shifting operator! 
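A quick illustration of what the `NonZero*` changes above mean in practice: the restricted-range layout keeps `Option<NonZeroU32>` the same size as `u32`, and the new `FromStr` impls let the non-zero types be parsed directly, rejecting `"0"`. This sketch only uses the public API on a toolchain where the parse support described above is stable; nothing here depends on the internal `rustc_layout_scalar_valid_range_start` attribute:

```
use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // The "niche" at zero lets `None` reuse the all-zero bit pattern,
    // so the Option costs no extra space.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());

    // `new` checks for zero, `get` returns the underlying primitive.
    let n = NonZeroU32::new(42).unwrap();
    assert_eq!(n.get(), 42);
    assert!(NonZeroU32::new(0).is_none());

    // With the `FromStr` impls added here, parsing rejects zero as well as
    // malformed input.
    let parsed: NonZeroU32 = "42".parse().unwrap();
    assert_eq!(parsed, n);
    assert!("0".parse::<NonZeroU32>().is_err());
    assert!("abc".parse::<NonZeroU32>().is_err());
}
```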
# Examples @@ -392,7 +401,6 @@ let m = ", $rot_op, "; assert_eq!(n.rotate_right(", $rot, "), m); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_right(self, n: u32) -> Self { (self as $UnsignedT).rotate_right(n) as Self @@ -414,7 +422,6 @@ let m = n.swap_bytes(); assert_eq!(m, ", $swapped, "); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn swap_bytes(self) -> Self { (self as $UnsignedT).swap_bytes() as Self @@ -464,7 +471,6 @@ if cfg!(target_endian = \"big\") { $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn from_be(x: Self) -> Self { #[cfg(target_endian = "big")] @@ -498,7 +504,6 @@ if cfg!(target_endian = \"little\") { $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn from_le(x: Self) -> Self { #[cfg(target_endian = "little")] @@ -532,7 +537,6 @@ if cfg!(target_endian = \"big\") { $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn to_be(self) -> Self { // or not to be? #[cfg(target_endian = "big")] @@ -566,7 +570,6 @@ if cfg!(target_endian = \"little\") { $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn to_le(self) -> Self { #[cfg(target_endian = "little")] @@ -673,7 +676,7 @@ $EndFeature, " } doc_comment! { - concat!("Checked Euclidean division. Computes `self.div_euc(rhs)`, + concat!("Checked Euclidean division. Computes `self.div_euclid(rhs)`, returning `None` if `rhs == 0` or the division results in overflow. # Examples @@ -683,17 +686,17 @@ Basic usage: ``` #![feature(euclidean_division)] assert_eq!((", stringify!($SelfT), -"::min_value() + 1).checked_div_euc(-1), Some(", stringify!($Max), ")); -assert_eq!(", stringify!($SelfT), "::min_value().checked_div_euc(-1), None); -assert_eq!((1", stringify!($SelfT), ").checked_div_euc(0), None); +"::min_value() + 1).checked_div_euclid(-1), Some(", stringify!($Max), ")); +assert_eq!(", stringify!($SelfT), "::min_value().checked_div_euclid(-1), None); +assert_eq!((1", stringify!($SelfT), ").checked_div_euclid(0), None); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn checked_div_euc(self, rhs: Self) -> Option { + pub fn checked_div_euclid(self, rhs: Self) -> Option { if rhs == 0 || (self == Self::min_value() && rhs == -1) { None } else { - Some(self.div_euc(rhs)) + Some(self.div_euclid(rhs)) } } } @@ -726,8 +729,8 @@ $EndFeature, " } doc_comment! { - concat!("Checked Euclidean modulo. Computes `self.mod_euc(rhs)`, returning `None` if -`rhs == 0` or the division results in overflow. + concat!("Checked Euclidean remainder. Computes `self.rem_euclid(rhs)`, returning `None` +if `rhs == 0` or the division results in overflow. 
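The reworded notes above ("this isn't the same operation as the `<<`/`>>` shifting operator") are easiest to see side by side: a rotate carries the truncated bits around to the other end, while a shift discards them and fills with zeros. A small sketch:

```
fn main() {
    let x: u8 = 0b1001_0110;

    // Shifting left by 2 drops the top two bits and fills with zeros.
    assert_eq!(x << 2, 0b0101_1000);

    // Rotating left by 2 wraps those two bits around to the low end.
    assert_eq!(x.rotate_left(2), 0b0101_1010);

    // Rotation is always reversible; shifting generally is not.
    assert_eq!(x.rotate_left(2).rotate_right(2), x);
}
```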
# Examples @@ -737,17 +740,17 @@ Basic usage: #![feature(euclidean_division)] use std::", stringify!($SelfT), "; -assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(2), Some(1)); -assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(0), None); -assert_eq!(", stringify!($SelfT), "::MIN.checked_mod_euc(-1), None); +assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None); +assert_eq!(", stringify!($SelfT), "::MIN.checked_rem_euclid(-1), None); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn checked_mod_euc(self, rhs: Self) -> Option { + pub fn checked_rem_euclid(self, rhs: Self) -> Option { if rhs == 0 || (self == Self::min_value() && rhs == -1) { None } else { - Some(self.mod_euc(rhs)) + Some(self.rem_euclid(rhs)) } } } @@ -851,13 +854,12 @@ overflow occurred. Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(8", stringify!($SelfT), ".checked_pow(2), Some(64)); assert_eq!(", stringify!($SelfT), "::max_value().checked_pow(2), None);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn checked_pow(self, mut exp: u32) -> Option { let mut base = self; @@ -896,17 +898,16 @@ assert_eq!(", stringify!($SelfT), "::max_value().saturating_add(100), ", stringi "::max_value());", $EndFeature, " ```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_saturating_int_methods")] #[inline] - pub fn saturating_add(self, rhs: Self) -> Self { - match self.checked_add(rhs) { - Some(x) => x, - None if rhs >= 0 => Self::max_value(), - None => Self::min_value(), - } + pub const fn saturating_add(self, rhs: Self) -> Self { + intrinsics::saturating_add(self, rhs) } } + doc_comment! { concat!("Saturating integer subtraction. Computes `self - rhs`, saturating at the numeric bounds instead of overflowing. @@ -922,13 +923,10 @@ assert_eq!(", stringify!($SelfT), "::min_value().saturating_sub(100), ", stringi $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_saturating_int_methods")] #[inline] - pub fn saturating_sub(self, rhs: Self) -> Self { - match self.checked_sub(rhs) { - Some(x) => x, - None if rhs >= 0 => Self::min_value(), - None => Self::max_value(), - } + pub const fn saturating_sub(self, rhs: Self) -> Self { + intrinsics::saturating_sub(self, rhs) } } @@ -970,7 +968,6 @@ saturating at the numeric bounds instead of overflowing. 
Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "use std::", stringify!($SelfT), "; assert_eq!((-4", stringify!($SelfT), ").saturating_pow(3), -64); @@ -978,7 +975,7 @@ assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(2), ", stringify!($SelfT assert_eq!(", stringify!($SelfT), "::MIN.saturating_pow(3), ", stringify!($SelfT), "::MIN);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn saturating_pow(self, exp: u32) -> Self { match self.checked_pow(exp) { @@ -1004,12 +1001,9 @@ assert_eq!(", stringify!($SelfT), "::max_value().wrapping_add(2), ", stringify!( $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_add(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_add(self, rhs) - } + intrinsics::overflowing_add(self, rhs) } } @@ -1028,12 +1022,9 @@ stringify!($SelfT), "::max_value());", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_sub(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_sub(self, rhs) - } + intrinsics::overflowing_sub(self, rhs) } } @@ -1051,12 +1042,9 @@ assert_eq!(11i8.wrapping_mul(12), -124);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_mul(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_mul(self, rhs) - } + intrinsics::overflowing_mul(self, rhs) } } @@ -1089,7 +1077,7 @@ $EndFeature, " } doc_comment! { - concat!("Wrapping Euclidean division. Computes `self.div_euc(rhs)`, + concat!("Wrapping Euclidean division. Computes `self.div_euclid(rhs)`, wrapping around at the boundary of the type. Wrapping will only occur in `MIN / -1` on a signed type (where `MIN` is the negative minimal value @@ -1106,13 +1094,13 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(100", stringify!($SelfT), ".wrapping_div_euc(10), 10); -assert_eq!((-128i8).wrapping_div_euc(-1), -128); +assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10); +assert_eq!((-128i8).wrapping_div_euclid(-1), -128); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn wrapping_div_euc(self, rhs: Self) -> Self { - self.overflowing_div_euc(rhs).0 + pub fn wrapping_div_euclid(self, rhs: Self) -> Self { + self.overflowing_div_euclid(rhs).0 } } @@ -1145,8 +1133,8 @@ $EndFeature, " } doc_comment! { - concat!("Wrapping Euclidean modulo. Computes `self.mod_euc(rhs)`, wrapping around at the -boundary of the type. + concat!("Wrapping Euclidean remainder. Computes `self.rem_euclid(rhs)`, wrapping around +at the boundary of the type. Wrapping will only occur in `MIN % -1` on a signed type (where `MIN` is the negative minimal value for the type). In this case, this method returns 0. 
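The `saturating_add`/`saturating_sub` rewrites a little further up swap the `checked_*`-plus-`match` bodies for the dedicated saturating intrinsics (gating `const`-ness behind `const_saturating_int_methods`); the observable behaviour is unchanged. A behavioural sketch, with `saturating_add_ref` as a hypothetical reference version written in the style of the code being removed:

```
// Reference implementation in the style of the removed match-based body
// (specialised to i32 to keep the sketch short).
fn saturating_add_ref(a: i32, b: i32) -> i32 {
    match a.checked_add(b) {
        Some(x) => x,
        None if b >= 0 => i32::max_value(),
        None => i32::min_value(),
    }
}

fn main() {
    // Saturating arithmetic clamps at the numeric bounds instead of
    // wrapping or panicking.
    assert_eq!(i32::max_value().saturating_add(1), i32::max_value());
    assert_eq!(i32::min_value().saturating_sub(1), i32::min_value());
    assert_eq!(100_i32.saturating_add(1), 101);

    // The intrinsic-based version agrees with the match-based one.
    for &(a, b) in &[(i32::max_value(), 1), (i32::min_value(), -1), (7, -9)] {
        assert_eq!(a.saturating_add(b), saturating_add_ref(a, b));
    }
}
```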
@@ -1161,13 +1149,13 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(100", stringify!($SelfT), ".wrapping_mod_euc(10), 0); -assert_eq!((-128i8).wrapping_mod_euc(-1), 0); +assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0); +assert_eq!((-128i8).wrapping_rem_euclid(-1), 0); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn wrapping_mod_euc(self, rhs: Self) -> Self { - self.overflowing_mod_euc(rhs).0 + pub fn wrapping_rem_euclid(self, rhs: Self) -> Self { + self.overflowing_rem_euclid(rhs).0 } } @@ -1191,7 +1179,7 @@ $EndFeature, " ```"), #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline] - pub fn wrapping_neg(self) -> Self { + pub const fn wrapping_neg(self) -> Self { self.overflowing_neg().0 } } @@ -1215,7 +1203,6 @@ assert_eq!((-1", stringify!($SelfT), ").wrapping_shl(128), -1);", $EndFeature, " ```"), #[stable(feature = "num_wrapping", since = "1.2.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_shl(self, rhs: u32) -> Self { unsafe { @@ -1243,7 +1230,6 @@ assert_eq!((-128i16).wrapping_shr(64), -128);", $EndFeature, " ```"), #[stable(feature = "num_wrapping", since = "1.2.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_shr(self, rhs: u32) -> Self { unsafe { @@ -1292,13 +1278,12 @@ wrapping around at the boundary of the type. Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(4), 81); assert_eq!(3i8.wrapping_pow(5), -13); assert_eq!(3i8.wrapping_pow(6), -39);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn wrapping_pow(self, mut exp: u32) -> Self { let mut base = self; @@ -1341,13 +1326,9 @@ assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (", stringify!($Sel "::MIN, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::add_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } } @@ -1370,13 +1351,9 @@ assert_eq!(", stringify!($SelfT), "::MIN.overflowing_sub(1), (", stringify!($Sel "::MAX, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::sub_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } } @@ -1397,13 +1374,9 @@ assert_eq!(1_000_000_000i32.overflowing_mul(10), (1410065408, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::mul_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } } @@ -1442,7 +1415,7 @@ $EndFeature, " } doc_comment! { - concat!("Calculates the quotient of Euclidean division `self.div_euc(rhs)`. 
+ concat!("Calculates the quotient of Euclidean division `self.div_euclid(rhs)`. Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would occur. If an overflow would occur then `self` is returned. @@ -1459,17 +1432,17 @@ Basic usage: #![feature(euclidean_division)] use std::", stringify!($SelfT), "; -assert_eq!(5", stringify!($SelfT), ".overflowing_div_euc(2), (2, false)); -assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euc(-1), (", stringify!($SelfT), +assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euclid(-1), (", stringify!($SelfT), "::MIN, true)); ```"), #[inline] #[unstable(feature = "euclidean_division", issue = "49048")] - pub fn overflowing_div_euc(self, rhs: Self) -> (Self, bool) { + pub fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) { if self == Self::min_value() && rhs == -1 { (self, true) } else { - (self.div_euc(rhs), false) + (self.div_euclid(rhs), false) } } } @@ -1508,7 +1481,7 @@ $EndFeature, " doc_comment! { - concat!("Calculates the remainder `self.mod_euc(rhs)` by Euclidean division. + concat!("Overflowing Euclidean remainder. Calculates `self.rem_euclid(rhs)`. Returns a tuple of the remainder after dividing along with a boolean indicating whether an arithmetic overflow would occur. If an overflow would occur then 0 is returned. @@ -1525,16 +1498,16 @@ Basic usage: #![feature(euclidean_division)] use std::", stringify!($SelfT), "; -assert_eq!(5", stringify!($SelfT), ".overflowing_mod_euc(2), (1, false)); -assert_eq!(", stringify!($SelfT), "::MIN.overflowing_mod_euc(-1), (0, true)); +assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false)); +assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem_euclid(-1), (0, true)); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn overflowing_mod_euc(self, rhs: Self) -> (Self, bool) { + pub fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) { if self == Self::min_value() && rhs == -1 { (0, true) } else { - (self.mod_euc(rhs), false) + (self.rem_euclid(rhs), false) } } } @@ -1560,12 +1533,8 @@ assert_eq!(", stringify!($SelfT), "::MIN.overflowing_neg(), (", stringify!($Self ```"), #[inline] #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_neg(self) -> (Self, bool) { - if self == Self::min_value() { - (Self::min_value(), true) - } else { - (-self, false) - } + pub const fn overflowing_neg(self) -> (Self, bool) { + ((!self).wrapping_add(1), self == Self::min_value()) } } @@ -1586,7 +1555,6 @@ assert_eq!(0x1i32.overflowing_shl(36), (0x10, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) @@ -1610,7 +1578,6 @@ assert_eq!(0x10i32.overflowing_shr(36), (0x1, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) @@ -1658,12 +1625,11 @@ whether an overflow happened. 
Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(4), (81, false)); assert_eq!(3i8.overflowing_pow(5), (-13, true));", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { let mut base = self; @@ -1739,9 +1705,13 @@ $EndFeature, " doc_comment! { concat!("Calculates the quotient of Euclidean division of `self` by `rhs`. -This computes the integer `n` such that `self = n * rhs + self.mod_euc(rhs)`. +This computes the integer `n` such that `self = n * rhs + self.rem_euclid(rhs)`, +with `0 <= self.rem_euclid(rhs) < rhs`. + In other words, the result is `self / rhs` rounded to the integer `n` such that `self >= n * rhs`. +If `self > 0`, this is equal to round towards zero (the default in Rust); +if `self < 0`, this is equal to round towards +/- infinity. # Panics @@ -1756,15 +1726,15 @@ Basic usage: let a: ", stringify!($SelfT), " = 7; // or any other integer type let b = 4; -assert_eq!(a.div_euc(b), 1); // 7 >= 4 * 1 -assert_eq!(a.div_euc(-b), -1); // 7 >= -4 * -1 -assert_eq!((-a).div_euc(b), -2); // -7 >= 4 * -2 -assert_eq!((-a).div_euc(-b), 2); // -7 >= -4 * 2 +assert_eq!(a.div_euclid(b), 1); // 7 >= 4 * 1 +assert_eq!(a.div_euclid(-b), -1); // 7 >= -4 * -1 +assert_eq!((-a).div_euclid(b), -2); // -7 >= 4 * -2 +assert_eq!((-a).div_euclid(-b), 2); // -7 >= -4 * 2 ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] #[rustc_inherit_overflow_checks] - pub fn div_euc(self, rhs: Self) -> Self { + pub fn div_euclid(self, rhs: Self) -> Self { let q = self / rhs; if self % rhs < 0 { return if rhs > 0 { q - 1 } else { q + 1 } @@ -1775,9 +1745,11 @@ assert_eq!((-a).div_euc(-b), 2); // -7 >= -4 * 2 doc_comment! { - concat!("Calculates the remainder `self mod rhs` by Euclidean division. + concat!("Calculates the least nonnegative remainder of `self (mod rhs)`. -In particular, the result `n` satisfies `0 <= n < rhs.abs()`. +This is done as if by the Euclidean division algorithm -- given +`r = self.rem_euclid(rhs)`, `self = rhs * self.div_euclid(rhs) + r`, and +`0 <= r < abs(rhs)`. 
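Since the documentation added above leans on the identity `self = rhs * self.div_euclid(rhs) + self.rem_euclid(rhs)` with `0 <= rem < abs(rhs)`, a side-by-side comparison with Rust's truncating `/` and `%` may help; the difference only shows up for negative operands. The sketch uses the stabilized method names, which at the time of this patch still sit behind `#![feature(euclidean_division)]`:

```
fn main() {
    let (a, b) = (-7_i32, 4_i32);

    // Truncating division (`/` and `%`) rounds toward zero,
    // so the remainder can be negative.
    assert_eq!(a / b, -1);
    assert_eq!(a % b, -3);

    // Euclidean division always yields a remainder in `0..b.abs()`.
    assert_eq!(a.div_euclid(b), -2);
    assert_eq!(a.rem_euclid(b), 1);

    // The defining identity holds for both pairs of operations.
    assert_eq!(b * a.div_euclid(b) + a.rem_euclid(b), a);
    assert_eq!(b * (a / b) + (a % b), a);
}
```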
# Panics @@ -1792,15 +1764,15 @@ Basic usage: let a: ", stringify!($SelfT), " = 7; // or any other integer type let b = 4; -assert_eq!(a.mod_euc(b), 3); -assert_eq!((-a).mod_euc(b), 1); -assert_eq!(a.mod_euc(-b), 3); -assert_eq!((-a).mod_euc(-b), 1); +assert_eq!(a.rem_euclid(b), 3); +assert_eq!((-a).rem_euclid(b), 1); +assert_eq!(a.rem_euclid(-b), 3); +assert_eq!((-a).rem_euclid(-b), 1); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] #[rustc_inherit_overflow_checks] - pub fn mod_euc(self, rhs: Self) -> Self { + pub fn rem_euclid(self, rhs: Self) -> Self { let r = self % rhs; if r < 0 { if rhs < 0 { @@ -1890,7 +1862,6 @@ assert!(!(-10", stringify!($SelfT), ").is_positive());", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_sign")] #[inline] pub const fn is_positive(self) -> bool { self > 0 } } @@ -1909,7 +1880,6 @@ assert!(!10", stringify!($SelfT), ".is_negative());", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_sign")] #[inline] pub const fn is_negative(self) -> bool { self < 0 } } @@ -1994,7 +1964,6 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { @@ -2026,13 +1995,12 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; -fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { +fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); *input = rest; - ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) + ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap()) } ```"), #[stable(feature = "int_to_from_bytes", since = "1.32.0")] @@ -2068,13 +2036,12 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; -fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { +fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); *input = rest; - ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) + ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap()) } ```"), #[stable(feature = "int_to_from_bytes", since = "1.32.0")] @@ -2231,10 +2198,9 @@ Basic usage: assert_eq!(n.count_ones(), 3);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn count_ones(self) -> u32 { - unsafe { intrinsics::ctpop(self as $ActualT) as u32 } + intrinsics::ctpop(self as $ActualT) as u32 } } @@ -2249,7 +2215,6 @@ Basic usage: ", $Feature, "assert_eq!(", stringify!($SelfT), "::max_value().count_zeros(), 0);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn count_zeros(self) -> u32 { (!self).count_ones() @@ -2269,10 +2234,9 @@ Basic usage: assert_eq!(n.leading_zeros(), 2);", 
$EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn leading_zeros(self) -> u32 { - unsafe { intrinsics::ctlz(self as $ActualT) as u32 } + intrinsics::ctlz(self as $ActualT) as u32 } } @@ -2290,10 +2254,9 @@ Basic usage: assert_eq!(n.trailing_zeros(), 3);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn trailing_zeros(self) -> u32 { - unsafe { intrinsics::cttz(self) as u32 } + intrinsics::cttz(self) as u32 } } @@ -2301,7 +2264,7 @@ assert_eq!(n.trailing_zeros(), 3);", $EndFeature, " concat!("Shifts the bits to the left by a specified amount, `n`, wrapping the truncated bits to the end of the resulting integer. -Please note this isn't the same operation as `<<`! +Please note this isn't the same operation as the `<<` shifting operator! # Examples @@ -2314,10 +2277,9 @@ let m = ", $rot_result, "; assert_eq!(n.rotate_left(", $rot, "), m); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_left(self, n: u32) -> Self { - unsafe { intrinsics::rotate_left(self, n as $SelfT) } + intrinsics::rotate_left(self, n as $SelfT) } } @@ -2326,7 +2288,7 @@ assert_eq!(n.rotate_left(", $rot, "), m); wrapping the truncated bits to the beginning of the resulting integer. -Please note this isn't the same operation as `>>`! +Please note this isn't the same operation as the `>>` shifting operator! # Examples @@ -2339,10 +2301,9 @@ let m = ", $rot_op, "; assert_eq!(n.rotate_right(", $rot, "), m); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_rotate")] #[inline] pub const fn rotate_right(self, n: u32) -> Self { - unsafe { intrinsics::rotate_right(self, n as $SelfT) } + intrinsics::rotate_right(self, n as $SelfT) } } @@ -2361,10 +2322,9 @@ let m = n.swap_bytes(); assert_eq!(m, ", $swapped, "); ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn swap_bytes(self) -> Self { - unsafe { intrinsics::bswap(self as $ActualT) as Self } + intrinsics::bswap(self as $ActualT) as Self } } @@ -2384,10 +2344,9 @@ let m = n.reverse_bits(); assert_eq!(m, ", $reversed, "); ```"), #[unstable(feature = "reverse_bits", issue = "48763")] - #[rustc_const_unstable(feature = "const_int_conversion")] #[inline] pub const fn reverse_bits(self) -> Self { - unsafe { intrinsics::bitreverse(self as $ActualT) as Self } + intrinsics::bitreverse(self as $ActualT) as Self } } @@ -2411,7 +2370,6 @@ if cfg!(target_endian = \"big\") { }", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn from_be(x: Self) -> Self { #[cfg(target_endian = "big")] @@ -2445,7 +2403,6 @@ if cfg!(target_endian = \"little\") { }", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn from_le(x: Self) -> Self { #[cfg(target_endian = "little")] @@ -2479,7 +2436,6 @@ if cfg!(target_endian = \"big\") { }", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn to_be(self) -> Self { // or not to be? 
#[cfg(target_endian = "big")] @@ -2513,7 +2469,6 @@ if cfg!(target_endian = \"little\") { }", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_ops")] #[inline] pub const fn to_le(self) -> Self { #[cfg(target_endian = "little")] @@ -2611,7 +2566,7 @@ assert_eq!(1", stringify!($SelfT), ".checked_div(0), None);", $EndFeature, " } doc_comment! { - concat!("Checked Euclidean division. Computes `self.div_euc(rhs)`, returning `None` + concat!("Checked Euclidean division. Computes `self.div_euclid(rhs)`, returning `None` if `rhs == 0`. # Examples @@ -2620,16 +2575,16 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(128", stringify!($SelfT), ".checked_div(2), Some(64)); -assert_eq!(1", stringify!($SelfT), ".checked_div_euc(0), None); +assert_eq!(128", stringify!($SelfT), ".checked_div_euclid(2), Some(64)); +assert_eq!(1", stringify!($SelfT), ".checked_div_euclid(0), None); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn checked_div_euc(self, rhs: Self) -> Option { + pub fn checked_div_euclid(self, rhs: Self) -> Option { if rhs == 0 { None } else { - Some(self.div_euc(rhs)) + Some(self.div_euclid(rhs)) } } } @@ -2659,7 +2614,7 @@ assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);", $EndFeature, " } doc_comment! { - concat!("Checked Euclidean modulo. Computes `self.mod_euc(rhs)`, returning `None` + concat!("Checked Euclidean modulo. Computes `self.rem_euclid(rhs)`, returning `None` if `rhs == 0`. # Examples @@ -2668,16 +2623,16 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(2), Some(1)); -assert_eq!(5", stringify!($SelfT), ".checked_mod_euc(0), None); +assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(2), Some(1)); +assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn checked_mod_euc(self, rhs: Self) -> Option { + pub fn checked_rem_euclid(self, rhs: Self) -> Option { if rhs == 0 { None } else { - Some(self.mod_euc(rhs)) + Some(self.rem_euclid(rhs)) } } } @@ -2753,11 +2708,10 @@ overflow occurred. 
Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(2", stringify!($SelfT), ".checked_pow(5), Some(32)); assert_eq!(", stringify!($SelfT), "::max_value().checked_pow(2), None);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn checked_pow(self, mut exp: u32) -> Option { let mut base = self; @@ -2794,13 +2748,12 @@ Basic usage: ", $Feature, "assert_eq!(100", stringify!($SelfT), ".saturating_add(1), 101); assert_eq!(200u8.saturating_add(127), 255);", $EndFeature, " ```"), + #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_saturating_int_methods")] #[inline] - pub fn saturating_add(self, rhs: Self) -> Self { - match self.checked_add(rhs) { - Some(x) => x, - None => Self::max_value(), - } + pub const fn saturating_add(self, rhs: Self) -> Self { + intrinsics::saturating_add(self, rhs) } } @@ -2817,12 +2770,10 @@ Basic usage: assert_eq!(13", stringify!($SelfT), ".saturating_sub(127), 0);", $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_saturating_int_methods")] #[inline] - pub fn saturating_sub(self, rhs: Self) -> Self { - match self.checked_sub(rhs) { - Some(x) => x, - None => Self::min_value(), - } + pub const fn saturating_sub(self, rhs: Self) -> Self { + intrinsics::saturating_sub(self, rhs) } } @@ -2857,14 +2808,13 @@ saturating at the numeric bounds instead of overflowing. Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "use std::", stringify!($SelfT), "; assert_eq!(4", stringify!($SelfT), ".saturating_pow(3), 64); assert_eq!(", stringify!($SelfT), "::MAX.saturating_pow(2), ", stringify!($SelfT), "::MAX);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn saturating_pow(self, exp: u32) -> Self { match self.checked_pow(exp) { @@ -2888,12 +2838,9 @@ assert_eq!(200", stringify!($SelfT), ".wrapping_add(", stringify!($SelfT), "::ma $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_add(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_add(self, rhs) - } + intrinsics::overflowing_add(self, rhs) } } @@ -2911,12 +2858,9 @@ assert_eq!(100", stringify!($SelfT), ".wrapping_sub(", stringify!($SelfT), "::ma $EndFeature, " ```"), #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_sub(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_sub(self, rhs) - } + intrinsics::overflowing_sub(self, rhs) } } @@ -2935,12 +2879,9 @@ $EndFeature, " /// assert_eq!(25u8.wrapping_mul(12), 44); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_mul(self, rhs: Self) -> Self { - unsafe { - intrinsics::overflowing_mul(self, rhs) - } + intrinsics::overflowing_mul(self, rhs) } doc_comment! { @@ -2965,11 +2906,14 @@ Basic usage: } doc_comment! { - concat!("Wrapping Euclidean division. Computes `self.div_euc(rhs)`. + concat!("Wrapping Euclidean division. Computes `self.div_euclid(rhs)`. Wrapped division on unsigned types is just normal division. There's no way wrapping could ever happen. This function exists, so that all operations are accounted for in the wrapping operations. 
+Since, for the positive integers, all common +definitions of division are equal, this +is exactly equal to `self.wrapping_div(rhs)`. # Examples @@ -2977,11 +2921,11 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(100", stringify!($SelfT), ".wrapping_div_euc(10), 10); +assert_eq!(100", stringify!($SelfT), ".wrapping_div_euclid(10), 10); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn wrapping_div_euc(self, rhs: Self) -> Self { + pub fn wrapping_div_euclid(self, rhs: Self) -> Self { self / rhs } } @@ -3009,12 +2953,15 @@ Basic usage: } doc_comment! { - concat!("Wrapping Euclidean modulo. Computes `self.mod_euc(rhs)`. + concat!("Wrapping Euclidean modulo. Computes `self.rem_euclid(rhs)`. Wrapped modulo calculation on unsigned types is just the regular remainder calculation. There's no way wrapping could ever happen. This function exists, so that all operations are accounted for in the wrapping operations. +Since, for the positive integers, all common +definitions of division are equal, this +is exactly equal to `self.wrapping_rem(rhs)`. # Examples @@ -3022,11 +2969,11 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(100", stringify!($SelfT), ".wrapping_mod_euc(10), 0); +assert_eq!(100", stringify!($SelfT), ".wrapping_rem_euclid(10), 0); ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] - pub fn wrapping_mod_euc(self, rhs: Self) -> Self { + pub fn wrapping_rem_euclid(self, rhs: Self) -> Self { self % rhs } } @@ -3054,7 +3001,7 @@ assert_eq!(100", stringify!($SelfT), ".wrapping_mod_euc(10), 0); /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline] - pub fn wrapping_neg(self) -> Self { + pub const fn wrapping_neg(self) -> Self { self.overflowing_neg().0 } @@ -3079,7 +3026,6 @@ Basic usage: assert_eq!(1", stringify!($SelfT), ".wrapping_shl(128), 1);", $EndFeature, " ```"), #[stable(feature = "num_wrapping", since = "1.2.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_shl(self, rhs: u32) -> Self { unsafe { @@ -3109,7 +3055,6 @@ Basic usage: assert_eq!(128", stringify!($SelfT), ".wrapping_shr(128), 128);", $EndFeature, " ```"), #[stable(feature = "num_wrapping", since = "1.2.0")] - #[rustc_const_unstable(feature = "const_int_wrapping")] #[inline] pub const fn wrapping_shr(self, rhs: u32) -> Self { unsafe { @@ -3127,11 +3072,10 @@ wrapping around at the boundary of the type. 
Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(3", stringify!($SelfT), ".wrapping_pow(5), 243); assert_eq!(3u8.wrapping_pow(6), 217);", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn wrapping_pow(self, mut exp: u32) -> Self { let mut base = self; @@ -3174,13 +3118,9 @@ assert_eq!(5", stringify!($SelfT), ".overflowing_add(2), (7, false)); assert_eq!(", stringify!($SelfT), "::MAX.overflowing_add(1), (0, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::add_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } } @@ -3204,13 +3144,9 @@ assert_eq!(0", stringify!($SelfT), ".overflowing_sub(1), (", stringify!($SelfT), $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::sub_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } } @@ -3233,13 +3169,9 @@ $EndFeature, " /// assert_eq!(1_000_000_000u32.overflowing_mul(10), (1410065408, true)); /// ``` #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) { - let (a, b) = unsafe { - intrinsics::mul_with_overflow(self as $ActualT, - rhs as $ActualT) - }; + let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT); (a as Self, b) } @@ -3270,12 +3202,15 @@ Basic usage } doc_comment! { - concat!("Calculates the quotient of Euclidean division `self.div_euc(rhs)`. + concat!("Calculates the quotient of Euclidean division `self.div_euclid(rhs)`. Returns a tuple of the divisor along with a boolean indicating whether an arithmetic overflow would occur. Note that for unsigned integers overflow never occurs, so the second value is always `false`. +Since, for the positive integers, all common +definitions of division are equal, this +is exactly equal to `self.overflowing_div(rhs)`. # Panics @@ -3287,11 +3222,11 @@ Basic usage ``` #![feature(euclidean_division)] -assert_eq!(5", stringify!($SelfT), ".overflowing_div_euc(2), (2, false)); +assert_eq!(5", stringify!($SelfT), ".overflowing_div_euclid(2), (2, false)); ```"), #[inline] #[unstable(feature = "euclidean_division", issue = "49048")] - pub fn overflowing_div_euc(self, rhs: Self) -> (Self, bool) { + pub fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) { (self / rhs, false) } } @@ -3323,12 +3258,15 @@ Basic usage } doc_comment! { - concat!("Calculates the remainder `self.mod_euc(rhs)` by Euclidean division. + concat!("Calculates the remainder `self.rem_euclid(rhs)` as if by Euclidean division. Returns a tuple of the modulo after dividing along with a boolean indicating whether an arithmetic overflow would occur. Note that for unsigned integers overflow never occurs, so the second value is always `false`. +Since, for the positive integers, all common +definitions of division are equal, this operation +is exactly equal to `self.overflowing_rem(rhs)`. 
# Panics @@ -3340,11 +3278,11 @@ Basic usage ``` #![feature(euclidean_division)] -assert_eq!(5", stringify!($SelfT), ".overflowing_mod_euc(2), (1, false)); +assert_eq!(5", stringify!($SelfT), ".overflowing_rem_euclid(2), (1, false)); ```"), #[inline] #[unstable(feature = "euclidean_division", issue = "49048")] - pub fn overflowing_mod_euc(self, rhs: Self) -> (Self, bool) { + pub fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) { (self % rhs, false) } } @@ -3368,7 +3306,7 @@ assert_eq!(2", stringify!($SelfT), ".overflowing_neg(), (-2i32 as ", stringify!( ```"), #[inline] #[stable(feature = "wrapping", since = "1.7.0")] - pub fn overflowing_neg(self) -> (Self, bool) { + pub const fn overflowing_neg(self) -> (Self, bool) { ((!self).wrapping_add(1), self != 0) } } @@ -3391,7 +3329,6 @@ Basic usage assert_eq!(0x1", stringify!($SelfT), ".overflowing_shl(132), (0x10, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_shl(self, rhs: u32) -> (Self, bool) { (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) @@ -3416,7 +3353,6 @@ Basic usage assert_eq!(0x10", stringify!($SelfT), ".overflowing_shr(132), (0x1, true));", $EndFeature, " ```"), #[stable(feature = "wrapping", since = "1.7.0")] - #[rustc_const_unstable(feature = "const_int_overflowing")] #[inline] pub const fn overflowing_shr(self, rhs: u32) -> (Self, bool) { (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) @@ -3434,11 +3370,10 @@ whether an overflow happened. Basic usage: ``` -#![feature(no_panic_pow)] ", $Feature, "assert_eq!(3", stringify!($SelfT), ".overflowing_pow(5), (243, false)); assert_eq!(3u8.overflowing_pow(6), (217, true));", $EndFeature, " ```"), - #[unstable(feature = "no_panic_pow", issue = "48320")] + #[stable(feature = "no_panic_pow", since = "1.34.0")] #[inline] pub fn overflowing_pow(self, mut exp: u32) -> (Self, bool) { let mut base = self; @@ -3511,7 +3446,9 @@ Basic usage: doc_comment! { concat!("Performs Euclidean division. -For unsigned types, this is just the same as `self / rhs`. +Since, for the positive integers, all common +definitions of division are equal, this +is exactly equal to `self / rhs`. # Examples @@ -3519,21 +3456,23 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(7", stringify!($SelfT), ".div_euc(4), 1); // or any other integer type +assert_eq!(7", stringify!($SelfT), ".div_euclid(4), 1); // or any other integer type ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] #[rustc_inherit_overflow_checks] - pub fn div_euc(self, rhs: Self) -> Self { + pub fn div_euclid(self, rhs: Self) -> Self { self / rhs } } doc_comment! { - concat!("Calculates the remainder `self mod rhs` by Euclidean division. + concat!("Calculates the least remainder of `self (mod rhs)`. -For unsigned types, this is just the same as `self % rhs`. +Since, for the positive integers, all common +definitions of division are equal, this +is exactly equal to `self % rhs`. 
# Examples @@ -3541,12 +3480,12 @@ Basic usage: ``` #![feature(euclidean_division)] -assert_eq!(7", stringify!($SelfT), ".mod_euc(4), 3); // or any other integer type +assert_eq!(7", stringify!($SelfT), ".rem_euclid(4), 3); // or any other integer type ```"), #[unstable(feature = "euclidean_division", issue = "49048")] #[inline] #[rustc_inherit_overflow_checks] - pub fn mod_euc(self, rhs: Self) -> Self { + pub fn rem_euclid(self, rhs: Self) -> Self { self % rhs } } @@ -3741,7 +3680,6 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { @@ -3773,13 +3711,12 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; -fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { +fn read_le_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); *input = rest; - ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) + ", stringify!($SelfT), "::from_le_bytes(int_bytes.try_into().unwrap()) } ```"), #[stable(feature = "int_to_from_bytes", since = "1.32.0")] @@ -3815,13 +3752,12 @@ assert_eq!(value, ", $swap_op, "); When starting from a slice rather than an array, fallible conversion APIs can be used: ``` -#![feature(try_from)] use std::convert::TryInto; -fn read_be_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { +fn read_ne_", stringify!($SelfT), "(input: &mut &[u8]) -> ", stringify!($SelfT), " { let (int_bytes, rest) = input.split_at(std::mem::size_of::<", stringify!($SelfT), ">()); *input = rest; - ", stringify!($SelfT), "::from_be_bytes(int_bytes.try_into().unwrap()) + ", stringify!($SelfT), "::from_ne_bytes(int_bytes.try_into().unwrap()) } ```"), #[stable(feature = "int_to_from_bytes", since = "1.32.0")] @@ -3876,7 +3812,8 @@ impl u8 { #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] pub fn to_ascii_uppercase(&self) -> u8 { - ASCII_UPPERCASE_MAP[*self as usize] + // Unset the fifth bit if this is a lowercase letter + *self & !((self.is_ascii_lowercase() as u8) << 5) } /// Makes a copy of the value in its ASCII lower case equivalent. @@ -3898,7 +3835,8 @@ impl u8 { #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")] #[inline] pub fn to_ascii_lowercase(&self) -> u8 { - ASCII_LOWERCASE_MAP[*self as usize] + // Set the fifth bit if this is an uppercase letter + *self | ((self.is_ascii_uppercase() as u8) << 5) } /// Checks that two values are an ASCII case-insensitive match.
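The doc fixes above (renaming the helpers to `read_le_*`/`read_ne_*` so they call the matching `from_le_bytes`/`from_ne_bytes`) are easier to follow with a concrete type; here `read_le_u32` is a hypothetical stand-in for the macro's `read_le_$SelfT` with `u32` substituted:

```
use std::convert::TryInto;

fn read_le_u32(input: &mut &[u8]) -> u32 {
    let (int_bytes, rest) = input.split_at(std::mem::size_of::<u32>());
    *input = rest;
    u32::from_le_bytes(int_bytes.try_into().unwrap())
}

fn main() {
    // 0x0403_0201 in little-endian byte order, followed by trailing data.
    let mut buf: &[u8] = &[0x01, 0x02, 0x03, 0x04, 0xff];
    assert_eq!(read_le_u32(&mut buf), 0x0403_0201);
    // The helper advances the slice past the bytes it consumed.
    assert_eq!(buf, &[0xff_u8][..]);
}
```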
@@ -4000,9 +3938,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_alphabetic(&self) -> bool { - if *self >= 0x80 { return false; } - match ASCII_CHARACTER_CLASS[*self as usize] { - L | Lx | U | Ux => true, + match *self { + b'A'...b'Z' | b'a'...b'z' => true, _ => false } } @@ -4036,9 +3973,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_uppercase(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - U | Ux => true, + match *self { + b'A'...b'Z' => true, _ => false } } @@ -4072,9 +4008,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_lowercase(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - L | Lx => true, + match *self { + b'a'...b'z' => true, _ => false } } @@ -4111,9 +4046,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_alphanumeric(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - D | L | Lx | U | Ux => true, + match *self { + b'0'...b'9' | b'A'...b'Z' | b'a'...b'z' => true, _ => false } } @@ -4147,9 +4081,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_digit(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - D => true, + match *self { + b'0'...b'9' => true, _ => false } } @@ -4186,9 +4119,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_hexdigit(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - D | Lx | Ux => true, + match *self { + b'0'...b'9' | b'A'...b'F' | b'a'...b'f' => true, _ => false } } @@ -4226,9 +4158,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_punctuation(&self) -> bool { - if *self >= 0x80 { return false } - match ASCII_CHARACTER_CLASS[*self as usize] { - P => true, + match *self { + b'!'...b'/' | b':'...b'@' | b'['...b'`' | b'{'...b'~' => true, _ => false } } @@ -4262,9 +4193,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_graphic(&self) -> bool { - if *self >= 0x80 { return false; } - match ASCII_CHARACTER_CLASS[*self as usize] { - Ux | U | Lx | L | D | P => true, + match *self { + b'!'...b'~' => true, _ => false } } @@ -4315,9 +4245,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_whitespace(&self) -> bool { - if *self >= 0x80 { return false; } - match ASCII_CHARACTER_CLASS[*self as usize] { - Cw | W => true, + match *self { + b'\t' | b'\n' | b'\x0C' | b'\r' | b' ' => true, _ => false } } @@ -4353,9 +4282,8 @@ impl u8 { #[stable(feature = "ascii_ctype_on_intrinsics", since = "1.24.0")] #[inline] pub fn is_ascii_control(&self) -> bool { - if *self >= 0x80 { return false; } - match ASCII_CHARACTER_CLASS[*self as usize] { - C | Cw => true, + match *self { + b'\0'...b'\x1F' | b'\x7F' => true, _ => false } } @@ -4478,7 +4406,7 @@ macro_rules! from_str_radix_int_impl { from_str_radix_int_impl! { isize i8 i16 i32 i64 i128 usize u8 u16 u32 u64 u128 } /// The error type returned when a checked integral type conversion fails. 
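(The hunks just below stabilize this error type together with the integer `TryFrom` impls; as a usage sketch that is not part of the diff, the fallible conversions then work without `#![feature(try_from)]`.)

```
// Sketch only; assumes a toolchain where `TryFrom` for integers is stable.
use std::convert::TryFrom;

fn main() {
    // In-range value: the conversion succeeds.
    assert_eq!(u8::try_from(200u32), Ok(200u8));
    // Out-of-range value: a `TryFromIntError` is returned.
    assert!(u8::try_from(300u32).is_err());
    // A negative signed value cannot fit an unsigned target.
    assert!(u32::try_from(-5i64).is_err());
}
```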
-#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct TryFromIntError(()); @@ -4493,27 +4421,40 @@ impl TryFromIntError { } } -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] impl fmt::Display for TryFromIntError { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { self.__description().fmt(fmt) } } -#[unstable(feature = "try_from", issue = "33417")] +#[stable(feature = "try_from", since = "1.34.0")] +impl From for TryFromIntError { + fn from(x: Infallible) -> TryFromIntError { + match x {} + } +} + +#[unstable(feature = "never_type", issue = "35121")] impl From for TryFromIntError { fn from(never: !) -> TryFromIntError { - never + // Match rather than coerce to make sure that code like + // `From for TryFromIntError` above will keep working + // when `Infallible` becomes an alias to `!`. + match never {} } } // no possible bounds violation macro_rules! try_from_unbounded { ($source:ty, $($target:ty),*) => {$( - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl TryFrom<$source> for $target { type Error = TryFromIntError; + /// Try to create the target number type from a source + /// number type. This returns an error if the source value + /// is outside of the range of the target type. #[inline] fn try_from(value: $source) -> Result { Ok(value as $target) @@ -4525,10 +4466,13 @@ macro_rules! try_from_unbounded { // only negative bounds macro_rules! try_from_lower_bounded { ($source:ty, $($target:ty),*) => {$( - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl TryFrom<$source> for $target { type Error = TryFromIntError; + /// Try to create the target number type from a source + /// number type. This returns an error if the source value + /// is outside of the range of the target type. #[inline] fn try_from(u: $source) -> Result<$target, TryFromIntError> { if u >= 0 { @@ -4544,10 +4488,13 @@ macro_rules! try_from_lower_bounded { // unsigned to signed (only positive bound) macro_rules! try_from_upper_bounded { ($source:ty, $($target:ty),*) => {$( - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl TryFrom<$source> for $target { type Error = TryFromIntError; + /// Try to create the target number type from a source + /// number type. This returns an error if the source value + /// is outside of the range of the target type. #[inline] fn try_from(u: $source) -> Result<$target, TryFromIntError> { if u > (<$target>::max_value() as $source) { @@ -4563,10 +4510,13 @@ macro_rules! try_from_upper_bounded { // all other cases macro_rules! try_from_both_bounded { ($source:ty, $($target:ty),*) => {$( - #[unstable(feature = "try_from", issue = "33417")] + #[stable(feature = "try_from", since = "1.34.0")] impl TryFrom<$source> for $target { type Error = TryFromIntError; + /// Try to create the target number type from a source + /// number type. This returns an error if the source value + /// is outside of the range of the target type. #[inline] fn try_from(u: $source) -> Result<$target, TryFromIntError> { let min = <$target>::min_value() as $source; @@ -4587,7 +4537,7 @@ macro_rules! 
rev { )*} } -/// intra-sign conversions +// intra-sign conversions try_from_upper_bounded!(u16, u8); try_from_upper_bounded!(u32, u16, u8); try_from_upper_bounded!(u64, u32, u16, u8); @@ -4836,6 +4786,11 @@ pub enum IntErrorKind { Overflow, /// Integer is too small to store in target integer type. Underflow, + /// Value was Zero + /// + /// This variant will be emitted when the parsing string has a value of zero, which + /// would be illegal for non-zero types. + Zero, } impl ParseIntError { @@ -4858,6 +4813,7 @@ impl ParseIntError { IntErrorKind::InvalidDigit => "invalid digit found in string", IntErrorKind::Overflow => "number too large to fit in target type", IntErrorKind::Underflow => "number too small to fit in target type", + IntErrorKind::Zero => "number would be zero for non-zero type", } } } @@ -4999,106 +4955,3 @@ impl_from! { u32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0" // Float -> Float impl_from! { f32, f64, #[stable(feature = "lossless_float_conv", since = "1.6.0")] } - -static ASCII_LOWERCASE_MAP: [u8; 256] = [ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', - b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', - b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', - b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', - b'@', - - b'a', b'b', b'c', b'd', b'e', b'f', b'g', - b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', - b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', - b'x', b'y', b'z', - - b'[', b'\\', b']', b'^', b'_', - b'`', b'a', b'b', b'c', b'd', b'e', b'f', b'g', - b'h', b'i', b'j', b'k', b'l', b'm', b'n', b'o', - b'p', b'q', b'r', b's', b't', b'u', b'v', b'w', - b'x', b'y', b'z', b'{', b'|', b'}', b'~', 0x7f, - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, - 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, - 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, - 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, - 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, - 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, - 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, - 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, - 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, - 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, - 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, - 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, - 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, -]; - -static ASCII_UPPERCASE_MAP: [u8; 256] = [ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - b' ', b'!', b'"', b'#', b'$', b'%', b'&', b'\'', - b'(', b')', b'*', b'+', b',', b'-', b'.', b'/', - b'0', b'1', b'2', b'3', b'4', b'5', b'6', b'7', - b'8', b'9', b':', b';', b'<', b'=', b'>', b'?', - b'@', b'A', b'B', b'C', b'D', b'E', b'F', b'G', - b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', - b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', - b'X', b'Y', b'Z', b'[', b'\\', b']', b'^', b'_', - b'`', - - b'A', b'B', b'C', b'D', b'E', b'F', b'G', - b'H', b'I', b'J', b'K', b'L', b'M', b'N', b'O', - b'P', b'Q', b'R', b'S', b'T', b'U', b'V', b'W', - b'X', b'Y', b'Z', - - b'{', b'|', b'}', b'~', 0x7f, - 0x80, 0x81, 0x82, 
0x83, 0x84, 0x85, 0x86, 0x87, - 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, - 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, - 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, - 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, - 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, - 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, - 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, - 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, - 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, - 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, - 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, - 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, - 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff, -]; - -enum AsciiCharacterClass { - C, // control - Cw, // control whitespace - W, // whitespace - D, // digit - L, // lowercase - Lx, // lowercase hex digit - U, // uppercase - Ux, // uppercase hex digit - P, // punctuation -} -use self::AsciiCharacterClass::*; - -static ASCII_CHARACTER_CLASS: [AsciiCharacterClass; 128] = [ -// _0 _1 _2 _3 _4 _5 _6 _7 _8 _9 _a _b _c _d _e _f - C, C, C, C, C, C, C, C, C, Cw,Cw,C, Cw,Cw,C, C, // 0_ - C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, C, // 1_ - W, P, P, P, P, P, P, P, P, P, P, P, P, P, P, P, // 2_ - D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, P, // 3_ - P, Ux,Ux,Ux,Ux,Ux,Ux,U, U, U, U, U, U, U, U, U, // 4_ - U, U, U, U, U, U, U, U, U, U, U, P, P, P, P, P, // 5_ - P, Lx,Lx,Lx,Lx,Lx,Lx,L, L, L, L, L, L, L, L, L, // 6_ - L, L, L, L, L, L, L, L, L, L, L, P, P, P, P, C, // 7_ -]; diff --git a/src/libcore/num/u128.rs b/src/libcore/num/u128.rs index e8c783a1bb542..7d1aa664de30c 100644 --- a/src/libcore/num/u128.rs +++ b/src/libcore/num/u128.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 128-bit unsigned integer type. //! //! *[See also the `u128` primitive type](../../std/primitive.u128.html).* diff --git a/src/libcore/num/u16.rs b/src/libcore/num/u16.rs index 9c318216f1fba..34f80abaecc05 100644 --- a/src/libcore/num/u16.rs +++ b/src/libcore/num/u16.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 16-bit unsigned integer type. //! //! *[See also the `u16` primitive type](../../std/primitive.u16.html).* diff --git a/src/libcore/num/u32.rs b/src/libcore/num/u32.rs index 84367c2073833..5fd486f546608 100644 --- a/src/libcore/num/u32.rs +++ b/src/libcore/num/u32.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 32-bit unsigned integer type. //! //! 
*[See also the `u32` primitive type](../../std/primitive.u32.html).* diff --git a/src/libcore/num/u64.rs b/src/libcore/num/u64.rs index cc48a28b22f69..044d238aea9be 100644 --- a/src/libcore/num/u64.rs +++ b/src/libcore/num/u64.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 64-bit unsigned integer type. //! //! *[See also the `u64` primitive type](../../std/primitive.u64.html).* diff --git a/src/libcore/num/u8.rs b/src/libcore/num/u8.rs index 6c0daa7763ae1..6747e6a0f6c24 100644 --- a/src/libcore/num/u8.rs +++ b/src/libcore/num/u8.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The 8-bit unsigned integer type. //! //! *[See also the `u8` primitive type](../../std/primitive.u8.html).* diff --git a/src/libcore/num/uint_macros.rs b/src/libcore/num/uint_macros.rs index f7e1f78d69ebf..a94b541ddb907 100644 --- a/src/libcore/num/uint_macros.rs +++ b/src/libcore/num/uint_macros.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![doc(hidden)] macro_rules! uint_module { diff --git a/src/libcore/num/usize.rs b/src/libcore/num/usize.rs index 0b6f1c73c5834..e3a5239d908dc 100644 --- a/src/libcore/num/usize.rs +++ b/src/libcore/num/usize.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The pointer-sized unsigned integer type. //! //! *[See also the `usize` primitive type](../../std/primitive.usize.html).* diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index 94dd657ec97c5..9cd5108ade411 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::Wrapping; use ops::*; @@ -439,7 +429,8 @@ assert_eq!(n.trailing_zeros(), 3); /// wrapping the truncated bits to the end of the resulting /// integer. /// - /// Please note this isn't the same operation as `>>`! + /// Please note this isn't the same operation as the `>>` shifting + /// operator! 
/// /// # Examples /// @@ -464,7 +455,8 @@ assert_eq!(n.trailing_zeros(), 3); /// wrapping the truncated bits to the beginning of the resulting /// integer. /// - /// Please note this isn't the same operation as `<<`! + /// Please note this isn't the same operation as the `<<` shifting + /// operator! /// /// # Examples /// diff --git a/src/libcore/ops/arith.rs b/src/libcore/ops/arith.rs index a1bc5463f7333..8139305f5302d 100644 --- a/src/libcore/ops/arith.rs +++ b/src/libcore/ops/arith.rs @@ -1,16 +1,6 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// The addition operator `+`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. For +/// Note that `Rhs` is `Self` by default, but this is not mandatory. For /// example, [`std::time::SystemTime`] implements `Add`, which permits /// operations of the form `SystemTime = SystemTime + Duration`. /// @@ -30,10 +20,10 @@ /// } /// /// impl Add for Point { -/// type Output = Point; +/// type Output = Self; /// -/// fn add(self, other: Point) -> Point { -/// Point { +/// fn add(self, other: Self) -> Self { +/// Self { /// x: self.x + other.x, /// y: self.y + other.y, /// } @@ -59,11 +49,11 @@ /// } /// /// // Notice that the implementation uses the associated type `Output`. -/// impl> Add for Point { -/// type Output = Point; +/// impl> Add for Point { +/// type Output = Self; /// -/// fn add(self, other: Point) -> Point { -/// Point { +/// fn add(self, other: Self) -> Self::Output { +/// Self { /// x: self.x + other.x, /// y: self.y + other.y, /// } @@ -77,18 +67,18 @@ #[stable(feature = "rust1", since = "1.0.0")] #[rustc_on_unimplemented( on( - all(_Self="{integer}", RHS="{float}"), + all(_Self="{integer}", Rhs="{float}"), message="cannot add a float to an integer", ), on( - all(_Self="{float}", RHS="{integer}"), + all(_Self="{float}", Rhs="{integer}"), message="cannot add an integer to a float", ), - message="cannot add `{RHS}` to `{Self}`", - label="no implementation for `{Self} + {RHS}`", + message="cannot add `{Rhs}` to `{Self}`", + label="no implementation for `{Self} + {Rhs}`", )] #[doc(alias = "+")] -pub trait Add { +pub trait Add { /// The resulting type after applying the `+` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -96,7 +86,7 @@ pub trait Add { /// Performs the `+` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn add(self, rhs: RHS) -> Self::Output; + fn add(self, rhs: Rhs) -> Self::Output; } macro_rules! add_impl { @@ -118,7 +108,7 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// The subtraction operator `-`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. For +/// Note that `Rhs` is `Self` by default, but this is not mandatory. For /// example, [`std::time::SystemTime`] implements `Sub`, which permits /// operations of the form `SystemTime = SystemTime - Duration`. /// @@ -167,10 +157,10 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// } /// /// // Notice that the implementation uses the associated type `Output`. 
-/// impl> Sub for Point { -/// type Output = Point; +/// impl> Sub for Point { +/// type Output = Self; /// -/// fn sub(self, other: Point) -> Point { +/// fn sub(self, other: Self) -> Self::Output { /// Point { /// x: self.x - other.x, /// y: self.y - other.y, @@ -183,10 +173,10 @@ add_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "sub"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="cannot subtract `{RHS}` from `{Self}`", - label="no implementation for `{Self} - {RHS}`")] +#[rustc_on_unimplemented(message="cannot subtract `{Rhs}` from `{Self}`", + label="no implementation for `{Self} - {Rhs}`")] #[doc(alias = "-")] -pub trait Sub { +pub trait Sub { /// The resulting type after applying the `-` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -194,7 +184,7 @@ pub trait Sub { /// Performs the `-` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn sub(self, rhs: RHS) -> Self::Output; + fn sub(self, rhs: Rhs) -> Self::Output; } macro_rules! sub_impl { @@ -216,7 +206,7 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// The multiplication operator `*`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. /// /// # Examples /// @@ -230,21 +220,21 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// // derive `Eq` and `PartialEq`. /// #[derive(Debug, Eq, PartialEq)] /// struct Rational { -/// nominator: usize, +/// numerator: usize, /// denominator: usize, /// } /// /// impl Rational { -/// fn new(nominator: usize, denominator: usize) -> Self { +/// fn new(numerator: usize, denominator: usize) -> Self { /// if denominator == 0 { /// panic!("Zero is an invalid denominator!"); /// } /// /// // Reduce to lowest terms by dividing by the greatest common /// // divisor. -/// let gcd = gcd(nominator, denominator); +/// let gcd = gcd(numerator, denominator); /// Rational { -/// nominator: nominator / gcd, +/// numerator: numerator / gcd, /// denominator: denominator / gcd, /// } /// } @@ -255,9 +245,9 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// type Output = Self; /// /// fn mul(self, rhs: Self) -> Self { -/// let nominator = self.nominator * rhs.nominator; +/// let numerator = self.numerator * rhs.numerator; /// let denominator = self.denominator * rhs.denominator; -/// Rational::new(nominator, denominator) +/// Rational::new(numerator, denominator) /// } /// } /// @@ -290,9 +280,9 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// struct Vector { value: Vec } /// /// impl Mul for Vector { -/// type Output = Vector; +/// type Output = Self; /// -/// fn mul(self, rhs: Scalar) -> Vector { +/// fn mul(self, rhs: Scalar) -> Self::Output { /// Vector { value: self.value.iter().map(|v| v * rhs.value).collect() } /// } /// } @@ -303,10 +293,10 @@ sub_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "mul"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="cannot multiply `{RHS}` to `{Self}`", - label="no implementation for `{Self} * {RHS}`")] +#[rustc_on_unimplemented(message="cannot multiply `{Rhs}` to `{Self}`", + label="no implementation for `{Self} * {Rhs}`")] #[doc(alias = "*")] -pub trait Mul { +pub trait Mul { /// The resulting type after applying the `*` operator. 
#[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -314,7 +304,7 @@ pub trait Mul { /// Performs the `*` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn mul(self, rhs: RHS) -> Self::Output; + fn mul(self, rhs: Rhs) -> Self::Output; } macro_rules! mul_impl { @@ -336,7 +326,7 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// The division operator `/`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. /// /// # Examples /// @@ -350,21 +340,21 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// // derive `Eq` and `PartialEq`. /// #[derive(Debug, Eq, PartialEq)] /// struct Rational { -/// nominator: usize, +/// numerator: usize, /// denominator: usize, /// } /// /// impl Rational { -/// fn new(nominator: usize, denominator: usize) -> Self { +/// fn new(numerator: usize, denominator: usize) -> Self { /// if denominator == 0 { /// panic!("Zero is an invalid denominator!"); /// } /// /// // Reduce to lowest terms by dividing by the greatest common /// // divisor. -/// let gcd = gcd(nominator, denominator); +/// let gcd = gcd(numerator, denominator); /// Rational { -/// nominator: nominator / gcd, +/// numerator: numerator / gcd, /// denominator: denominator / gcd, /// } /// } @@ -374,14 +364,14 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// // The division of rational numbers is a closed operation. /// type Output = Self; /// -/// fn div(self, rhs: Self) -> Self { -/// if rhs.nominator == 0 { +/// fn div(self, rhs: Self) -> Self::Output { +/// if rhs.numerator == 0 { /// panic!("Cannot divide by zero-valued `Rational`!"); /// } /// -/// let nominator = self.nominator * rhs.denominator; -/// let denominator = self.denominator * rhs.nominator; -/// Rational::new(nominator, denominator) +/// let numerator = self.numerator * rhs.denominator; +/// let denominator = self.denominator * rhs.numerator; +/// Rational::new(numerator, denominator) /// } /// } /// @@ -414,9 +404,9 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// struct Vector { value: Vec } /// /// impl Div for Vector { -/// type Output = Vector; +/// type Output = Self; /// -/// fn div(self, rhs: Scalar) -> Vector { +/// fn div(self, rhs: Scalar) -> Self::Output { /// Vector { value: self.value.iter().map(|v| v / rhs.value).collect() } /// } /// } @@ -427,10 +417,10 @@ mul_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// ``` #[lang = "div"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="cannot divide `{Self}` by `{RHS}`", - label="no implementation for `{Self} / {RHS}`")] +#[rustc_on_unimplemented(message="cannot divide `{Self}` by `{Rhs}`", + label="no implementation for `{Self} / {Rhs}`")] #[doc(alias = "/")] -pub trait Div { +pub trait Div { /// The resulting type after applying the `/` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -438,7 +428,7 @@ pub trait Div { /// Performs the `/` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn div(self, rhs: RHS) -> Self::Output; + fn div(self, rhs: Rhs) -> Self::Output; } macro_rules! div_impl_integer { @@ -477,7 +467,7 @@ div_impl_float! { f32 f64 } /// The remainder operator `%`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. 
/// /// # Examples /// @@ -495,9 +485,9 @@ div_impl_float! { f32 f64 } /// } /// /// impl<'a, T> Rem for SplitSlice<'a, T> { -/// type Output = SplitSlice<'a, T>; +/// type Output = Self; /// -/// fn rem(self, modulus: usize) -> Self { +/// fn rem(self, modulus: usize) -> Self::Output { /// let len = self.slice.len(); /// let rem = len % modulus; /// let start = len - rem; @@ -512,10 +502,10 @@ div_impl_float! { f32 f64 } /// ``` #[lang = "rem"] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="cannot mod `{Self}` by `{RHS}`", - label="no implementation for `{Self} % {RHS}`")] +#[rustc_on_unimplemented(message="cannot mod `{Self}` by `{Rhs}`", + label="no implementation for `{Self} % {Rhs}`")] #[doc(alias = "%")] -pub trait Rem { +pub trait Rem { /// The resulting type after applying the `%` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output = Self; @@ -523,12 +513,12 @@ pub trait Rem { /// Performs the `%` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn rem(self, rhs: RHS) -> Self::Output; + fn rem(self, rhs: Rhs) -> Self::Output; } macro_rules! rem_impl_integer { ($($t:ty)*) => ($( - /// This operation satisfies `n % d == n - (n / d) * d`. The + /// This operation satisfies `n % d == n - (n / d) * d`. The /// result has the same sign as the left operand. #[stable(feature = "rust1", since = "1.0.0")] impl Rem for $t { @@ -547,6 +537,21 @@ rem_impl_integer! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } macro_rules! rem_impl_float { ($($t:ty)*) => ($( + + /// The remainder from the division of two floats. + /// + /// The remainder has the same sign as the dividend and is computed as: + /// `x - (x / y).trunc() * y`. + /// + /// # Examples + /// ``` + /// let x: f32 = 50.50; + /// let y: f32 = 8.125; + /// let remainder = x - (x / y).trunc() * y; + /// + /// // The answer to both operations is 1.75 + /// assert_eq!(x % y, remainder); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] impl Rem for $t { type Output = $t; @@ -581,7 +586,7 @@ rem_impl_float! { f32 f64 } /// impl Neg for Sign { /// type Output = Sign; /// -/// fn neg(self) -> Sign { +/// fn neg(self) -> Self::Output { /// match self { /// Sign::Negative => Sign::Positive, /// Sign::Zero => Sign::Zero, @@ -660,8 +665,8 @@ neg_impl_numeric! { isize i8 i16 i32 i64 i128 f32 f64 } /// } /// /// impl AddAssign for Point { -/// fn add_assign(&mut self, other: Point) { -/// *self = Point { +/// fn add_assign(&mut self, other: Self) { +/// *self = Self { /// x: self.x + other.x, /// y: self.y + other.y, /// }; @@ -716,8 +721,8 @@ add_assign_impl! { usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 f32 f64 } /// } /// /// impl SubAssign for Point { -/// fn sub_assign(&mut self, other: Point) { -/// *self = Point { +/// fn sub_assign(&mut self, other: Self) { +/// *self = Self { /// x: self.x - other.x, /// y: self.y - other.y, /// }; diff --git a/src/libcore/ops/bit.rs b/src/libcore/ops/bit.rs index 3900f365b0ab1..a8f862f6c05a5 100644 --- a/src/libcore/ops/bit.rs +++ b/src/libcore/ops/bit.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// The unary logical negation operator `!`. 
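(Stepping back to the remainder impls above: the new doc comment on the float `Rem` impl states the sign rule for `%`; a quick check of that rule, not part of the patch.)

```
fn main() {
    // The remainder takes the sign of the dividend (left operand).
    assert_eq!(7 % 4, 3);
    assert_eq!(-7 % 4, -3);
    assert_eq!(7 % -4, 3);

    // Floats follow the same rule: x % y == x - (x / y).trunc() * y.
    let (x, y) = (50.50_f32, 8.125_f32);
    assert_eq!(x % y, x - (x / y).trunc() * y);
    assert_eq!(-x % y, -1.75);
}
```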
/// /// # Examples @@ -27,7 +17,7 @@ /// impl Not for Answer { /// type Output = Answer; /// -/// fn not(self) -> Answer { +/// fn not(self) -> Self::Output { /// match self { /// Answer::Yes => Answer::No, /// Answer::No => Answer::Yes @@ -69,7 +59,7 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// The bitwise AND operator `&`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. /// /// # Examples /// @@ -85,7 +75,7 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// type Output = Self; /// /// // rhs is the "right-hand side" of the expression `a & b` -/// fn bitand(self, rhs: Self) -> Self { +/// fn bitand(self, rhs: Self) -> Self::Output { /// Scalar(self.0 & rhs.0) /// } /// } @@ -107,7 +97,7 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// impl BitAnd for BooleanVector { /// type Output = Self; /// -/// fn bitand(self, BooleanVector(rhs): Self) -> Self { +/// fn bitand(self, BooleanVector(rhs): Self) -> Self::Output { /// let BooleanVector(lhs) = self; /// assert_eq!(lhs.len(), rhs.len()); /// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x && *y).collect()) @@ -122,9 +112,9 @@ not_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } #[lang = "bitand"] #[doc(alias = "&")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="no implementation for `{Self} & {RHS}`", - label="no implementation for `{Self} & {RHS}`")] -pub trait BitAnd { +#[rustc_on_unimplemented(message="no implementation for `{Self} & {Rhs}`", + label="no implementation for `{Self} & {Rhs}`")] +pub trait BitAnd { /// The resulting type after applying the `&` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -132,7 +122,7 @@ pub trait BitAnd { /// Performs the `&` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn bitand(self, rhs: RHS) -> Self::Output; + fn bitand(self, rhs: Rhs) -> Self::Output; } macro_rules! bitand_impl { @@ -153,7 +143,7 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// The bitwise OR operator `|`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. /// /// # Examples /// @@ -191,7 +181,7 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// impl BitOr for BooleanVector { /// type Output = Self; /// -/// fn bitor(self, BooleanVector(rhs): Self) -> Self { +/// fn bitor(self, BooleanVector(rhs): Self) -> Self::Output { /// let BooleanVector(lhs) = self; /// assert_eq!(lhs.len(), rhs.len()); /// BooleanVector(lhs.iter().zip(rhs.iter()).map(|(x, y)| *x || *y).collect()) @@ -206,9 +196,9 @@ bitand_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } #[lang = "bitor"] #[doc(alias = "|")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="no implementation for `{Self} | {RHS}`", - label="no implementation for `{Self} | {RHS}`")] -pub trait BitOr { +#[rustc_on_unimplemented(message="no implementation for `{Self} | {Rhs}`", + label="no implementation for `{Self} | {Rhs}`")] +pub trait BitOr { /// The resulting type after applying the `|` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -216,7 +206,7 @@ pub trait BitOr { /// Performs the `|` operation. 
#[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn bitor(self, rhs: RHS) -> Self::Output; + fn bitor(self, rhs: Rhs) -> Self::Output; } macro_rules! bitor_impl { @@ -237,7 +227,7 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// The bitwise XOR operator `^`. /// -/// Note that `RHS` is `Self` by default, but this is not mandatory. +/// Note that `Rhs` is `Self` by default, but this is not mandatory. /// /// # Examples /// @@ -253,7 +243,7 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// type Output = Self; /// /// // rhs is the "right-hand side" of the expression `a ^ b` -/// fn bitxor(self, rhs: Self) -> Self { +/// fn bitxor(self, rhs: Self) -> Self::Output { /// Scalar(self.0 ^ rhs.0) /// } /// } @@ -275,7 +265,7 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// impl BitXor for BooleanVector { /// type Output = Self; /// -/// fn bitxor(self, BooleanVector(rhs): Self) -> Self { +/// fn bitxor(self, BooleanVector(rhs): Self) -> Self::Output { /// let BooleanVector(lhs) = self; /// assert_eq!(lhs.len(), rhs.len()); /// BooleanVector(lhs.iter() @@ -293,9 +283,9 @@ bitor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } #[lang = "bitxor"] #[doc(alias = "^")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="no implementation for `{Self} ^ {RHS}`", - label="no implementation for `{Self} ^ {RHS}`")] -pub trait BitXor { +#[rustc_on_unimplemented(message="no implementation for `{Self} ^ {Rhs}`", + label="no implementation for `{Self} ^ {Rhs}`")] +pub trait BitXor { /// The resulting type after applying the `^` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -303,7 +293,7 @@ pub trait BitXor { /// Performs the `^` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn bitxor(self, rhs: RHS) -> Self::Output; + fn bitxor(self, rhs: Rhs) -> Self::Output; } macro_rules! bitxor_impl { @@ -365,7 +355,7 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } /// impl Shl for SpinVector { /// type Output = Self; /// -/// fn shl(self, rhs: usize) -> SpinVector { +/// fn shl(self, rhs: usize) -> Self::Output { /// // Rotate the vector by `rhs` places. /// let (a, b) = self.vec.split_at(rhs); /// let mut spun_vector: Vec = vec![]; @@ -381,9 +371,9 @@ bitxor_impl! { bool usize u8 u16 u32 u64 u128 isize i8 i16 i32 i64 i128 } #[lang = "shl"] #[doc(alias = "<<")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="no implementation for `{Self} << {RHS}`", - label="no implementation for `{Self} << {RHS}`")] -pub trait Shl { +#[rustc_on_unimplemented(message="no implementation for `{Self} << {Rhs}`", + label="no implementation for `{Self} << {Rhs}`")] +pub trait Shl { /// The resulting type after applying the `<<` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -391,7 +381,7 @@ pub trait Shl { /// Performs the `<<` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn shl(self, rhs: RHS) -> Self::Output; + fn shl(self, rhs: Rhs) -> Self::Output; } macro_rules! shl_impl { @@ -474,7 +464,7 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 } /// impl Shr for SpinVector { /// type Output = Self; /// -/// fn shr(self, rhs: usize) -> SpinVector { +/// fn shr(self, rhs: usize) -> Self::Output { /// // Rotate the vector by `rhs` places. 
/// let (a, b) = self.vec.split_at(self.vec.len() - rhs); /// let mut spun_vector: Vec = vec![]; @@ -490,9 +480,9 @@ shl_impl_all! { u8 u16 u32 u64 u128 usize i8 i16 i32 i64 isize i128 } #[lang = "shr"] #[doc(alias = ">>")] #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented(message="no implementation for `{Self} >> {RHS}`", - label="no implementation for `{Self} >> {RHS}`")] -pub trait Shr { +#[rustc_on_unimplemented(message="no implementation for `{Self} >> {Rhs}`", + label="no implementation for `{Self} >> {Rhs}`")] +pub trait Shr { /// The resulting type after applying the `>>` operator. #[stable(feature = "rust1", since = "1.0.0")] type Output; @@ -500,7 +490,7 @@ pub trait Shr { /// Performs the `>>` operation. #[must_use] #[stable(feature = "rust1", since = "1.0.0")] - fn shr(self, rhs: RHS) -> Self::Output; + fn shr(self, rhs: Rhs) -> Self::Output; } macro_rules! shr_impl { diff --git a/src/libcore/ops/deref.rs b/src/libcore/ops/deref.rs index 91a3d77e8b2ef..e44a6c4d2a0e9 100644 --- a/src/libcore/ops/deref.rs +++ b/src/libcore/ops/deref.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// Used for immutable dereferencing operations, like `*v`. /// /// In addition to being used for explicit dereferencing operations with the @@ -37,7 +27,7 @@ /// [book] as well as the reference sections on [the dereference operator] /// [ref-deref-op], [method resolution] and [type coercions]. /// -/// [book]: ../../book/second-edition/ch15-02-deref.html +/// [book]: ../../book/ch15-02-deref.html /// [`DerefMut`]: trait.DerefMut.html /// [more]: #more-on-deref-coercion /// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator @@ -59,7 +49,7 @@ /// impl Deref for DerefExample { /// type Target = T; /// -/// fn deref(&self) -> &T { +/// fn deref(&self) -> &Self::Target { /// &self.value /// } /// } @@ -119,7 +109,7 @@ impl Deref for &mut T { /// then: /// /// * In mutable contexts, `*x` on non-pointer types is equivalent to -/// `*Deref::deref(&x)`. +/// `*DerefMut::deref_mut(&mut x)`. /// * Values of type `&mut T` are coerced to values of type `&mut U` /// * `T` implicitly implements all the (mutable) methods of the type `U`. /// @@ -127,7 +117,7 @@ impl Deref for &mut T { /// [book] as well as the reference sections on [the dereference operator] /// [ref-deref-op], [method resolution] and [type coercions]. 
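(A minimal sketch of the deref coercion those corrected book links describe; the `Label` wrapper is invented purely for illustration.)

```
use std::ops::Deref;

// Illustrative wrapper; it derefs to `str`, so `str` methods apply directly.
struct Label(String);

impl Deref for Label {
    type Target = str;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

fn main() {
    let l = Label(String::from("hello"));
    assert_eq!(l.len(), 5);       // &Label is coerced to &str
    assert!(l.starts_with("he"));
}
```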
/// -/// [book]: ../../book/second-edition/ch15-02-deref.html +/// [book]: ../../book/ch15-02-deref.html /// [`Deref`]: trait.Deref.html /// [more]: #more-on-deref-coercion /// [ref-deref-op]: ../../reference/expressions/operator-expr.html#the-dereference-operator @@ -149,13 +139,13 @@ impl Deref for &mut T { /// impl Deref for DerefMutExample { /// type Target = T; /// -/// fn deref(&self) -> &T { +/// fn deref(&self) -> &Self::Target { /// &self.value /// } /// } /// /// impl DerefMut for DerefMutExample { -/// fn deref_mut(&mut self) -> &mut T { +/// fn deref_mut(&mut self) -> &mut Self::Target { /// &mut self.value /// } /// } @@ -177,3 +167,19 @@ pub trait DerefMut: Deref { impl DerefMut for &mut T { fn deref_mut(&mut self) -> &mut T { *self } } + +/// Indicates that a struct can be used as a method receiver, without the +/// `arbitrary_self_types` feature. This is implemented by stdlib pointer types like `Box`, +/// `Rc`, `&T`, and `Pin

`. +#[lang = "receiver"] +#[unstable(feature = "receiver_trait", issue = "0")] +#[doc(hidden)] +pub trait Receiver { + // Empty. +} + +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for &T {} + +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for &mut T {} diff --git a/src/libcore/ops/drop.rs b/src/libcore/ops/drop.rs index 474f7e34c3470..eae63ea2390a8 100644 --- a/src/libcore/ops/drop.rs +++ b/src/libcore/ops/drop.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// Used to run some code when a value goes out of scope. /// This is sometimes called a 'destructor'. /// @@ -21,7 +11,7 @@ /// Refer to [the chapter on `Drop` in *The Rust Programming Language*][book] /// for some more elaboration. /// -/// [book]: ../../book/second-edition/ch15-03-drop.html +/// [book]: ../../book/ch15-03-drop.html /// /// # Examples /// diff --git a/src/libcore/ops/function.rs b/src/libcore/ops/function.rs index 3b356b9a1e7b4..c69f5fd989696 100644 --- a/src/libcore/ops/function.rs +++ b/src/libcore/ops/function.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// The version of the call operator that takes an immutable receiver. /// /// Instances of `Fn` can be called repeatedly without mutating state. @@ -37,7 +27,7 @@ /// `Fn(usize, bool) -> usize`). Those interested in the technical details of /// this can refer to [the relevant section in the *Rustonomicon*][nomicon]. /// -/// [book]: ../../book/second-edition/ch13-01-closures.html +/// [book]: ../../book/ch13-01-closures.html /// [`FnMut`]: trait.FnMut.html /// [`FnOnce`]: trait.FnOnce.html /// [function pointers]: ../../std/primitive.fn.html @@ -72,7 +62,7 @@ label="expected an `Fn<{Args}>` closure, found `{Self}`", )] #[fundamental] // so that regex can rely that `&str: !FnMut` -#[must_use] +#[must_use = "closures are lazy and do nothing unless called"] pub trait Fn : FnMut { /// Performs the call operation. #[unstable(feature = "fn_traits", issue = "29625")] @@ -105,7 +95,7 @@ pub trait Fn : FnMut { /// `Fn(usize, bool) -> usize`). Those interested in the technical details of /// this can refer to [the relevant section in the *Rustonomicon*][nomicon]. /// -/// [book]: ../../book/second-edition/ch13-01-closures.html +/// [book]: ../../book/ch13-01-closures.html /// [`Fn`]: trait.Fn.html /// [`FnOnce`]: trait.FnOnce.html /// [function pointers]: ../../std/primitive.fn.html @@ -151,7 +141,7 @@ pub trait Fn : FnMut { label="expected an `FnMut<{Args}>` closure, found `{Self}`", )] #[fundamental] // so that regex can rely that `&str: !FnMut` -#[must_use] +#[must_use = "closures are lazy and do nothing unless called"] pub trait FnMut : FnOnce { /// Performs the call operation. #[unstable(feature = "fn_traits", issue = "29625")] @@ -183,7 +173,7 @@ pub trait FnMut : FnOnce { /// `Fn(usize, bool) -> usize`). 
Those interested in the technical details of /// this can refer to [the relevant section in the *Rustonomicon*][nomicon]. /// -/// [book]: ../../book/second-edition/ch13-01-closures.html +/// [book]: ../../book/ch13-01-closures.html /// [`Fn`]: trait.Fn.html /// [`FnMut`]: trait.FnMut.html /// [function pointers]: ../../std/primitive.fn.html @@ -230,7 +220,7 @@ pub trait FnMut : FnOnce { label="expected an `FnOnce<{Args}>` closure, found `{Self}`", )] #[fundamental] // so that regex can rely that `&str: !FnMut` -#[must_use] +#[must_use = "closures are lazy and do nothing unless called"] pub trait FnOnce { /// The returned type after the call operator is used. #[stable(feature = "fn_once_output", since = "1.12.0")] diff --git a/src/libcore/ops/generator.rs b/src/libcore/ops/generator.rs index 297089926b536..5401fff860e9b 100644 --- a/src/libcore/ops/generator.rs +++ b/src/libcore/ops/generator.rs @@ -1,12 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +use crate::marker::Unpin; +use crate::pin::Pin; /// The result of a generator resumption. /// @@ -49,6 +42,7 @@ pub enum GeneratorState { /// #![feature(generators, generator_trait)] /// /// use std::ops::{Generator, GeneratorState}; +/// use std::pin::Pin; /// /// fn main() { /// let mut generator = || { @@ -56,11 +50,11 @@ pub enum GeneratorState { /// return "foo" /// }; /// -/// match unsafe { generator.resume() } { +/// match Pin::new(&mut generator).resume() { /// GeneratorState::Yielded(1) => {} /// _ => panic!("unexpected return from resume"), /// } -/// match unsafe { generator.resume() } { +/// match Pin::new(&mut generator).resume() { /// GeneratorState::Complete("foo") => {} /// _ => panic!("unexpected return from resume"), /// } @@ -98,10 +92,6 @@ pub trait Generator { /// generator will continue executing until it either yields or returns, at /// which point this function will return. /// - /// The function is unsafe because it can be used on an immovable generator. - /// After such a call, the immovable generator must not move again, but - /// this is not enforced by the compiler. - /// /// # Return value /// /// The `GeneratorState` enum returned from this function indicates what @@ -120,16 +110,25 @@ pub trait Generator { /// been returned previously. While generator literals in the language are /// guaranteed to panic on resuming after `Complete`, this is not guaranteed /// for all implementations of the `Generator` trait. 
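(Returning briefly to the closure traits above: the strengthened `#[must_use]` reason fires when a closure is built but never invoked; a small illustration, not part of the patch.)

```
fn main() {
    let double = |x: i32| x * 2;

    // A closure does nothing until it is called. Writing one as a bare
    // statement, e.g.
    //     || println!("never printed");
    // now produces an unused-must-use warning citing the reason
    // "closures are lazy and do nothing unless called".
    assert_eq!(double(21), 42);
}
```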
- unsafe fn resume(&mut self) -> GeneratorState; + fn resume(self: Pin<&mut Self>) -> GeneratorState; } #[unstable(feature = "generator_trait", issue = "43122")] -impl Generator for &mut T - where T: Generator + ?Sized -{ - type Yield = T::Yield; - type Return = T::Return; - unsafe fn resume(&mut self) -> GeneratorState { - (**self).resume() +impl Generator for Pin<&mut G> { + type Yield = G::Yield; + type Return = G::Return; + + fn resume(mut self: Pin<&mut Self>) -> GeneratorState { + G::resume((*self).as_mut()) + } +} + +#[unstable(feature = "generator_trait", issue = "43122")] +impl Generator for &mut G { + type Yield = G::Yield; + type Return = G::Return; + + fn resume(mut self: Pin<&mut Self>) -> GeneratorState { + G::resume(Pin::new(&mut *self)) } } diff --git a/src/libcore/ops/index.rs b/src/libcore/ops/index.rs index 1ac80ecc96ffe..3158f58e95806 100644 --- a/src/libcore/ops/index.rs +++ b/src/libcore/ops/index.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// Used for indexing operations (`container[index]`) in immutable contexts. /// /// `container[index]` is actually syntactic sugar for `*container.index(index)`, @@ -43,7 +33,7 @@ /// impl Index for NucleotideCount { /// type Output = usize; /// -/// fn index(&self, nucleotide: Nucleotide) -> &usize { +/// fn index(&self, nucleotide: Nucleotide) -> &Self::Output { /// match nucleotide { /// Nucleotide::A => &self.a, /// Nucleotide::C => &self.c, @@ -115,7 +105,7 @@ pub trait Index { /// impl Index for Balance { /// type Output = Weight; /// -/// fn index<'a>(&'a self, index: Side) -> &'a Weight { +/// fn index<'a>(&'a self, index: Side) -> &'a Self::Output { /// println!("Accessing {:?}-side of balance immutably", index); /// match index { /// Side::Left => &self.left, @@ -125,7 +115,7 @@ pub trait Index { /// } /// /// impl IndexMut for Balance { -/// fn index_mut<'a>(&'a mut self, index: Side) -> &'a mut Weight { +/// fn index_mut<'a>(&'a mut self, index: Side) -> &'a mut Self::Output { /// println!("Accessing {:?}-side of balance mutably", index); /// match index { /// Side::Left => &mut self.left, @@ -151,6 +141,21 @@ pub trait Index { /// ``` #[lang = "index_mut"] #[rustc_on_unimplemented( + on( + _Self="&str", + note="you can use `.chars().nth()` or `.bytes().nth()` +see chapter in The Book " + ), + on( + _Self="str", + note="you can use `.chars().nth()` or `.bytes().nth()` +see chapter in The Book " + ), + on( + _Self="std::string::String", + note="you can use `.chars().nth()` or `.bytes().nth()` +see chapter in The Book " + ), message="the type `{Self}` cannot be mutably indexed by `{Idx}`", label="`{Self}` cannot be mutably indexed by `{Idx}`", )] diff --git a/src/libcore/ops/mod.rs b/src/libcore/ops/mod.rs index 785f0733df2b8..0ca64f28ff309 100644 --- a/src/libcore/ops/mod.rs +++ b/src/libcore/ops/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
Overloadable operators. //! //! Implementing these traits allows you to overload certain operators. @@ -178,6 +168,9 @@ pub use self::bit::{BitAndAssign, BitOrAssign, BitXorAssign, ShlAssign, ShrAssig #[stable(feature = "rust1", since = "1.0.0")] pub use self::deref::{Deref, DerefMut}; +#[unstable(feature = "receiver_trait", issue = "0")] +pub use self::deref::Receiver; + #[stable(feature = "rust1", since = "1.0.0")] pub use self::drop::Drop; diff --git a/src/libcore/ops/range.rs b/src/libcore/ops/range.rs index 908490e1c839e..5b6023f2e2cbb 100644 --- a/src/libcore/ops/range.rs +++ b/src/libcore/ops/range.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use fmt; use hash::{Hash, Hasher}; @@ -36,11 +26,13 @@ use hash::{Hash, Hasher}; /// Used as a [slicing index], `RangeFull` produces the full array as a slice. /// /// ``` -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); // RangeFull -/// assert_eq!(arr[ ..3], [0,1,2 ]); -/// assert_eq!(arr[1.. ], [ 1,2,3]); -/// assert_eq!(arr[1..3], [ 1,2 ]); +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); // RangeFull +/// assert_eq!(arr[ .. 3], [0,1,2 ]); +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); +/// assert_eq!(arr[1.. 3], [ 1,2 ]); +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); /// ``` /// /// [`IntoIterator`]: ../iter/trait.Iterator.html @@ -62,7 +54,7 @@ impl fmt::Debug for RangeFull { /// (`start..end`). /// /// The `Range` `start..end` contains all values with `x >= start` and -/// `x < end`. It is empty unless `start < end`. +/// `x < end`. It is empty unless `start < end`. /// /// # Examples /// @@ -70,14 +62,16 @@ impl fmt::Debug for RangeFull { /// assert_eq!((3..5), std::ops::Range { start: 3, end: 5 }); /// assert_eq!(3 + 4 + 5, (3..6).sum()); /// -/// let arr = ['a', 'b', 'c', 'd']; -/// assert_eq!(arr[ .. ], ['a', 'b', 'c', 'd']); -/// assert_eq!(arr[ ..3], ['a', 'b', 'c', ]); -/// assert_eq!(arr[1.. ], [ 'b', 'c', 'd']); -/// assert_eq!(arr[1..3], [ 'b', 'c' ]); // Range +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); +/// assert_eq!(arr[ .. 3], [0,1,2 ]); +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); +/// assert_eq!(arr[1.. 3], [ 1,2 ]); // Range +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); /// ``` #[doc(alias = "..")] -#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 +#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct Range { /// The lower bound of the range (inclusive). 
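(The expanded slicing tables in these range doc comments, condensed into one runnable check; not part of the patch.)

```
fn main() {
    let arr = [0, 1, 2, 3, 4];
    assert_eq!(arr[..], [0, 1, 2, 3, 4]);   // RangeFull
    assert_eq!(arr[..3], [0, 1, 2]);        // RangeTo
    assert_eq!(arr[..=3], [0, 1, 2, 3]);    // RangeToInclusive
    assert_eq!(arr[1..], [1, 2, 3, 4]);     // RangeFrom
    assert_eq!(arr[1..3], [1, 2]);          // Range
    assert_eq!(arr[1..=3], [1, 2, 3]);      // RangeInclusive
}
```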
@@ -91,7 +85,10 @@ pub struct Range { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for Range { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{:?}..{:?}", self.start, self.end) + self.start.fmt(fmt)?; + write!(fmt, "..")?; + self.end.fmt(fmt)?; + Ok(()) } } @@ -101,8 +98,6 @@ impl> Range { /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!(!(3..5).contains(&2)); @@ -118,7 +113,7 @@ impl> Range { /// assert!(!(0.0..f32::NAN).contains(&0.5)); /// assert!(!(f32::NAN..1.0).contains(&0.5)); /// ``` - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] pub fn contains(&self, item: &U) -> bool where Idx: PartialOrd, @@ -170,16 +165,18 @@ impl> Range { /// assert_eq!((2..), std::ops::RangeFrom { start: 2 }); /// assert_eq!(2 + 3 + 4, (2..).take(3).sum()); /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); -/// assert_eq!(arr[ ..3], [0,1,2 ]); -/// assert_eq!(arr[1.. ], [ 1,2,3]); // RangeFrom -/// assert_eq!(arr[1..3], [ 1,2 ]); +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); +/// assert_eq!(arr[ .. 3], [0,1,2 ]); +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); // RangeFrom +/// assert_eq!(arr[1.. 3], [ 1,2 ]); +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); /// ``` /// /// [`Iterator`]: ../iter/trait.IntoIterator.html #[doc(alias = "..")] -#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 +#[derive(Clone, PartialEq, Eq, Hash)] // not Copy -- see #27186 #[stable(feature = "rust1", since = "1.0.0")] pub struct RangeFrom { /// The lower bound of the range (inclusive). @@ -190,7 +187,9 @@ pub struct RangeFrom { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for RangeFrom { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{:?}..", self.start) + self.start.fmt(fmt)?; + write!(fmt, "..")?; + Ok(()) } } @@ -200,8 +199,6 @@ impl> RangeFrom { /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!(!(3..).contains(&2)); @@ -212,7 +209,7 @@ impl> RangeFrom { /// assert!(!(0.0..).contains(&f32::NAN)); /// assert!(!(f32::NAN..).contains(&0.5)); /// ``` - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] pub fn contains(&self, item: &U) -> bool where Idx: PartialOrd, @@ -250,11 +247,13 @@ impl> RangeFrom { /// elements before the index indicated by `end`. /// /// ``` -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ .. ], [0,1,2,3]); -/// assert_eq!(arr[ ..3], [0,1,2 ]); // RangeTo -/// assert_eq!(arr[1.. ], [ 1,2,3]); -/// assert_eq!(arr[1..3], [ 1,2 ]); +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); +/// assert_eq!(arr[ .. 3], [0,1,2 ]); // RangeTo +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); +/// assert_eq!(arr[1.. 
3], [ 1,2 ]); +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); /// ``` /// /// [`IntoIterator`]: ../iter/trait.Iterator.html @@ -272,7 +271,9 @@ pub struct RangeTo { #[stable(feature = "rust1", since = "1.0.0")] impl fmt::Debug for RangeTo { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "..{:?}", self.end) + write!(fmt, "..")?; + self.end.fmt(fmt)?; + Ok(()) } } @@ -282,8 +283,6 @@ impl> RangeTo { /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!( (..5).contains(&-1_000_000_000)); @@ -294,7 +293,7 @@ impl> RangeTo { /// assert!(!(..1.0).contains(&f32::NAN)); /// assert!(!(..f32::NAN).contains(&0.5)); /// ``` - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] pub fn contains(&self, item: &U) -> bool where Idx: PartialOrd, @@ -307,7 +306,7 @@ impl> RangeTo { /// A range bounded inclusively below and above (`start..=end`). /// /// The `RangeInclusive` `start..=end` contains all values with `x >= start` -/// and `x <= end`. It is empty unless `start <= end`. +/// and `x <= end`. It is empty unless `start <= end`. /// /// This iterator is [fused], but the specific values of `start` and `end` after /// iteration has finished are **unspecified** other than that [`.is_empty()`] @@ -322,12 +321,16 @@ impl> RangeTo { /// assert_eq!((3..=5), std::ops::RangeInclusive::new(3, 5)); /// assert_eq!(3 + 4 + 5, (3..=5).sum()); /// -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ ..=2], [0,1,2 ]); -/// assert_eq!(arr[1..=2], [ 1,2 ]); // RangeInclusive +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); +/// assert_eq!(arr[ .. 3], [0,1,2 ]); +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); +/// assert_eq!(arr[1.. 3], [ 1,2 ]); +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); // RangeInclusive /// ``` #[doc(alias = "..=")] -#[derive(Clone)] // not Copy -- see #27186 +#[derive(Clone)] // not Copy -- see #27186 #[stable(feature = "inclusive_range", since = "1.26.0")] pub struct RangeInclusive { pub(crate) start: Idx, @@ -344,12 +347,14 @@ pub struct RangeInclusive { trait RangeInclusiveEquality: Sized { fn canonicalized_is_empty(range: &RangeInclusive) -> bool; } + impl RangeInclusiveEquality for T { #[inline] default fn canonicalized_is_empty(range: &RangeInclusive) -> bool { range.is_empty.unwrap_or_default() } } + impl RangeInclusiveEquality for T { #[inline] fn canonicalized_is_empty(range: &RangeInclusive) -> bool { @@ -361,7 +366,8 @@ impl RangeInclusiveEquality for T { impl PartialEq for RangeInclusive { #[inline] fn eq(&self, other: &Self) -> bool { - self.start == other.start && self.end == other.end + self.start == other.start + && self.end == other.end && RangeInclusiveEquality::canonicalized_is_empty(self) == RangeInclusiveEquality::canonicalized_is_empty(other) } @@ -393,7 +399,11 @@ impl RangeInclusive { #[inline] #[rustc_promotable] pub const fn new(start: Idx, end: Idx) -> Self { - Self { start, end, is_empty: None } + Self { + start, + end, + is_empty: None, + } } /// Returns the lower bound of the range (inclusive). 
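The hunks above drop the `range_contains` feature gate and route the `Debug` impls through the endpoints' own formatters. A minimal sketch of the resulting stable behaviour (the assertions assume the stabilized `contains` and the unchanged `{:?}` output):

```rust
fn main() {
    // `contains` now works on stable for the half-open and inclusive ranges.
    assert!( (3..5).contains(&4));
    assert!( (3..).contains(&1_000));
    assert!( (..5).contains(&-1));
    assert!( (3..=5).contains(&5));
    assert!(!(3..5).contains(&5));

    // Formatting each endpoint individually keeps the familiar output.
    assert_eq!(format!("{:?}", 3..5), "3..5");
    assert_eq!(format!("{:?}", 3..=5), "3..=5");
}
```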
@@ -464,7 +474,10 @@ impl RangeInclusive { #[stable(feature = "inclusive_range", since = "1.26.0")] impl fmt::Debug for RangeInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{:?}..={:?}", self.start, self.end) + self.start.fmt(fmt)?; + write!(fmt, "..=")?; + self.end.fmt(fmt)?; + Ok(()) } } @@ -474,8 +487,6 @@ impl> RangeInclusive { /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!(!(3..=5).contains(&2)); @@ -492,7 +503,7 @@ impl> RangeInclusive { /// assert!(!(0.0..=f32::NAN).contains(&0.0)); /// assert!(!(f32::NAN..=1.0).contains(&1.0)); /// ``` - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] pub fn contains(&self, item: &U) -> bool where Idx: PartialOrd, @@ -577,9 +588,13 @@ impl> RangeInclusive { /// array elements up to and including the index indicated by `end`. /// /// ``` -/// let arr = [0, 1, 2, 3]; -/// assert_eq!(arr[ ..=2], [0,1,2 ]); // RangeToInclusive -/// assert_eq!(arr[1..=2], [ 1,2 ]); +/// let arr = [0, 1, 2, 3, 4]; +/// assert_eq!(arr[ .. ], [0,1,2,3,4]); +/// assert_eq!(arr[ .. 3], [0,1,2 ]); +/// assert_eq!(arr[ ..=3], [0,1,2,3 ]); // RangeToInclusive +/// assert_eq!(arr[1.. ], [ 1,2,3,4]); +/// assert_eq!(arr[1.. 3], [ 1,2 ]); +/// assert_eq!(arr[1..=3], [ 1,2,3 ]); /// ``` /// /// [`IntoIterator`]: ../iter/trait.Iterator.html @@ -597,19 +612,18 @@ pub struct RangeToInclusive { #[stable(feature = "inclusive_range", since = "1.26.0")] impl fmt::Debug for RangeToInclusive { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "..={:?}", self.end) + write!(fmt, "..=")?; + self.end.fmt(fmt)?; + Ok(()) } } -#[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] impl> RangeToInclusive { /// Returns `true` if `item` is contained in the range. /// /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!( (..=5).contains(&-1_000_000_000)); @@ -620,7 +634,7 @@ impl> RangeToInclusive { /// assert!(!(..=1.0).contains(&f32::NAN)); /// assert!(!(..=f32::NAN).contains(&0.5)); /// ``` - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] pub fn contains(&self, item: &U) -> bool where Idx: PartialOrd, @@ -684,7 +698,7 @@ pub enum Bound { #[stable(feature = "collections_range", since = "1.28.0")] /// `RangeBounds` is implemented by Rust's built-in range types, produced -/// by range syntax like `..`, `a..`, `..b` or `c..d`. +/// by range syntax like `..`, `a..`, `..b`, `..=c`, `d..e`, or `f..=g`. pub trait RangeBounds { /// Start index bound. /// @@ -722,14 +736,11 @@ pub trait RangeBounds { #[stable(feature = "collections_range", since = "1.28.0")] fn end_bound(&self) -> Bound<&T>; - /// Returns `true` if `item` is contained in the range. 
/// /// # Examples /// /// ``` - /// #![feature(range_contains)] - /// /// use std::f32; /// /// assert!( (3..5).contains(&4)); @@ -739,7 +750,7 @@ pub trait RangeBounds { /// assert!(!(0.0..1.0).contains(&f32::NAN)); /// assert!(!(0.0..f32::NAN).contains(&0.5)); /// assert!(!(f32::NAN..1.0).contains(&0.5)); - #[unstable(feature = "range_contains", reason = "recently added as per RFC", issue = "32311")] + #[stable(feature = "range_contains", since = "1.35.0")] fn contains(&self, item: &U) -> bool where T: PartialOrd, @@ -749,9 +760,7 @@ pub trait RangeBounds { Included(ref start) => *start <= item, Excluded(ref start) => *start < item, Unbounded => true, - }) - && - (match self.end_bound() { + }) && (match self.end_bound() { Included(ref end) => item <= *end, Excluded(ref end) => item < *end, Unbounded => true, @@ -827,7 +836,7 @@ impl RangeBounds for (Bound, Bound) { match *self { (Included(ref start), _) => Included(start), (Excluded(ref start), _) => Excluded(start), - (Unbounded, _) => Unbounded, + (Unbounded, _) => Unbounded, } } @@ -835,7 +844,7 @@ impl RangeBounds for (Bound, Bound) { match *self { (_, Included(ref end)) => Included(end), (_, Excluded(ref end)) => Excluded(end), - (_, Unbounded) => Unbounded, + (_, Unbounded) => Unbounded, } } } diff --git a/src/libcore/ops/try.rs b/src/libcore/ops/try.rs index 4f2d30aa6a8a7..9fa2c81954ee1 100644 --- a/src/libcore/ops/try.rs +++ b/src/libcore/ops/try.rs @@ -1,17 +1,7 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// A trait for customizing the behavior of the `?` operator. /// /// A type implementing `Try` is one that has a canonical way to view it -/// in terms of a success/failure dichotomy. This trait allows both +/// in terms of a success/failure dichotomy. This trait allows both /// extracting those success or failure values from an existing instance and /// creating a new instance from a success or failure value. #[unstable(feature = "try_trait", issue = "42327")] diff --git a/src/libcore/ops/unsize.rs b/src/libcore/ops/unsize.rs index e86a392a2c828..bd95ddf060ee4 100644 --- a/src/libcore/ops/unsize.rs +++ b/src/libcore/ops/unsize.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use marker::Unsize; /// Trait that indicates that this is a pointer or a wrapper for one, diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 44d632ece055c..3da92c0a05ac4 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Optional values. //! //! 
Type [`Option`] represents an optional value: every [`Option`] @@ -220,11 +210,11 @@ impl Option { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Option` to `Option<&T>`. + /// Converts from `&Option` to `Option<&T>`. /// /// # Examples /// - /// Convert an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original. + /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, preserving the original. /// The [`map`] method takes the `self` argument by value, consuming the original, /// so this technique uses `as_ref` to first take an `Option` to a reference /// to the value inside the original. @@ -249,7 +239,7 @@ impl Option { } } - /// Converts from `Option` to `Option<&mut T>`. + /// Converts from `&mut Option` to `Option<&mut T>`. /// /// # Examples /// @@ -273,7 +263,7 @@ impl Option { /// Converts from `Pin<&Option>` to `Option>` #[inline] - #[unstable(feature = "pin", issue = "49150")] + #[stable(feature = "pin", since = "1.33.0")] pub fn as_pin_ref<'a>(self: Pin<&'a Option>) -> Option> { unsafe { Pin::get_ref(self).as_ref().map(|x| Pin::new_unchecked(x)) @@ -282,10 +272,10 @@ impl Option { /// Converts from `Pin<&mut Option>` to `Option>` #[inline] - #[unstable(feature = "pin", issue = "49150")] + #[stable(feature = "pin", since = "1.33.0")] pub fn as_pin_mut<'a>(self: Pin<&'a mut Option>) -> Option> { unsafe { - Pin::get_mut_unchecked(self).as_mut().map(|x| Pin::new_unchecked(x)) + Pin::get_unchecked_mut(self).as_mut().map(|x| Pin::new_unchecked(x)) } } @@ -405,7 +395,7 @@ impl Option { /// /// # Examples /// - /// Convert an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original: + /// Converts an `Option<`[`String`]`>` into an `Option<`[`usize`]`>`, consuming the original: /// /// [`String`]: ../../std/string/struct.String.html /// [`usize`]: ../../std/primitive.usize.html @@ -884,7 +874,45 @@ impl Option { } } -impl<'a, T: Clone> Option<&'a T> { +impl Option<&T> { + /// Maps an `Option<&T>` to an `Option` by copying the contents of the + /// option. + /// + /// # Examples + /// + /// ``` + /// let x = 12; + /// let opt_x = Some(&x); + /// assert_eq!(opt_x, Some(&12)); + /// let copied = opt_x.copied(); + /// assert_eq!(copied, Some(12)); + /// ``` + #[stable(feature = "copied", since = "1.35.0")] + pub fn copied(self) -> Option { + self.map(|&t| t) + } +} + +impl Option<&mut T> { + /// Maps an `Option<&mut T>` to an `Option` by copying the contents of the + /// option. + /// + /// # Examples + /// + /// ``` + /// let mut x = 12; + /// let opt_x = Some(&mut x); + /// assert_eq!(opt_x, Some(&mut 12)); + /// let copied = opt_x.copied(); + /// assert_eq!(copied, Some(12)); + /// ``` + #[stable(feature = "copied", since = "1.35.0")] + pub fn copied(self) -> Option { + self.map(|&mut t| t) + } +} + +impl Option<&T> { /// Maps an `Option<&T>` to an `Option` by cloning the contents of the /// option. /// @@ -903,7 +931,7 @@ impl<'a, T: Clone> Option<&'a T> { } } -impl<'a, T: Clone> Option<&'a mut T> { +impl Option<&mut T> { /// Maps an `Option<&mut T>` to an `Option` by cloning the contents of the /// option. /// @@ -931,7 +959,7 @@ impl Option { /// /// # Examples /// - /// Convert a string to an integer, turning poorly-formed strings + /// Converts a string to an integer, turning poorly-formed strings /// into 0 (the default value for integers). 
[`parse`] converts /// a string to any other type that implements [`FromStr`], returning /// [`None`] on error. @@ -981,8 +1009,6 @@ impl Option> { /// # Examples /// /// ``` - /// #![feature(transpose_result)] - /// /// #[derive(Debug, Eq, PartialEq)] /// struct SomeErr; /// @@ -991,7 +1017,7 @@ impl Option> { /// assert_eq!(x, y.transpose()); /// ``` #[inline] - #[unstable(feature = "transpose_result", issue = "47338")] + #[stable(feature = "transpose_result", since = "1.33.0")] pub fn transpose(self) -> Result, E> { match self { Some(Ok(x)) => Ok(Some(x)), @@ -1253,19 +1279,61 @@ impl> FromIterator> for Option { /// returned. Should no [`None`][Option::None] occur, a container with the /// values of each [`Option`] is returned. /// - /// Here is an example which increments every integer in a vector, - /// checking for overflow: + /// # Examples + /// + /// Here is an example which increments every integer in a vector. + /// We use the checked variant of `add` that returns `None` when the + /// calculation would result in an overflow. /// /// ``` - /// use std::u16; + /// let items = vec![0_u16, 1, 2]; + /// + /// let res: Option> = items + /// .iter() + /// .map(|x| x.checked_add(1)) + /// .collect(); + /// + /// assert_eq!(res, Some(vec![1, 2, 3])); + /// ``` + /// + /// As you can see, this will return the expected, valid items. + /// + /// Here is another example that tries to subtract one from another list + /// of integers, this time checking for underflow: /// - /// let v = vec![1, 2]; - /// let res: Option> = v.iter().map(|&x: &u16| - /// if x == u16::MAX { None } - /// else { Some(x + 1) } - /// ).collect(); - /// assert!(res == Some(vec![2, 3])); /// ``` + /// let items = vec![2_u16, 1, 0]; + /// + /// let res: Option> = items + /// .iter() + /// .map(|x| x.checked_sub(1)) + /// .collect(); + /// + /// assert_eq!(res, None); + /// ``` + /// + /// Since the last element is zero, it would underflow. Thus, the resulting + /// value is `None`. + /// + /// Here is a variation on the previous example, showing that no + /// further elements are taken from `iter` after the first `None`. + /// + /// ``` + /// let items = vec![3_u16, 2, 1, 10]; + /// + /// let mut shared = 0; + /// + /// let res: Option> = items + /// .iter() + /// .map(|x| { shared += x; x.checked_sub(2) }) + /// .collect(); + /// + /// assert_eq!(res, None); + /// assert_eq!(shared, 6); + /// ``` + /// + /// Since the third element caused an underflow, no further elements were taken, + /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16. /// /// [`Iterator`]: ../iter/trait.Iterator.html #[inline] diff --git a/src/libcore/panic.rs b/src/libcore/panic.rs index f0efeb59e8d6e..1abc0a18a9cc9 100644 --- a/src/libcore/panic.rs +++ b/src/libcore/panic.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Panic support in the standard library. #![unstable(feature = "core_panic_info", diff --git a/src/libcore/panicking.rs b/src/libcore/panicking.rs index aa18a60fc0f6d..d9cdb2a2b8a9f 100644 --- a/src/libcore/panicking.rs +++ b/src/libcore/panicking.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Panic support for libcore //! //! The core library cannot define panicking, but it does *declare* panicking. This diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs index 0ad6e8c7c1c7d..57bd3ed12b28e 100644 --- a/src/libcore/pin.rs +++ b/src/libcore/pin.rs @@ -1,50 +1,66 @@ -//! Types which pin data to its location in memory +//! Types that pin data to its location in memory. //! //! It is sometimes useful to have objects that are guaranteed to not move, //! in the sense that their placement in memory does not change, and can thus be relied upon. -//! //! A prime example of such a scenario would be building self-referential structs, //! since moving an object with pointers to itself will invalidate them, //! which could cause undefined behavior. //! +//! A [`Pin
<P>
`] ensures that the pointee of any pointer type `P` has a stable location in memory, +//! meaning it cannot be moved elsewhere and its memory cannot be deallocated +//! until it gets dropped. We say that the pointee is "pinned". +//! //! By default, all types in Rust are movable. Rust allows passing all types by-value, -//! and common smart-pointer types such as `Box`, `Rc`, and `&mut` allow replacing and -//! moving the values they contain. In order to prevent objects from moving, they must -//! be pinned by wrapping a pointer to the data in the [`Pin`] type. -//! Doing this prohibits moving the value behind the pointer. -//! For example, `Pin>` functions much like a regular `Box`, -//! but doesn't allow moving `T`. The pointer value itself (the `Box`) can still be moved, -//! but the value behind it cannot. -//! -//! Since data can be moved out of `&mut` and `Box` with functions such as [`swap`], -//! changing the location of the underlying data, [`Pin`] prohibits accessing the -//! underlying pointer type (the `&mut` or `Box`) directly, and provides its own set of -//! APIs for accessing and using the value. [`Pin`] also guarantees that no other -//! functions will move the pointed-to value. This allows for the creation of -//! self-references and other special behaviors that are only possible for unmovable -//! values. +//! and common smart-pointer types such as `Box` and `&mut T` allow replacing and +//! moving the values they contain: you can move out of a `Box`, or you can use [`mem::swap`]. +//! [`Pin
<P>
`] wraps a pointer type `P`, so `Pin>` functions much like a regular `Box`: +//! when a `Pin>` gets dropped, so do its contents, and the memory gets deallocated. +//! Similarily, `Pin<&mut T>` is a lot like `&mut T`. However, [`Pin
<P>
`] does not let clients +//! actually obtain a `Box` or `&mut T` to pinned data, which implies that you cannot use +//! operations such as [`mem::swap`]: +//! ``` +//! use std::pin::Pin; +//! fn swap_pins(x: Pin<&mut T>, y: Pin<&mut T>) { +//! // `mem::swap` needs `&mut T`, but we cannot get it. +//! // We are stuck, we cannot swap the contents of these references. +//! // We could use `Pin::get_unchecked_mut`, but that is unsafe for a reason: +//! // we are not allowed to use it for moving things out of the `Pin`. +//! } +//! ``` //! -//! However, these restrictions are usually not necessary. Many types are always freely -//! movable. These types implement the [`Unpin`] auto-trait, which nullifies the affect -//! of [`Pin`]. For `T: Unpin`, `Pin>` and `Box` function identically, as do -//! `Pin<&mut T>` and `&mut T`. +//! It is worth reiterating that [`Pin
<P>
`] does *not* change the fact that a Rust compiler +//! considers all types movable. [`mem::swap`] remains callable for any `T`. Instead, `Pin
<P>
` +//! prevents certain *values* (pointed to by pointers wrapped in `Pin
<P>
`) from being +//! moved by making it impossible to call methods that require `&mut T` on them +//! (like [`mem::swap`]). //! -//! Note that pinning and `Unpin` only affect the pointed-to type. For example, whether -//! or not `Box` is `Unpin` has no affect on the behavior of `Pin>`. Similarly, -//! `Pin>` and `Pin<&mut T>` are always `Unpin` themselves, even though the -//! `T` underneath them isn't, because the pointers in `Pin>` and `Pin<&mut _>` -//! are always freely movable, even if the data they point to isn't. +//! [`Pin
<P>
`] can be used to wrap any pointer type `P`, and as such it interacts with +//! [`Deref`] and [`DerefMut`]. A `Pin
<P>
` where `P: Deref` should be considered +//! as a "`P`-style pointer" to a pinned `P::Target` -- so, a `Pin>` is +//! an owned pointer to a pinned `T`, and a `Pin>` is a reference-counted +//! pointer to a pinned `T`. +//! For correctness, [`Pin
<P>
`] relies on the [`Deref`] and [`DerefMut`] implementations +//! to not move out of their `self` parameter, and to only ever return a pointer +//! to pinned data when they are called on a pinned pointer. //! -//! [`Pin`]: struct.Pin.html -//! [`Unpin`]: trait.Unpin.html -//! [`swap`]: ../../std/mem/fn.swap.html -//! [`Box`]: ../../std/boxed/struct.Box.html +//! # `Unpin` //! -//! # Examples +//! However, these restrictions are usually not necessary. Many types are always freely +//! movable, even when pinned, because they do not rely on having a stable address. +//! This includes all the basic types (like `bool`, `i32`, references) +//! as well as types consisting solely of these types. +//! Types that do not care about pinning implement the [`Unpin`] auto-trait, which +//! cancels the effect of [`Pin
<P>
`]. For `T: Unpin`, `Pin>` and `Box` function +//! identically, as do `Pin<&mut T>` and `&mut T`. //! -//! ```rust -//! #![feature(pin)] +//! Note that pinning and `Unpin` only affect the pointed-to type `P::Target`, not the pointer +//! type `P` itself that got wrapped in `Pin
<P>
`. For example, whether or not `Box` is +//! `Unpin` has no effect on the behavior of `Pin>` (here, `T` is the +//! pointed-to type). +//! +//! # Example: self-referential struct //! +//! ```rust //! use std::pin::Pin; //! use std::marker::PhantomPinned; //! use std::ptr::NonNull; @@ -72,13 +88,13 @@ //! slice: NonNull::dangling(), //! _pin: PhantomPinned, //! }; -//! let mut boxed = Box::pinned(res); +//! let mut boxed = Box::pin(res); //! //! let slice = NonNull::from(&boxed.data); //! // we know this is safe because modifying a field doesn't move the whole struct //! unsafe { //! let mut_ref: Pin<&mut Self> = Pin::as_mut(&mut boxed); -//! Pin::get_mut_unchecked(mut_ref).slice = slice; +//! Pin::get_unchecked_mut(mut_ref).slice = slice; //! } //! boxed //! } @@ -93,18 +109,160 @@ //! assert_eq!(still_unmoved.slice, NonNull::from(&still_unmoved.data)); //! //! // Since our type doesn't implement Unpin, this will fail to compile: -//! // let new_unmoved = Unmovable::new("world".to_string()); +//! // let mut new_unmoved = Unmovable::new("world".to_string()); //! // std::mem::swap(&mut *still_unmoved, &mut *new_unmoved); //! ``` +//! +//! # Example: intrusive doubly-linked list +//! +//! In an intrusive doubly-linked list, the collection does not actually allocate +//! the memory for the elements itself. Allocation is controlled by the clients, +//! and elements can live on a stack frame that lives shorter than the collection does. +//! +//! To make this work, every element has pointers to its predecessor and successor in +//! the list. Elements can only be added when they are pinned, because moving the elements +//! around would invalidate the pointers. Moreover, the `Drop` implementation of a linked +//! list element will patch the pointers of its predecessor and successor to remove itself +//! from the list. +//! +//! Crucially, we have to be able to rely on `drop` being called. If an element +//! could be deallocated or otherwise invalidated without calling `drop`, the pointers into it +//! from its neighbouring elements would become invalid, which would break the data structure. +//! +//! Therefore, pinning also comes with a `drop`-related guarantee. +//! +//! # `Drop` guarantee +//! +//! The purpose of pinning is to be able to rely on the placement of some data in memory. +//! To make this work, not just moving the data is restricted; deallocating, repurposing, or +//! otherwise invalidating the memory used to store the data is restricted, too. +//! Concretely, for pinned data you have to maintain the invariant +//! that *its memory will not get invalidated from the moment it gets pinned until +//! when `drop` is called*. Memory can be invalidated by deallocation, but also by +//! replacing a [`Some(v)`] by [`None`], or calling [`Vec::set_len`] to "kill" some elements +//! off of a vector. +//! +//! This is exactly the kind of guarantee that the intrusive linked list from the previous +//! section needs to function correctly. +//! +//! Notice that this guarantee does *not* mean that memory does not leak! It is still +//! completely okay not to ever call `drop` on a pinned element (e.g., you can still +//! call [`mem::forget`] on a `Pin>`). In the example of the doubly-linked +//! list, that element would just stay in the list. However you may not free or reuse the storage +//! *without calling `drop`*. +//! +//! # `Drop` implementation +//! +//! If your type uses pinning (such as the two examples above), you have to be careful +//! when implementing `Drop`. 
The `drop` function takes `&mut self`, but this +//! is called *even if your type was previously pinned*! It is as if the +//! compiler automatically called `get_unchecked_mut`. +//! +//! This can never cause a problem in safe code because implementing a type that relies on pinning +//! requires unsafe code, but be aware that deciding to make use of pinning +//! in your type (for example by implementing some operation on `Pin<&[mut] Self>`) +//! has consequences for your `Drop` implementation as well: if an element +//! of your type could have been pinned, you must treat Drop as implicitly taking +//! `Pin<&mut Self>`. +//! +//! In particular, if your type is `#[repr(packed)]`, the compiler will automatically +//! move fields around to be able to drop them. As a consequence, you cannot use +//! pinning with a `#[repr(packed)]` type. +//! +//! # Projections and Structural Pinning +//! +//! One interesting question arises when considering the interaction of pinning and +//! the fields of a struct. When can a struct have a "pinning projection", i.e., +//! an operation with type `fn(Pin<&[mut] Struct>) -> Pin<&[mut] Field>`? +//! In a similar vein, when can a generic wrapper type (such as `Vec`, `Box`, or `RefCell`) +//! have an operation with type `fn(Pin<&[mut] Wrapper>) -> Pin<&[mut] T>`? +//! +//! Having a pinning projection for some field means that pinning is "structural": +//! when the wrapper is pinned, the field must be considered pinned, too. +//! After all, the pinning projection lets us get a `Pin<&[mut] Field>`. +//! +//! However, structural pinning comes with a few extra requirements, so not all +//! wrappers can be structural and hence not all wrappers can offer pinning projections: +//! +//! 1. The wrapper must only be [`Unpin`] if all the structural fields are +//! `Unpin`. This is the default, but `Unpin` is a safe trait, so as the author of +//! the wrapper it is your responsibility *not* to add something like +//! `impl Unpin for Wrapper`. (Notice that adding a projection operation +//! requires unsafe code, so the fact that `Unpin` is a safe trait does not break +//! the principle that you only have to worry about any of this if you use `unsafe`.) +//! 2. The destructor of the wrapper must not move structural fields out of its argument. This +//! is the exact point that was raised in the [previous section][drop-impl]: `drop` takes +//! `&mut self`, but the wrapper (and hence its fields) might have been pinned before. +//! You have to guarantee that you do not move a field inside your `Drop` implementation. +//! In particular, as explained previously, this means that your wrapper type must *not* +//! be `#[repr(packed)]`. +//! 3. You must make sure that you uphold the [`Drop` guarantee][drop-guarantee]: +//! once your wrapper is pinned, the memory that contains the +//! content is not overwritten or deallocated without calling the content's destructors. +//! This can be tricky, as witnessed by `VecDeque`: the destructor of `VecDeque` can fail +//! to call `drop` on all elements if one of the destructors panics. This violates the +//! `Drop` guarantee, because it can lead to elements being deallocated without +//! their destructor being called. (`VecDeque` has no pinning projections, so this +//! does not cause unsoundness.) +//! 4. You must not offer any other operations that could lead to data being moved out of +//! the fields when your type is pinned. For example, if the wrapper contains an +//! `Option` and there is a `take`-like operation with type +//! 
`fn(Pin<&mut Wrapper>) -> Option`, +//! that operation can be used to move a `T` out of a pinned `Wrapper` -- which means +//! pinning cannot be structural. +//! +//! For a more complex example of moving data out of a pinned type, imagine if `RefCell` +//! had a method `fn get_pin_mut(self: Pin<&mut Self>) -> Pin<&mut T>`. +//! Then we could do the following: +//! ```compile_fail +//! fn exploit_ref_cell(rc: Pin<&mut RefCell>) { +//! { let p = rc.as_mut().get_pin_mut(); } // Here we get pinned access to the `T`. +//! let rc_shr: &RefCell = rc.into_ref().get_ref(); +//! let b = rc_shr.borrow_mut(); +//! let content = &mut *b; // And here we have `&mut T` to the same data. +//! } +//! ``` +//! This is catastrophic, it means we can first pin the content of the `RefCell` +//! (using `RefCell::get_pin_mut`) and then move that content using the mutable +//! reference we got later. +//! +//! For a type like `Vec`, both possibilites (structural pinning or not) make sense, +//! and the choice is up to the author. A `Vec` with structural pinning could +//! have `get_pin`/`get_pin_mut` projections. However, it could *not* allow calling +//! `pop` on a pinned `Vec` because that would move the (structurally pinned) contents! +//! Nor could it allow `push`, which might reallocate and thus also move the contents. +//! A `Vec` without structural pinning could `impl Unpin for Vec`, because the contents +//! are never pinned and the `Vec` itself is fine with being moved as well. +//! +//! In the standard library, pointer types generally do not have structural pinning, +//! and thus they do not offer pinning projections. This is why `Box: Unpin` holds for all `T`. +//! It makes sense to do this for pointer types, because moving the `Box` +//! does not actually move the `T`: the `Box` can be freely movable (aka `Unpin`) even if the `T` +//! is not. In fact, even `Pin>` and `Pin<&mut T>` are always `Unpin` themselves, +//! for the same reason: their contents (the `T`) are pinned, but the pointers themselves +//! can be moved without moving the pinned data. For both `Box` and `Pin>`, +//! whether the content is pinned is entirely independent of whether the pointer is +//! pinned, meaning pinning is *not* structural. +//! +//! [`Pin
<P>
`]: struct.Pin.html +//! [`Unpin`]: ../../std/marker/trait.Unpin.html +//! [`Deref`]: ../../std/ops/trait.Deref.html +//! [`DerefMut`]: ../../std/ops/trait.DerefMut.html +//! [`mem::swap`]: ../../std/mem/fn.swap.html +//! [`mem::forget`]: ../../std/mem/fn.forget.html +//! [`Box`]: ../../std/boxed/struct.Box.html +//! [`Vec::set_len`]: ../../std/vec/struct.Vec.html#method.set_len +//! [`None`]: ../../std/option/enum.Option.html#variant.None +//! [`Some(v)`]: ../../std/option/enum.Option.html#variant.Some +//! [drop-impl]: #drop-implementation +//! [drop-guarantee]: #drop-guarantee -#![unstable(feature = "pin", issue = "49150")] +#![stable(feature = "pin", since = "1.33.0")] use fmt; -use marker::Sized; -use ops::{Deref, DerefMut, CoerceUnsized, DispatchFromDyn}; - -#[doc(inline)] -pub use marker::Unpin; +use marker::{Sized, Unpin}; +use cmp::{self, PartialEq, PartialOrd}; +use ops::{Deref, DerefMut, Receiver, CoerceUnsized, DispatchFromDyn}; /// A pinned pointer. /// @@ -117,22 +275,70 @@ pub use marker::Unpin; /// [`Unpin`]: ../../std/marker/trait.Unpin.html /// [`pin` module]: ../../std/pin/index.html // -// Note: the derives below are allowed because they all only use `&P`, so they -// cannot move the value behind `pointer`. -#[unstable(feature = "pin", issue = "49150")] +// Note: the derives below, and the explicit `PartialEq` and `PartialOrd` +// implementations, are allowed because they all only use `&P`, so they cannot move +// the value behind `pointer`. +#[stable(feature = "pin", since = "1.33.0")] +#[lang = "pin"] #[fundamental] -#[derive(Copy, Clone, Hash, Eq, PartialEq, Ord, PartialOrd)] +#[repr(transparent)] +#[derive(Copy, Clone, Hash, Eq, Ord)] pub struct Pin
<P>
{ pointer: P, } +#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")] +impl PartialEq> for Pin
<P>
+where + P: PartialEq, +{ + fn eq(&self, other: &Pin) -> bool { + self.pointer == other.pointer + } + + fn ne(&self, other: &Pin) -> bool { + self.pointer != other.pointer + } +} + +#[stable(feature = "pin_partialeq_partialord_impl_applicability", since = "1.34.0")] +impl PartialOrd> for Pin
<P>
+where + P: PartialOrd, +{ + fn partial_cmp(&self, other: &Pin) -> Option { + self.pointer.partial_cmp(&other.pointer) + } + + fn lt(&self, other: &Pin) -> bool { + self.pointer < other.pointer + } + + fn le(&self, other: &Pin) -> bool { + self.pointer <= other.pointer + } + + fn gt(&self, other: &Pin) -> bool { + self.pointer > other.pointer + } + + fn ge(&self, other: &Pin) -> bool { + self.pointer >= other.pointer + } +} + impl Pin
<P>
where P::Target: Unpin, { - /// Construct a new `Pin` around a pointer to some data of a type that - /// implements `Unpin`. - #[unstable(feature = "pin", issue = "49150")] + /// Construct a new `Pin
<P>
` around a pointer to some data of a type that + /// implements [`Unpin`]. + /// + /// Unlike `Pin::new_unchecked`, this method is safe because the pointer + /// `P` dereferences to an [`Unpin`] type, which cancels the pinning guarantees. + /// + /// [`Unpin`]: ../../std/marker/trait.Unpin.html + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] pub fn new(pointer: P) -> Pin
<P>
{ // Safety: the value pointed to is `Unpin`, and so has no requirements @@ -142,26 +348,84 @@ where } impl Pin
<P>
{ - /// Construct a new `Pin` around a reference to some data of a type that + /// Construct a new `Pin
<P>
` around a reference to some data of a type that /// may or may not implement `Unpin`. /// + /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used + /// instead. + /// /// # Safety /// /// This constructor is unsafe because we cannot guarantee that the data - /// pointed to by `pointer` is pinned. If the constructed `Pin
<P>
` does - /// not guarantee that the data `P` points to is pinned, constructing a - /// `Pin
<P>
` is undefined behavior. + /// pointed to by `pointer` is pinned, meaning that the data will not be moved or + /// its storage invalidated until it gets dropped. If the constructed `Pin
<P>
` does + /// not guarantee that the data `P` points to is pinned, that is a violation of + /// the API contract and may lead to undefined behavior in later (safe) operations. /// - /// If `pointer` dereferences to an `Unpin` type, `Pin::new` should be used - /// instead. - #[unstable(feature = "pin", issue = "49150")] + /// By using this method, you are making a promise about the `P::Deref` and + /// `P::DerefMut` implementations, if they exist. Most importantly, they + /// must not move out of their `self` arguments: `Pin::as_mut` and `Pin::as_ref` + /// will call `DerefMut::deref_mut` and `Deref::deref` *on the pinned pointer* + /// and expect these methods to uphold the pinning invariants. + /// Moreover, by calling this method you promise that the reference `P` + /// dereferences to will not be moved out of again; in particular, it + /// must not be possible to obtain a `&mut P::Target` and then + /// move out of that reference (using, for example [`mem::swap`]). + /// + /// For example, calling `Pin::new_unchecked` on an `&'a mut T` is unsafe because + /// while you are able to pin it for the given lifetime `'a`, you have no control + /// over whether it is kept pinned once `'a` ends: + /// ``` + /// use std::mem; + /// use std::pin::Pin; + /// + /// fn move_pinned_ref(mut a: T, mut b: T) { + /// unsafe { + /// let p: Pin<&mut T> = Pin::new_unchecked(&mut a); + /// // This should mean the pointee `a` can never move again. + /// } + /// mem::swap(&mut a, &mut b); + /// // The address of `a` changed to `b`'s stack slot, so `a` got moved even + /// // though we have previously pinned it! We have violated the pinning API contract. + /// } + /// ``` + /// A value, once pinned, must remain pinned forever (unless its type implements `Unpin`). + /// + /// Similarily, calling `Pin::new_unchecked` on an `Rc` is unsafe because there could be + /// aliases to the same data that are not subject to the pinning restrictions: + /// ``` + /// use std::rc::Rc; + /// use std::pin::Pin; + /// + /// fn move_pinned_rc(mut x: Rc) { + /// let pinned = unsafe { Pin::new_unchecked(x.clone()) }; + /// { + /// let p: Pin<&T> = pinned.as_ref(); + /// // This should mean the pointee can never move again. + /// } + /// drop(pinned); + /// let content = Rc::get_mut(&mut x).unwrap(); + /// // Now, if `x` was the only reference, we have a mutable reference to + /// // data that we pinned above, which we could use to move it as we have + /// // seen in the previous example. We have violated the pinning API contract. + /// } + /// ``` + /// + /// [`mem::swap`]: ../../std/mem/fn.swap.html + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] pub unsafe fn new_unchecked(pointer: P) -> Pin
<P>
{ Pin { pointer } } - /// Get a pinned shared reference from this pinned pointer. - #[unstable(feature = "pin", issue = "49150")] + /// Gets a pinned shared reference from this pinned pointer. + /// + /// This is a generic method to go from `&Pin>` to `Pin<&T>`. + /// It is safe because, as part of the contract of `Pin::new_unchecked`, + /// the pointee cannot move after `Pin>` got created. + /// "Malicious" implementations of `Pointer::Deref` are likewise + /// ruled out by the contract of `Pin::new_unchecked`. + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] pub fn as_ref(self: &Pin
<P>
) -> Pin<&P::Target> { unsafe { Pin::new_unchecked(&*self.pointer) } @@ -169,29 +433,40 @@ impl Pin
<P>
{ } impl Pin
<P>
{ - /// Get a pinned mutable reference from this pinned pointer. - #[unstable(feature = "pin", issue = "49150")] + /// Gets a pinned mutable reference from this pinned pointer. + /// + /// This is a generic method to go from `&mut Pin>` to `Pin<&mut T>`. + /// It is safe because, as part of the contract of `Pin::new_unchecked`, + /// the pointee cannot move after `Pin>` got created. + /// "Malicious" implementations of `Pointer::DerefMut` are likewise + /// ruled out by the contract of `Pin::new_unchecked`. + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] pub fn as_mut(self: &mut Pin
<P>
) -> Pin<&mut P::Target> { unsafe { Pin::new_unchecked(&mut *self.pointer) } } - /// Assign a new value to the memory behind the pinned reference. - #[unstable(feature = "pin", issue = "49150")] + /// Assigns a new value to the memory behind the pinned reference. + /// + /// This overwrites pinned data, but that is okay: its destructor gets + /// run before being overwritten, so no pinning guarantee is violated. + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub fn set(mut self: Pin
<P>
, value: P::Target) + pub fn set(self: &mut Pin
<P>
, value: P::Target) where P::Target: Sized, { - *self.pointer = value; + *(self.pointer) = value; } } impl<'a, T: ?Sized> Pin<&'a T> { - /// Construct a new pin by mapping the interior value. + /// Constructs a new pin by mapping the interior value. /// /// For example, if you wanted to get a `Pin` of a field of something, /// you could use this to get access to that field in one line of code. + /// However, there are several gotchas with these "pinning projections"; + /// see the [`pin` module] documentation for further details on that topic. /// /// # Safety /// @@ -199,38 +474,50 @@ impl<'a, T: ?Sized> Pin<&'a T> { /// will not move so long as the argument value does not move (for example, /// because it is one of the fields of that value), and also that you do /// not move out of the argument you receive to the interior function. - #[unstable(feature = "pin", issue = "49150")] - pub unsafe fn map_unchecked(this: Pin<&'a T>, func: F) -> Pin<&'a U> where + /// + /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning + #[stable(feature = "pin", since = "1.33.0")] + pub unsafe fn map_unchecked(self: Pin<&'a T>, func: F) -> Pin<&'a U> where F: FnOnce(&T) -> &U, { - let pointer = &*this.pointer; + let pointer = &*self.pointer; let new_pointer = func(pointer); Pin::new_unchecked(new_pointer) } - /// Get a shared reference out of a pin. + /// Gets a shared reference out of a pin. + /// + /// This is safe because it is not possible to move out of a shared reference. + /// It may seem like there is an issue here with interior mutability: in fact, + /// it *is* possible to move a `T` out of a `&RefCell`. However, this is + /// not a problem as long as there does not also exist a `Pin<&T>` pointing + /// to the same data, and `RefCell` does not let you create a pinned reference + /// to its contents. See the discussion on ["pinning projections"] for further + /// details. /// /// Note: `Pin` also implements `Deref` to the target, which can be used /// to access the inner value. However, `Deref` only provides a reference /// that lives for as long as the borrow of the `Pin`, not the lifetime of /// the `Pin` itself. This method allows turning the `Pin` into a reference /// with the same lifetime as the original `Pin`. - #[unstable(feature = "pin", issue = "49150")] + /// + /// ["pinning projections"]: ../../std/pin/index.html#projections-and-structural-pinning + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub fn get_ref(this: Pin<&'a T>) -> &'a T { - this.pointer + pub fn get_ref(self: Pin<&'a T>) -> &'a T { + self.pointer } } impl<'a, T: ?Sized> Pin<&'a mut T> { - /// Convert this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime. - #[unstable(feature = "pin", issue = "49150")] + /// Converts this `Pin<&mut T>` into a `Pin<&T>` with the same lifetime. + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub fn into_ref(this: Pin<&'a mut T>) -> Pin<&'a T> { - Pin { pointer: this.pointer } + pub fn into_ref(self: Pin<&'a mut T>) -> Pin<&'a T> { + Pin { pointer: self.pointer } } - /// Get a mutable reference to the data inside of this `Pin`. + /// Gets a mutable reference to the data inside of this `Pin`. /// /// This requires that the data inside this `Pin` is `Unpin`. /// @@ -239,15 +526,15 @@ impl<'a, T: ?Sized> Pin<&'a mut T> { /// that lives for as long as the borrow of the `Pin`, not the lifetime of /// the `Pin` itself. This method allows turning the `Pin` into a reference /// with the same lifetime as the original `Pin`. 
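The `set` hunk a few lines up changes the receiver from `mut self: Pin<P>` to `self: &mut Pin<P>`, so overwriting the pinned value no longer consumes the pinned pointer. A minimal sketch, assuming an `Unpin` target so that the safe `Pin::new` constructor applies:

```rust
use std::pin::Pin;

fn main() {
    let mut value = 5;
    // `i32: Unpin`, so the safe constructor can be used.
    let mut pinned: Pin<&mut i32> = Pin::new(&mut value);
    // `set` drops the old value in place and writes the new one;
    // because it now takes `&mut Pin<P>`, `pinned` stays usable afterwards.
    pinned.set(10);
    assert_eq!(*pinned, 10);
}
```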
- #[unstable(feature = "pin", issue = "49150")] + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub fn get_mut(this: Pin<&'a mut T>) -> &'a mut T + pub fn get_mut(self: Pin<&'a mut T>) -> &'a mut T where T: Unpin, { - this.pointer + self.pointer } - /// Get a mutable reference to the data inside of this `Pin`. + /// Gets a mutable reference to the data inside of this `Pin`. /// /// # Safety /// @@ -257,16 +544,18 @@ impl<'a, T: ?Sized> Pin<&'a mut T> { /// /// If the underlying data is `Unpin`, `Pin::get_mut` should be used /// instead. - #[unstable(feature = "pin", issue = "49150")] + #[stable(feature = "pin", since = "1.33.0")] #[inline(always)] - pub unsafe fn get_mut_unchecked(this: Pin<&'a mut T>) -> &'a mut T { - this.pointer + pub unsafe fn get_unchecked_mut(self: Pin<&'a mut T>) -> &'a mut T { + self.pointer } /// Construct a new pin by mapping the interior value. /// /// For example, if you wanted to get a `Pin` of a field of something, /// you could use this to get access to that field in one line of code. + /// However, there are several gotchas with these "pinning projections"; + /// see the [`pin` module] documentation for further details on that topic. /// /// # Safety /// @@ -274,17 +563,19 @@ impl<'a, T: ?Sized> Pin<&'a mut T> { /// will not move so long as the argument value does not move (for example, /// because it is one of the fields of that value), and also that you do /// not move out of the argument you receive to the interior function. - #[unstable(feature = "pin", issue = "49150")] - pub unsafe fn map_unchecked_mut(this: Pin<&'a mut T>, func: F) -> Pin<&'a mut U> where + /// + /// [`pin` module]: ../../std/pin/index.html#projections-and-structural-pinning + #[stable(feature = "pin", since = "1.33.0")] + pub unsafe fn map_unchecked_mut(self: Pin<&'a mut T>, func: F) -> Pin<&'a mut U> where F: FnOnce(&mut T) -> &mut U, { - let pointer = Pin::get_mut_unchecked(this); + let pointer = Pin::get_unchecked_mut(self); let new_pointer = func(pointer); Pin::new_unchecked(new_pointer) } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl Deref for Pin
<P>
{ type Target = P::Target; fn deref(&self) -> &P::Target { @@ -292,7 +583,7 @@ impl Deref for Pin
<P>
{ } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl DerefMut for Pin
<P>
where P::Target: Unpin @@ -302,21 +593,24 @@ where } } -#[unstable(feature = "pin", issue = "49150")] +#[unstable(feature = "receiver_trait", issue = "0")] +impl Receiver for Pin
<P>
{} + +#[stable(feature = "pin", since = "1.33.0")] impl fmt::Debug for Pin
<P>
{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Debug::fmt(&self.pointer, f) } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl fmt::Display for Pin
<P>
{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Display::fmt(&self.pointer, f) } } -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl fmt::Pointer for Pin
<P>
{ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::Pointer::fmt(&self.pointer, f) @@ -328,17 +622,14 @@ impl fmt::Pointer for Pin
<P>
{ // `Deref` is unsound. Any such impl would probably be unsound // for other reasons, though, so we just need to take care not to allow such // impls to land in std. -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl CoerceUnsized> for Pin
<P>
where P: CoerceUnsized, {} -#[unstable(feature = "pin", issue = "49150")] +#[stable(feature = "pin", since = "1.33.0")] impl<'a, P, U> DispatchFromDyn> for Pin
<P>
where P: DispatchFromDyn, {} - -#[unstable(feature = "pin", issue = "49150")] -impl
<P>
Unpin for Pin
<P>
{} diff --git a/src/libcore/prelude/mod.rs b/src/libcore/prelude/mod.rs index 99b1947c84e4f..51f4acf068580 100644 --- a/src/libcore/prelude/mod.rs +++ b/src/libcore/prelude/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The libcore prelude #![stable(feature = "core_prelude", since = "1.4.0")] diff --git a/src/libcore/prelude/v1.rs b/src/libcore/prelude/v1.rs index 45f629a64424c..b53494edbf401 100644 --- a/src/libcore/prelude/v1.rs +++ b/src/libcore/prelude/v1.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The core prelude //! //! This module is intended for users of libcore which do not link to libstd as @@ -19,7 +9,7 @@ // Re-exported core operators #[stable(feature = "core_prelude", since = "1.4.0")] #[doc(no_inline)] -pub use marker::{Copy, Send, Sized, Sync}; +pub use marker::{Copy, Send, Sized, Sync, Unpin}; #[stable(feature = "core_prelude", since = "1.4.0")] #[doc(no_inline)] pub use ops::{Drop, Fn, FnMut, FnOnce}; diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index b3c93ae1fa7b2..e002052bf27bf 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Manually manage memory through raw pointers. //! //! *[See also the pointer primitive types](../../std/primitive.pointer.html).* @@ -22,7 +12,7 @@ //! to access only a single value, in which case the documentation omits the size //! and implicitly assumes it to be `size_of::()` bytes. //! -//! The precise rules for validity are not determined yet. The guarantees that are +//! The precise rules for validity are not determined yet. The guarantees that are //! provided at this point are very minimal: //! //! * A [null] pointer is *never* valid, not even for accesses of [size zero][zst]. @@ -80,7 +70,6 @@ use fmt; use hash; use marker::{PhantomData, Unsize}; use mem::{self, MaybeUninit}; -use nonzero::NonZero; use cmp::Ordering::{self, Less, Equal, Greater}; @@ -115,7 +104,7 @@ pub use intrinsics::write_bytes; /// /// * `to_drop` must be [valid] for reads. /// -/// * `to_drop` must be properly aligned. See the example below for how to drop +/// * `to_drop` must be properly aligned. See the example below for how to drop /// an unaligned pointer. /// /// Additionally, if `T` is not [`Copy`], using the pointed-to value after @@ -146,7 +135,7 @@ pub use intrinsics::write_bytes; /// unsafe { /// // Get a raw pointer to the last element in `v`. /// let ptr = &mut v[1] as *mut _; -/// // Shorten `v` to prevent the last item from being dropped. 
We do that first, +/// // Shorten `v` to prevent the last item from being dropped. We do that first, /// // to prevent issues if the `drop_in_place` below panics. /// v.set_len(1); /// // Without a call `drop_in_place`, the last item would never be dropped, @@ -307,12 +296,12 @@ pub const fn null_mut() -> *mut T { 0 as *mut T } pub unsafe fn swap(x: *mut T, y: *mut T) { // Give ourselves some scratch space to work with. // We do not have to worry about drops: `MaybeUninit` does nothing when dropped. - let mut tmp = MaybeUninit::::uninitialized(); + let mut tmp = MaybeUninit::::uninit(); // Perform the swap copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); copy(y, x, 1); // `x` and `y` may overlap - copy_nonoverlapping(tmp.get_ref(), y, 1); + copy_nonoverlapping(tmp.as_ptr(), y, 1); } /// Swaps `count * size_of::()` bytes between the two regions of memory @@ -399,7 +388,7 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { while i + block_size <= len { // Create some uninitialized memory as scratch space // Declaring `t` here avoids aligning the stack when this loop is unused - let mut t = mem::MaybeUninit::::uninitialized(); + let mut t = mem::MaybeUninit::::uninit(); let t = t.as_mut_ptr() as *mut u8; let x = x.add(i); let y = y.add(i); @@ -414,7 +403,7 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { if i < len { // Swap any remaining bytes - let mut t = mem::MaybeUninit::::uninitialized(); + let mut t = mem::MaybeUninit::::uninit(); let rem = len - i; let t = t.as_mut_ptr() as *mut u8; @@ -542,7 +531,7 @@ pub unsafe fn replace(dst: *mut T, mut src: T) -> T { /// /// `read` creates a bitwise copy of `T`, regardless of whether `T` is [`Copy`]. /// If `T` is not [`Copy`], using both the returned value and the value at -/// `*src` can violate memory safety. Note that assigning to `*src` counts as a +/// `*src` can violate memory safety. Note that assigning to `*src` counts as a /// use because it will attempt to drop the value at `*src`. /// /// [`write`] can be used to overwrite data without causing it to be dropped. @@ -582,9 +571,9 @@ pub unsafe fn replace(dst: *mut T, mut src: T) -> T { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub unsafe fn read(src: *const T) -> T { - let mut tmp = MaybeUninit::::uninitialized(); + let mut tmp = MaybeUninit::::uninit(); copy_nonoverlapping(src, tmp.as_mut_ptr(), 1); - tmp.into_inner() + tmp.assume_init() } /// Reads the value from `src` without moving it. This leaves the @@ -599,10 +588,10 @@ pub unsafe fn read(src: *const T) -> T { /// * `src` must be [valid] for reads. /// /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of -/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned /// value and the value at `*src` can [violate memory safety][read-ownership]. /// -/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. +/// Note that even if `T` has size `0`, the pointer must be non-NULL. 
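The `read`/`read_unaligned` hunks above switch the scratch space to the renamed `MaybeUninit::uninit`/`assume_init` pair and relax the doc note so that zero-sized reads only require a non-null (not aligned) pointer. A small sketch of what `read_unaligned` permits; the buffer and offset are illustrative:

```rust
use std::ptr;

fn main() {
    // The `u32` starting at offset 1 is not necessarily 4-byte aligned.
    let bytes: [u8; 5] = [0x00, 0x78, 0x56, 0x34, 0x12];
    let unaligned = unsafe {
        // `ptr::read` would require a properly aligned pointer here;
        // `read_unaligned` only requires the pointer to be valid for reads.
        ptr::read_unaligned(bytes.as_ptr().add(1) as *const u32)
    };
    assert_eq!(unaligned, u32::from_ne_bytes([0x78, 0x56, 0x34, 0x12]));
}
```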
/// /// [`Copy`]: ../marker/trait.Copy.html /// [`read`]: ./fn.read.html @@ -649,11 +638,11 @@ pub unsafe fn read(src: *const T) -> T { #[inline] #[stable(feature = "ptr_unaligned", since = "1.17.0")] pub unsafe fn read_unaligned(src: *const T) -> T { - let mut tmp = MaybeUninit::::uninitialized(); + let mut tmp = MaybeUninit::::uninit(); copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::()); - tmp.into_inner() + tmp.assume_init() } /// Overwrites a memory location with the given value without reading or @@ -770,7 +759,7 @@ pub unsafe fn write(dst: *mut T, src: T) { /// /// * `dst` must be [valid] for writes. /// -/// Note that even if `T` has size `0`, the pointer must be non-NULL and properly aligned. +/// Note that even if `T` has size `0`, the pointer must be non-NULL. /// /// [valid]: ../ptr/index.html#safety /// @@ -836,7 +825,7 @@ pub unsafe fn write_unaligned(dst: *mut T, src: T) { /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types -/// (e.g., if a zero-sized type is passed to `read_volatile`) are no-ops +/// (e.g., if a zero-sized type is passed to `read_volatile`) are noops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf @@ -850,7 +839,7 @@ pub unsafe fn write_unaligned(dst: *mut T, src: T) { /// * `src` must be properly aligned. /// /// Like [`read`], `read_unaligned` creates a bitwise copy of `T`, regardless of -/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned +/// whether `T` is [`Copy`]. If `T` is not [`Copy`], using both the returned /// value and the value at `*src` can [violate memory safety][read-ownership]. /// However, storing non-[`Copy`] types in volatile memory is almost certainly /// incorrect. @@ -860,6 +849,7 @@ pub unsafe fn write_unaligned(dst: *mut T, src: T) { /// [valid]: ../ptr/index.html#safety /// [`Copy`]: ../marker/trait.Copy.html /// [`read`]: ./fn.read.html +/// [read-ownership]: ./fn.read.html#ownership-of-the-returned-value /// /// Just like in C, whether an operation is volatile has no bearing whatsoever /// on questions involving concurrent access from multiple threads. Volatile @@ -913,7 +903,7 @@ pub unsafe fn read_volatile(src: *const T) -> T { /// /// The compiler shouldn't change the relative order or number of volatile /// memory operations. However, volatile memory operations on zero-sized types -/// (e.g., if a zero-sized type is passed to `write_volatile`) are no-ops +/// (e.g., if a zero-sized type is passed to `write_volatile`) are noops /// and may be ignored. /// /// [c11]: http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf @@ -1103,7 +1093,7 @@ impl *const T { /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` - /// allows the compiler to optimize better. If you need to cross object + /// allows the compiler to optimize better. If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. /// /// # Examples @@ -1722,7 +1712,7 @@ impl *mut T { /// unless `x` and `y` point into the same allocated object. /// /// Always use `.offset(count)` instead when possible, because `offset` - /// allows the compiler to optimize better. If you need to cross object + /// allows the compiler to optimize better. 
If you need to cross object /// boundaries, cast the pointer to an integer and do the arithmetic there. /// /// # Examples @@ -2483,7 +2473,7 @@ impl PartialEq for *mut T { #[stable(feature = "rust1", since = "1.0.0")] impl Eq for *mut T {} -/// Compare raw pointers for equality. +/// Compares raw pointers for equality. /// /// This is the same as using the `==` operator, but less generic: /// the arguments have to be `*const T` raw pointers, @@ -2505,11 +2495,57 @@ impl Eq for *mut T {} /// let other_five_ref = &other_five; /// /// assert!(five_ref == same_five_ref); -/// assert!(five_ref == other_five_ref); -/// /// assert!(ptr::eq(five_ref, same_five_ref)); +/// +/// assert!(five_ref == other_five_ref); /// assert!(!ptr::eq(five_ref, other_five_ref)); /// ``` +/// +/// Slices are also compared by their length (fat pointers): +/// +/// ``` +/// let a = [1, 2, 3]; +/// assert!(std::ptr::eq(&a[..3], &a[..3])); +/// assert!(!std::ptr::eq(&a[..2], &a[..3])); +/// assert!(!std::ptr::eq(&a[0..2], &a[1..3])); +/// ``` +/// +/// Traits are also compared by their implementation: +/// +/// ``` +/// #[repr(transparent)] +/// struct Wrapper { member: i32 } +/// +/// trait Trait {} +/// impl Trait for Wrapper {} +/// impl Trait for i32 {} +/// +/// fn main() { +/// let wrapper = Wrapper { member: 10 }; +/// +/// // Pointers have equal addresses. +/// assert!(std::ptr::eq( +/// &wrapper as *const Wrapper as *const u8, +/// &wrapper.member as *const i32 as *const u8 +/// )); +/// +/// // Objects have equal addresses, but `Trait` has different implementations. +/// assert!(!std::ptr::eq( +/// &wrapper as &dyn Trait, +/// &wrapper.member as &dyn Trait, +/// )); +/// assert!(!std::ptr::eq( +/// &wrapper as &dyn Trait as *const dyn Trait, +/// &wrapper.member as &dyn Trait as *const dyn Trait, +/// )); +/// +/// // Converting the reference to a `*const u8` compares by address. +/// assert!(std::ptr::eq( +/// &wrapper as &dyn Trait as *const dyn Trait as *const u8, +/// &wrapper.member as &dyn Trait as *const dyn Trait as *const u8, +/// )); +/// } +/// ``` #[stable(feature = "ptr_eq", since = "1.17.0")] #[inline] pub fn eq(a: *const T, b: *const T) -> bool { @@ -2525,7 +2561,6 @@ pub fn eq(a: *const T, b: *const T) -> bool { /// # Examples /// /// ``` -/// #![feature(ptr_hash)] /// use std::collections::hash_map::DefaultHasher; /// use std::hash::{Hash, Hasher}; /// use std::ptr; @@ -2543,7 +2578,7 @@ pub fn eq(a: *const T, b: *const T) -> bool { /// /// assert_eq!(actual, expected); /// ``` -#[unstable(feature = "ptr_hash", reason = "newly added", issue = "56286")] +#[stable(feature = "ptr_hash", since = "1.35.0")] pub fn hash(hashee: *const T, into: &mut S) { use hash::Hash; hashee.hash(into); @@ -2589,14 +2624,14 @@ macro_rules! 
fnptr_impls_safety_abi { #[stable(feature = "fnptr_impls", since = "1.4.0")] impl fmt::Pointer for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&(*self as *const ()), f) + "disabled due to avr-rust/rust#143".fmt(f) } } #[stable(feature = "fnptr_impls", since = "1.4.0")] impl fmt::Debug for $FnTy { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Pointer::fmt(&(*self as *const ()), f) + "disabled due to avr-rust/rust#143".fmt(f) } } } @@ -2728,8 +2763,9 @@ impl PartialOrd for *mut T { (if you also use #[may_dangle]), Send, and/or Sync")] #[doc(hidden)] #[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(1)] pub struct Unique { - pointer: NonZero<*const T>, + pointer: *const T, // NOTE: this marker has no consequences for variance, but is necessary // for dropck to understand that we logically own a `T`. // @@ -2786,21 +2822,21 @@ impl Unique { /// /// `ptr` must be non-null. pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { - Unique { pointer: NonZero(ptr as _), _marker: PhantomData } + Unique { pointer: ptr as _, _marker: PhantomData } } /// Creates a new `Unique` if `ptr` is non-null. pub fn new(ptr: *mut T) -> Option { if !ptr.is_null() { - Some(Unique { pointer: unsafe { NonZero(ptr as _) }, _marker: PhantomData }) + Some(unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }) } else { None } } /// Acquires the underlying `*mut` pointer. - pub fn as_ptr(self) -> *mut T { - self.pointer.0 as *mut T + pub const fn as_ptr(self) -> *mut T { + self.pointer as *mut T } /// Dereferences the content. @@ -2846,23 +2882,23 @@ impl fmt::Pointer for Unique { } #[unstable(feature = "ptr_internals", issue = "0")] -impl<'a, T: ?Sized> From<&'a mut T> for Unique { - fn from(reference: &'a mut T) -> Self { - Unique { pointer: unsafe { NonZero(reference as *mut T) }, _marker: PhantomData } +impl From<&mut T> for Unique { + fn from(reference: &mut T) -> Self { + unsafe { Unique { pointer: reference as *mut T, _marker: PhantomData } } } } #[unstable(feature = "ptr_internals", issue = "0")] -impl<'a, T: ?Sized> From<&'a T> for Unique { - fn from(reference: &'a T) -> Self { - Unique { pointer: unsafe { NonZero(reference as *const T) }, _marker: PhantomData } +impl From<&T> for Unique { + fn from(reference: &T) -> Self { + unsafe { Unique { pointer: reference as *const T, _marker: PhantomData } } } } #[unstable(feature = "ptr_internals", issue = "0")] impl<'a, T: ?Sized> From> for Unique { fn from(p: NonNull) -> Self { - Unique { pointer: p.pointer, _marker: PhantomData } + unsafe { Unique { pointer: p.pointer, _marker: PhantomData } } } } @@ -2878,15 +2914,27 @@ impl<'a, T: ?Sized> From> for Unique { /// However the pointer may still dangle if it isn't dereferenced. /// /// Unlike `*mut T`, `NonNull` is covariant over `T`. If this is incorrect -/// for your use case, you should include some PhantomData in your type to +/// for your use case, you should include some [`PhantomData`] in your type to /// provide invariance, such as `PhantomData>` or `PhantomData<&'a mut T>`. /// Usually this won't be necessary; covariance is correct for most safe abstractions, -/// such as Box, Rc, Arc, Vec, and LinkedList. This is the case because they +/// such as `Box`, `Rc`, `Arc`, `Vec`, and `LinkedList`. This is the case because they /// provide a public API that follows the normal shared XOR mutable rules of Rust. +/// +/// Notice that `NonNull` has a `From` instance for `&T`. 
However, this does +/// not change the fact that mutating through a (pointer derived from a) shared +/// reference is undefined behavior unless the mutation happens inside an +/// [`UnsafeCell`]. The same goes for creating a mutable reference from a shared +/// reference. When using this `From` instance without an `UnsafeCell`, +/// it is your responsibility to ensure that `as_mut` is never called, and `as_ptr` +/// is never used for mutation. +/// +/// [`PhantomData`]: ../marker/struct.PhantomData.html +/// [`UnsafeCell`]: ../cell/struct.UnsafeCell.html #[stable(feature = "nonnull", since = "1.25.0")] #[repr(transparent)] +#[rustc_layout_scalar_valid_range_start(1)] pub struct NonNull { - pointer: NonZero<*const T>, + pointer: *const T, } /// `NonNull` pointers are not `Send` because the data they reference may be aliased. @@ -2911,7 +2959,8 @@ impl NonNull { /// some other means. #[stable(feature = "nonnull", since = "1.25.0")] #[inline] - pub fn dangling() -> Self { + #[rustc_const_unstable(feature = "const_ptr_nonnull")] + pub const fn dangling() -> Self { unsafe { let ptr = mem::align_of::() as *mut T; NonNull::new_unchecked(ptr) @@ -2928,7 +2977,7 @@ impl NonNull { #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub const unsafe fn new_unchecked(ptr: *mut T) -> Self { - NonNull { pointer: NonZero(ptr as _) } + NonNull { pointer: ptr as _ } } /// Creates a new `NonNull` if `ptr` is non-null. @@ -2946,7 +2995,7 @@ impl NonNull { #[stable(feature = "nonnull", since = "1.25.0")] #[inline] pub const fn as_ptr(self) -> *mut T { - self.pointer.0 as *mut T + self.pointer as *mut T } /// Dereferences the content. @@ -2974,7 +3023,8 @@ impl NonNull { /// Cast to a pointer of another type #[stable(feature = "nonnull_cast", since = "1.27.0")] #[inline] - pub fn cast(self) -> NonNull { + #[rustc_const_unstable(feature = "const_ptr_nonnull")] + pub const fn cast(self) -> NonNull { unsafe { NonNull::new_unchecked(self.as_ptr() as *mut U) } @@ -3050,22 +3100,22 @@ impl hash::Hash for NonNull { impl From> for NonNull { #[inline] fn from(unique: Unique) -> Self { - NonNull { pointer: unique.pointer } + unsafe { NonNull { pointer: unique.pointer } } } } #[stable(feature = "nonnull", since = "1.25.0")] -impl<'a, T: ?Sized> From<&'a mut T> for NonNull { +impl From<&mut T> for NonNull { #[inline] - fn from(reference: &'a mut T) -> Self { - NonNull { pointer: unsafe { NonZero(reference as *mut T) } } + fn from(reference: &mut T) -> Self { + unsafe { NonNull { pointer: reference as *mut T } } } } #[stable(feature = "nonnull", since = "1.25.0")] -impl<'a, T: ?Sized> From<&'a T> for NonNull { +impl From<&T> for NonNull { #[inline] - fn from(reference: &'a T) -> Self { - NonNull { pointer: unsafe { NonZero(reference as *const T) } } + fn from(reference: &T) -> Self { + unsafe { NonNull { pointer: reference as *const T } } } } diff --git a/src/libcore/raw.rs b/src/libcore/raw.rs index 4f1af8bf110e4..155429b0e4f54 100644 --- a/src/libcore/raw.rs +++ b/src/libcore/raw.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
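Stepping back to the `Unique` / `NonNull` hunks above: the new `#[rustc_layout_scalar_valid_range_start(1)]` attribute on a plain `*const T` field keeps the non-zero niche that the removed `NonZero` wrapper used to provide, so `Option` of these types stays pointer-sized. A small check of that observable effect (not part of the diff):

```rust
use std::mem::size_of;
use std::ptr::NonNull;

fn main() {
    // The forbidden zero value lets `None` be encoded as a null pointer.
    assert_eq!(size_of::<Option<NonNull<u8>>>(), size_of::<*mut u8>());
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<*mut u8>());
}
```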
- #![allow(missing_docs)] #![unstable(feature = "raw", issue = "27751")] diff --git a/src/libcore/result.rs b/src/libcore/result.rs index 3484664c24205..9b7b83689861b 100644 --- a/src/libcore/result.rs +++ b/src/libcore/result.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Error handling with the `Result` type. //! //! [`Result`][`Result`] is the type used for returning and propagating @@ -379,7 +369,7 @@ impl Result { // Adapter for working with references ///////////////////////////////////////////////////////////////////////// - /// Converts from `Result` to `Result<&T, &E>`. + /// Converts from `&Result` to `Result<&T, &E>`. /// /// Produces a new `Result`, containing a reference /// into the original, leaving the original in place. @@ -404,7 +394,7 @@ impl Result { } } - /// Converts from `Result` to `Result<&mut T, &mut E>`. + /// Converts from `&mut Result` to `Result<&mut T, &mut E>`. /// /// # Examples /// @@ -906,7 +896,7 @@ impl Result { /// /// # Examples /// - /// Convert a string to an integer, turning poorly-formed strings + /// Converts a string to an integer, turning poorly-formed strings /// into 0 (the default value for integers). [`parse`] converts /// a string to any other type that implements [`FromStr`], returning an /// [`Err`] on error. @@ -982,8 +972,6 @@ impl Result, E> { /// # Examples /// /// ``` - /// #![feature(transpose_result)] - /// /// #[derive(Debug, Eq, PartialEq)] /// struct SomeErr; /// @@ -992,7 +980,7 @@ impl Result, E> { /// assert_eq!(x.transpose(), y); /// ``` #[inline] - #[unstable(feature = "transpose_result", issue = "47338")] + #[stable(feature = "transpose_result", since = "1.33.0")] pub fn transpose(self) -> Option> { match self { Ok(Some(x)) => Some(Ok(x)), @@ -1212,8 +1200,36 @@ impl> FromIterator> for Result { /// let res: Result, &'static str> = v.iter().map(|x: &u32| /// x.checked_add(1).ok_or("Overflow!") /// ).collect(); - /// assert!(res == Ok(vec![2, 3])); + /// assert_eq!(res, Ok(vec![2, 3])); /// ``` + /// + /// Here is another example that tries to subtract one from another list + /// of integers, this time checking for underflow: + /// + /// ``` + /// let v = vec![1, 2, 0]; + /// let res: Result, &'static str> = v.iter().map(|x: &u32| + /// x.checked_sub(1).ok_or("Underflow!") + /// ).collect(); + /// assert_eq!(res, Err("Underflow!")); + /// ``` + /// + /// Here is a variation on the previous example, showing that no + /// further elements are taken from `iter` after the first `Err`. + /// + /// ``` + /// let v = vec![3, 2, 1, 10]; + /// let mut shared = 0; + /// let res: Result, &'static str> = v.iter().map(|x: &u32| { + /// shared += x; + /// x.checked_sub(2).ok_or("Underflow!") + /// }).collect(); + /// assert_eq!(res, Err("Underflow!")); + /// assert_eq!(shared, 6); + /// ``` + /// + /// Since the third element caused an underflow, no further elements were taken, + /// so the final value of `shared` is 6 (= `3 + 2 + 1`), not 16. 
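The collect examples above rely on `FromIterator for Result` stopping at the first `Err`. A simplified, hand-rolled equivalent of that behaviour (the real impl in libcore is generic over the target collection; `try_collect` here is just an illustrative name):

```rust
fn try_collect<T, E>(iter: impl IntoIterator<Item = Result<T, E>>) -> Result<Vec<T>, E> {
    let mut out = Vec::new();
    for item in iter {
        match item {
            Ok(v) => out.push(v),
            // First error wins; later elements are never inspected.
            Err(e) => return Err(e),
        }
    }
    Ok(out)
}

fn main() {
    let v = vec![3u32, 2, 1, 10];
    let res = try_collect(v.iter().map(|x| x.checked_sub(2).ok_or("Underflow!")));
    assert_eq!(res, Err("Underflow!"));
}
```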
#[inline] fn from_iter>>(iter: I) -> Result { // FIXME(#11084): This could be replaced with Iterator::scan when this diff --git a/src/libcore/slice/memchr.rs b/src/libcore/slice/memchr.rs index cf95333af9cbb..cbba546b8daba 100644 --- a/src/libcore/slice/memchr.rs +++ b/src/libcore/slice/memchr.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -// -// Original implementation taken from rust-memchr +// Original implementation taken from rust-memchr. // Copyright 2015 Andrew Gallant, bluss and Nicolas Koch use cmp; @@ -17,13 +7,13 @@ use mem; const LO_U64: u64 = 0x0101010101010101; const HI_U64: u64 = 0x8080808080808080; -// use truncation +// Use truncation. const LO_USIZE: usize = LO_U64 as usize; const HI_USIZE: usize = HI_U64 as usize; -/// Return `true` if `x` contains any zero byte. +/// Returns `true` if `x` contains any zero byte. /// -/// From *Matters Computational*, J. Arndt +/// From *Matters Computational*, J. Arndt: /// /// "The idea is to subtract one from each of the bytes and then look for /// bytes where the borrow propagated all the way to the most significant @@ -45,7 +35,7 @@ fn repeat_byte(b: u8) -> usize { (b as usize) * (::usize::MAX / 255) } -/// Return the first index matching the byte `x` in `text`. +/// Returns the first index matching the byte `x` in `text`. pub fn memchr(x: u8, text: &[u8]) -> Option { // Scan for a single byte value by reading two `usize` words at a time. // @@ -86,18 +76,18 @@ pub fn memchr(x: u8, text: &[u8]) -> Option { } } - // find the byte after the point the body loop stopped + // Find the byte after the point the body loop stopped. text[offset..].iter().position(|elt| *elt == x).map(|i| offset + i) } -/// Return the last index matching the byte `x` in `text`. +/// Returns the last index matching the byte `x` in `text`. pub fn memrchr(x: u8, text: &[u8]) -> Option { // Scan for a single byte value by reading two `usize` words at a time. // - // Split `text` in three parts - // - unaligned tail, after the last word aligned address in text - // - body, scan by 2 words at a time - // - the first remaining bytes, < 2 word size + // Split `text` in three parts: + // - unaligned tail, after the last word aligned address in text, + // - body, scanned by 2 words at a time, + // - the first remaining bytes, < 2 word size. let len = text.len(); let ptr = text.as_ptr(); type Chunk = usize; @@ -114,7 +104,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { return Some(offset + index); } - // search the body of the text, make sure we don't cross min_aligned_offset. + // Search the body of the text, make sure we don't cross min_aligned_offset. // offset is always aligned, so just testing `>` is sufficient and avoids possible // overflow. let repeated_x = repeat_byte(x); @@ -125,7 +115,7 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { let u = *(ptr.offset(offset as isize - 2 * chunk_bytes as isize) as *const Chunk); let v = *(ptr.offset(offset as isize - chunk_bytes as isize) as *const Chunk); - // break if there is a matching byte + // Break if there is a matching byte. 
let zu = contains_zero_byte(u ^ repeated_x); let zv = contains_zero_byte(v ^ repeated_x); if zu || zv { @@ -135,6 +125,6 @@ pub fn memrchr(x: u8, text: &[u8]) -> Option { offset -= 2 * chunk_bytes; } - // find the byte before the point the body loop stopped + // Find the byte before the point the body loop stopped. text[..offset].iter().rposition(|elt| *elt == x) } diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index 59c11b273293f..122ef9c79c276 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Slice management and manipulation. //! //! For more details see [`std::slice`]. @@ -44,7 +34,6 @@ use result::Result::{Ok, Err}; use ptr; use mem; use marker::{Copy, Send, Sync, Sized, self}; -use iter_private::TrustedRandomAccess; #[unstable(feature = "slice_internals", issue = "0", reason = "exposed from core to be reused in std; use the memchr crate")] @@ -877,6 +866,7 @@ impl [T] { /// assert_eq!(iter.remainder(), &['l']); /// ``` /// + /// [`chunks`]: #method.chunks /// [`rchunks`]: #method.rchunks /// [`chunks_exact`]: #method.chunks_exact #[stable(feature = "rchunks", since = "1.31.0")] @@ -921,6 +911,7 @@ impl [T] { /// assert_eq!(v, &[0, 2, 2, 1, 1]); /// ``` /// + /// [`chunks_mut`]: #method.chunks_mut /// [`rchunks_mut`]: #method.rchunks_mut /// [`chunks_exact_mut`]: #method.chunks_exact_mut #[stable(feature = "rchunks", since = "1.31.0")] @@ -1206,7 +1197,7 @@ impl [T] { /// Returns an iterator over subslices separated by elements that match /// `pred` limited to returning at most `n` items. This starts at the end of - /// the slice and works backwards. The matched element is not contained in + /// the slice and works backwards. The matched element is not contained in /// the subslices. /// /// The last element returned, if any, will contain the remainder of the @@ -1572,6 +1563,10 @@ impl [T] { /// randomization to avoid degenerate cases, but with a fixed seed to always provide /// deterministic behavior. /// + /// Due to its key calling strategy, [`sort_unstable_by_key`](#method.sort_unstable_by_key) + /// is likely to be slower than [`sort_by_cached_key`](#method.sort_by_cached_key) in + /// cases where the key function is expensive. + /// /// # Examples /// /// ``` @@ -1590,6 +1585,153 @@ impl [T] { sort::quicksort(self, |a, b| f(a).lt(&f(b))); } + /// Reorder the slice such that the element at `index` is at its final sorted position. + /// + /// This reordering has the additional property that any value at position `i < index` will be + /// less than or equal to any value at a position `j > index`. Additionally, this reordering is + /// unstable (i.e. any number of equal elements may end up at position `index`), in-place + /// (i.e. does not allocate), and `O(n)` worst-case. This function is also/ known as "kth + /// element" in other libraries. It returns a triplet of the following values: all elements less + /// than the one at the given index, the value at the given index, and all elements greater than + /// the one at the given index. 
+ /// + /// # Current implementation + /// + /// The current algorithm is based on the quickselect portion of the same quicksort algorithm + /// used for [`sort_unstable`]. + /// + /// [`sort_unstable`]: #method.sort_unstable + /// + /// # Panics + /// + /// Panics when `index >= len()`, meaning it always panics on empty slices. + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_partition_at_index)] + /// + /// let mut v = [-5i32, 4, 1, -3, 2]; + /// + /// // Find the median + /// v.partition_at_index(2); + /// + /// // We are only guaranteed the slice will be one of the following, based on the way we sort + /// // about the specified index. + /// assert!(v == [-3, -5, 1, 2, 4] || + /// v == [-5, -3, 1, 2, 4] || + /// v == [-3, -5, 1, 4, 2] || + /// v == [-5, -3, 1, 4, 2]); + /// ``` + #[unstable(feature = "slice_partition_at_index", issue = "55300")] + #[inline] + pub fn partition_at_index(&mut self, index: usize) -> (&mut [T], &mut T, &mut [T]) + where T: Ord + { + let mut f = |a: &T, b: &T| a.lt(b); + sort::partition_at_index(self, index, &mut f) + } + + /// Reorder the slice with a comparator function such that the element at `index` is at its + /// final sorted position. + /// + /// This reordering has the additional property that any value at position `i < index` will be + /// less than or equal to any value at a position `j > index` using the comparator function. + /// Additionally, this reordering is unstable (i.e. any number of equal elements may end up at + /// position `index`), in-place (i.e. does not allocate), and `O(n)` worst-case. This function + /// is also known as "kth element" in other libraries. It returns a triplet of the following + /// values: all elements less than the one at the given index, the value at the given index, + /// and all elements greater than the one at the given index, using the provided comparator + /// function. + /// + /// # Current implementation + /// + /// The current algorithm is based on the quickselect portion of the same quicksort algorithm + /// used for [`sort_unstable`]. + /// + /// [`sort_unstable`]: #method.sort_unstable + /// + /// # Panics + /// + /// Panics when `index >= len()`, meaning it always panics on empty slices. + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_partition_at_index)] + /// + /// let mut v = [-5i32, 4, 1, -3, 2]; + /// + /// // Find the median as if the slice were sorted in descending order. + /// v.partition_at_index_by(2, |a, b| b.cmp(a)); + /// + /// // We are only guaranteed the slice will be one of the following, based on the way we sort + /// // about the specified index. + /// assert!(v == [2, 4, 1, -5, -3] || + /// v == [2, 4, 1, -3, -5] || + /// v == [4, 2, 1, -5, -3] || + /// v == [4, 2, 1, -3, -5]); + /// ``` + #[unstable(feature = "slice_partition_at_index", issue = "55300")] + #[inline] + pub fn partition_at_index_by(&mut self, index: usize, mut compare: F) + -> (&mut [T], &mut T, &mut [T]) + where F: FnMut(&T, &T) -> Ordering + { + let mut f = |a: &T, b: &T| compare(a, b) == Less; + sort::partition_at_index(self, index, &mut f) + } + + /// Reorder the slice with a key extraction function such that the element at `index` is at its + /// final sorted position. + /// + /// This reordering has the additional property that any value at position `i < index` will be + /// less than or equal to any value at a position `j > index` using the key extraction function. + /// Additionally, this reordering is unstable (i.e. 
any number of equal elements may end up at + /// position `index`), in-place (i.e. does not allocate), and `O(n)` worst-case. This function + /// is also known as "kth element" in other libraries. It returns a triplet of the following + /// values: all elements less than the one at the given index, the value at the given index, and + /// all elements greater than the one at the given index, using the provided key extraction + /// function. + /// + /// # Current implementation + /// + /// The current algorithm is based on the quickselect portion of the same quicksort algorithm + /// used for [`sort_unstable`]. + /// + /// [`sort_unstable`]: #method.sort_unstable + /// + /// # Panics + /// + /// Panics when `index >= len()`, meaning it always panics on empty slices. + /// + /// # Examples + /// + /// ``` + /// #![feature(slice_partition_at_index)] + /// + /// let mut v = [-5i32, 4, 1, -3, 2]; + /// + /// // Return the median as if the array were sorted according to absolute value. + /// v.partition_at_index_by_key(2, |a| a.abs()); + /// + /// // We are only guaranteed the slice will be one of the following, based on the way we sort + /// // about the specified index. + /// assert!(v == [1, 2, -3, 4, -5] || + /// v == [1, 2, -3, -5, 4] || + /// v == [2, 1, -3, 4, -5] || + /// v == [2, 1, -3, -5, 4]); + /// ``` + #[unstable(feature = "slice_partition_at_index", issue = "55300")] + #[inline] + pub fn partition_at_index_by_key(&mut self, index: usize, mut f: F) + -> (&mut [T], &mut T, &mut [T]) + where F: FnMut(&T) -> K, K: Ord + { + let mut g = |a: &T, b: &T| f(a).lt(&f(b)); + sort::partition_at_index(self, index, &mut g) + } + /// Moves all consecutive repeated elements to the end of the slice according to the /// [`PartialEq`] trait implementation. /// @@ -1791,7 +1933,7 @@ impl [T] { /// let mut a = ['a', 'b', 'c', 'd', 'e', 'f']; /// a[1..5].rotate_left(1); /// assert_eq!(a, ['a', 'c', 'd', 'e', 'b', 'f']); - /// ``` + /// ``` #[stable(feature = "slice_rotate", since = "1.26.0")] pub fn rotate_left(&mut self, mid: usize) { assert!(mid <= self.len()); @@ -2163,7 +2305,7 @@ impl [T] { /// This method has no purpose when either input element `T` or output element `U` are /// zero-sized and will return the original slice without splitting anything. /// - /// # Unsafety + /// # Safety /// /// This method is essentially a `transmute` with respect to the elements in the returned /// middle slice, so all the usual caveats pertaining to `transmute::` also apply here. @@ -2216,7 +2358,7 @@ impl [T] { /// This method has no purpose when either input element `T` or output element `U` are /// zero-sized and will return the original slice without splitting anything. /// - /// # Unsafety + /// # Safety /// /// This method is essentially a `transmute` with respect to the elements in the returned /// middle slice, so all the usual caveats pertaining to `transmute::` also apply here. @@ -2258,6 +2400,77 @@ impl [T] { from_raw_parts_mut(mut_ptr.add(rest.len() - ts_len), ts_len)) } } + + /// Checks if the elements of this slice are sorted. + /// + /// That is, for each element `a` and its following element `b`, `a <= b` must hold. If the + /// slice yields exactly zero or one element, `true` is returned. + /// + /// Note that if `Self::Item` is only `PartialOrd`, but not `Ord`, the above definition + /// implies that this function returns `false` if any two consecutive items are not + /// comparable. 
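The definition above amounts to a plain adjacent-pairs check. A free-standing illustration of that rule using `windows(2)` and `partial_cmp` (not the libcore implementation, which is specialized on the slice iterator further down):

```rust
use std::cmp::Ordering;

// Every adjacent pair must be comparable and non-decreasing.
fn is_sorted_like<T: PartialOrd>(v: &[T]) -> bool {
    v.windows(2).all(|w| match w[0].partial_cmp(&w[1]) {
        Some(Ordering::Greater) | None => false,
        _ => true,
    })
}

fn main() {
    assert!(is_sorted_like(&[1, 2, 2, 9]));
    assert!(!is_sorted_like(&[1, 3, 2, 4]));
    // `NaN` is incomparable, so the whole slice counts as unsorted.
    assert!(!is_sorted_like(&[0.0, 1.0, std::f32::NAN]));
}
```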
+ /// + /// # Examples + /// + /// ``` + /// #![feature(is_sorted)] + /// let empty: [i32; 0] = []; + /// + /// assert!([1, 2, 2, 9].is_sorted()); + /// assert!(![1, 3, 2, 4].is_sorted()); + /// assert!([0].is_sorted()); + /// assert!(empty.is_sorted()); + /// assert!(![0.0, 1.0, std::f32::NAN].is_sorted()); + /// ``` + #[inline] + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + pub fn is_sorted(&self) -> bool + where + T: PartialOrd, + { + self.is_sorted_by(|a, b| a.partial_cmp(b)) + } + + /// Checks if the elements of this slice are sorted using the given comparator function. + /// + /// Instead of using `PartialOrd::partial_cmp`, this function uses the given `compare` + /// function to determine the ordering of two elements. Apart from that, it's equivalent to + /// [`is_sorted`]; see its documentation for more information. + /// + /// [`is_sorted`]: #method.is_sorted + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + pub fn is_sorted_by(&self, mut compare: F) -> bool + where + F: FnMut(&T, &T) -> Option + { + self.iter().is_sorted_by(|a, b| compare(*a, *b)) + } + + /// Checks if the elements of this slice are sorted using the given key extraction function. + /// + /// Instead of comparing the slice's elements directly, this function compares the keys of the + /// elements, as determined by `f`. Apart from that, it's equivalent to [`is_sorted`]; see its + /// documentation for more information. + /// + /// [`is_sorted`]: #method.is_sorted + /// + /// # Examples + /// + /// ``` + /// #![feature(is_sorted)] + /// + /// assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len())); + /// assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs())); + /// ``` + #[inline] + #[unstable(feature = "is_sorted", reason = "new API", issue = "53485")] + pub fn is_sorted_by_key(&self, mut f: F) -> bool + where + F: FnMut(&T) -> K, + K: PartialOrd + { + self.is_sorted_by(|a, b| f(a).partial_cmp(&f(b))) + } } #[lang = "slice_u8"] @@ -2320,7 +2533,6 @@ impl [u8] { } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] impl ops::Index for [T] where I: SliceIndex<[T]> { @@ -2333,7 +2545,6 @@ impl ops::Index for [T] } #[stable(feature = "rust1", since = "1.0.0")] -#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] impl ops::IndexMut for [T] where I: SliceIndex<[T]> { @@ -2384,7 +2595,19 @@ mod private_slice_index { /// A helper trait used for indexing operations. #[stable(feature = "slice_get_slice", since = "1.28.0")] -#[rustc_on_unimplemented = "slice indices are of type `usize` or ranges of `usize`"] +#[rustc_on_unimplemented( + on( + T = "str", + label = "string indices are ranges of `usize`", + ), + on( + all(any(T = "str", T = "&str", T = "std::string::String"), _Self="{integer}"), + note="you can use `.chars().nth()` or `.bytes().nth()` +see chapter in The Book " + ), + message = "the type `{T}` cannot be indexed by `{Self}`", + label = "slice indices are of type `usize` or ranges of `usize`", +)] pub trait SliceIndex: private_slice_index::Sealed { /// The output type returned by methods. #[stable(feature = "slice_get_slice", since = "1.28.0")] @@ -2781,7 +3004,13 @@ macro_rules! len { // The shared definition of the `Iter` and `IterMut` iterators macro_rules! 
iterator { - (struct $name:ident -> $ptr:ty, $elem:ty, $raw_mut:tt, $( $mut_:tt )*) => { + ( + struct $name:ident -> $ptr:ty, + $elem:ty, + $raw_mut:tt, + {$( $mut_:tt )*}, + {$($extra:tt)*} + ) => { impl<'a, T> $name<'a, T> { // Helper function for creating a slice from the iterator. #[inline(always)] @@ -2821,7 +3050,7 @@ macro_rules! iterator { } #[stable(feature = "rust1", since = "1.0.0")] - impl<'a, T> ExactSizeIterator for $name<'a, T> { + impl ExactSizeIterator for $name<'_, T> { #[inline(always)] fn len(&self) -> usize { len!(self) @@ -2958,6 +3187,8 @@ macro_rules! iterator { i }) } + + $($extra)* } #[stable(feature = "rust1", since = "1.0.0")] @@ -3014,10 +3245,10 @@ macro_rules! iterator { } #[stable(feature = "fused", since = "1.26.0")] - impl<'a, T> FusedIterator for $name<'a, T> {} + impl FusedIterator for $name<'_, T> {} #[unstable(feature = "trusted_len", issue = "37572")] - unsafe impl<'a, T> TrustedLen for $name<'a, T> {} + unsafe impl TrustedLen for $name<'_, T> {} } } @@ -3065,7 +3296,7 @@ unsafe impl Sync for Iter<'_, T> {} unsafe impl Send for Iter<'_, T> {} impl<'a, T> Iter<'a, T> { - /// View the underlying data as a subslice of the original data. + /// Views the underlying data as a subslice of the original data. /// /// This has the same lifetime as the original slice, and so the /// iterator can continue to be used while this exists. @@ -3095,7 +3326,17 @@ impl<'a, T> Iter<'a, T> { } } -iterator!{struct Iter -> *const T, &'a T, const, /* no mut */} +iterator!{struct Iter -> *const T, &'a T, const, {/* no mut */}, { + fn is_sorted_by(self, mut compare: F) -> bool + where + Self: Sized, + F: FnMut(&Self::Item, &Self::Item) -> Option, + { + self.as_slice().windows(2).all(|w| { + compare(&&w[0], &&w[1]).map(|o| o != Ordering::Greater).unwrap_or(false) + }) + } +}} #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Iter<'_, T> { @@ -3157,7 +3398,7 @@ unsafe impl Sync for IterMut<'_, T> {} unsafe impl Send for IterMut<'_, T> {} impl<'a, T> IterMut<'a, T> { - /// View the underlying data as a subslice of the original data. + /// Views the underlying data as a subslice of the original data. /// /// To avoid creating `&mut` references that alias, this is forced /// to consume the iterator. @@ -3194,9 +3435,37 @@ impl<'a, T> IterMut<'a, T> { pub fn into_slice(self) -> &'a mut [T] { unsafe { from_raw_parts_mut(self.ptr, len!(self)) } } + + /// Views the underlying data as a subslice of the original data. + /// + /// To avoid creating `&mut [T]` references that alias, the returned slice + /// borrows its lifetime from the iterator the method is applied on. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// # #![feature(slice_iter_mut_as_slice)] + /// let mut slice: &mut [usize] = &mut [1, 2, 3]; + /// + /// // First, we get the iterator: + /// let mut iter = slice.iter_mut(); + /// // So if we check what the `as_slice` method returns here, we have "[1, 2, 3]": + /// assert_eq!(iter.as_slice(), &[1, 2, 3]); + /// + /// // Next, we move to the second element of the slice: + /// iter.next(); + /// // Now `as_slice` returns "[2, 3]": + /// assert_eq!(iter.as_slice(), &[2, 3]); + /// ``` + #[unstable(feature = "slice_iter_mut_as_slice", reason = "recently added", issue = "58957")] + pub fn as_slice(&self) -> &[T] { + self.make_slice() + } } -iterator!{struct IterMut -> *mut T, &'a mut T, mut, mut} +iterator!{struct IterMut -> *mut T, &'a mut T, mut, {mut}, {}} /// An internal abstraction over the splitting iterators, so that /// splitn, splitn_mut etc can be implemented once. @@ -3745,6 +4014,19 @@ impl<'a, T> DoubleEndedIterator for Windows<'a, T> { ret } } + + #[inline] + fn nth_back(&mut self, n: usize) -> Option { + let (end, overflow) = self.v.len().overflowing_sub(n); + if end < self.size || overflow { + self.v = &[]; + None + } else { + let ret = &self.v[end-self.size..end]; + self.v = &self.v[..end-1]; + Some(ret) + } + } } #[stable(feature = "rust1", since = "1.0.0")] @@ -4033,7 +4315,7 @@ pub struct ChunksExact<'a, T:'a> { } impl<'a, T> ChunksExact<'a, T> { - /// Return the remainder of the original slice that is not going to be + /// Returns the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. #[stable(feature = "chunks_exact", since = "1.31.0")] @@ -4157,7 +4439,7 @@ pub struct ChunksExactMut<'a, T:'a> { } impl<'a, T> ChunksExactMut<'a, T> { - /// Return the remainder of the original slice that is not going to be + /// Returns the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. 
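On the `nth_back` override added for `Windows` a few hunks above: counting from the back, `nth_back(n)` skips `n` windows and yields the next one, shrinking the underlying slice accordingly. A usage sketch, assuming a toolchain where `DoubleEndedIterator::nth_back` is available (it was still a new API at the time of this change):

```rust
fn main() {
    let v = [1, 2, 3, 4, 5];
    // Windows, front to back: [1,2] [2,3] [3,4] [4,5].
    let mut iter = v.windows(2);
    // `nth_back(1)` skips the last window and yields the one before it.
    assert_eq!(iter.nth_back(1), Some(&[3, 4][..]));
    // Iteration continues backwards from there.
    assert_eq!(iter.next_back(), Some(&[2, 3][..]));
    assert_eq!(iter.next(), Some(&[1, 2][..]));
}
```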
#[stable(feature = "chunks_exact", since = "1.31.0")] @@ -4271,8 +4553,8 @@ pub struct RChunks<'a, T:'a> { // FIXME(#26925) Remove in favor of `#[derive(Clone)]` #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> Clone for RChunks<'a, T> { - fn clone(&self) -> RChunks<'a, T> { +impl Clone for RChunks<'_, T> { + fn clone(&self) -> Self { RChunks { v: self.v, chunk_size: self.chunk_size, @@ -4361,13 +4643,13 @@ impl<'a, T> DoubleEndedIterator for RChunks<'a, T> { } #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> ExactSizeIterator for RChunks<'a, T> {} +impl ExactSizeIterator for RChunks<'_, T> {} #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for RChunks<'a, T> {} +unsafe impl TrustedLen for RChunks<'_, T> {} #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> FusedIterator for RChunks<'a, T> {} +impl FusedIterator for RChunks<'_, T> {} #[doc(hidden)] #[stable(feature = "rchunks", since = "1.31.0")] @@ -4486,13 +4768,13 @@ impl<'a, T> DoubleEndedIterator for RChunksMut<'a, T> { } #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> ExactSizeIterator for RChunksMut<'a, T> {} +impl ExactSizeIterator for RChunksMut<'_, T> {} #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for RChunksMut<'a, T> {} +unsafe impl TrustedLen for RChunksMut<'_, T> {} #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> FusedIterator for RChunksMut<'a, T> {} +impl FusedIterator for RChunksMut<'_, T> {} #[doc(hidden)] #[stable(feature = "rchunks", since = "1.31.0")] @@ -4529,7 +4811,7 @@ pub struct RChunksExact<'a, T:'a> { } impl<'a, T> RChunksExact<'a, T> { - /// Return the remainder of the original slice that is not going to be + /// Returns the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. #[stable(feature = "rchunks", since = "1.31.0")] @@ -4617,10 +4899,10 @@ impl<'a, T> ExactSizeIterator for RChunksExact<'a, T> { } #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for RChunksExact<'a, T> {} +unsafe impl TrustedLen for RChunksExact<'_, T> {} #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> FusedIterator for RChunksExact<'a, T> {} +impl FusedIterator for RChunksExact<'_, T> {} #[doc(hidden)] #[stable(feature = "rchunks", since = "1.31.0")] @@ -4654,7 +4936,7 @@ pub struct RChunksExactMut<'a, T:'a> { } impl<'a, T> RChunksExactMut<'a, T> { - /// Return the remainder of the original slice that is not going to be + /// Returns the remainder of the original slice that is not going to be /// returned by the iterator. The returned slice has at most `chunk_size-1` /// elements. 
#[stable(feature = "rchunks", since = "1.31.0")] @@ -4728,17 +5010,17 @@ impl<'a, T> DoubleEndedIterator for RChunksExactMut<'a, T> { } #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> ExactSizeIterator for RChunksExactMut<'a, T> { +impl ExactSizeIterator for RChunksExactMut<'_, T> { fn is_empty(&self) -> bool { self.v.is_empty() } } #[unstable(feature = "trusted_len", issue = "37572")] -unsafe impl<'a, T> TrustedLen for RChunksExactMut<'a, T> {} +unsafe impl TrustedLen for RChunksExactMut<'_, T> {} #[stable(feature = "rchunks", since = "1.31.0")] -impl<'a, T> FusedIterator for RChunksExactMut<'a, T> {} +impl FusedIterator for RChunksExactMut<'_, T> {} #[doc(hidden)] #[stable(feature = "rchunks", since = "1.31.0")] diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs index 07153735300b8..8f10c3576a787 100644 --- a/src/libcore/slice/rotate.rs +++ b/src/libcore/slice/rotate.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use cmp; use mem::{self, MaybeUninit}; use ptr; @@ -36,7 +26,7 @@ impl RawArray { } /// Rotates the range `[mid-left, mid+right)` such that the element at `mid` -/// becomes the first element. Equivalently, rotates the range `left` +/// becomes the first element. Equivalently, rotates the range `left` /// elements to the left or `right` elements to the right. /// /// # Safety @@ -46,10 +36,10 @@ impl RawArray { /// # Algorithm /// /// For longer rotations, swap the left-most `delta = min(left, right)` -/// elements with the right-most `delta` elements. LLVM vectorizes this, +/// elements with the right-most `delta` elements. LLVM vectorizes this, /// which is profitable as we only reach this step for a "large enough" -/// rotation. Doing this puts `delta` elements on the larger side into the -/// correct position, leaving a smaller rotate problem. Demonstration: +/// rotation. Doing this puts `delta` elements on the larger side into the +/// correct position, leaving a smaller rotate problem. Demonstration: /// /// ```text /// [ 6 7 8 9 10 11 12 13 . 1 2 3 4 5 ] @@ -82,7 +72,7 @@ pub unsafe fn ptr_rotate(mut left: usize, mid: *mut T, mut right: usize) { } } - let mut rawarray = MaybeUninit::>::uninitialized(); + let mut rawarray = MaybeUninit::>::uninit(); let buf = &mut (*rawarray.as_mut_ptr()).typed as *mut [T; 2] as *mut T; let dim = mid.sub(left).add(right); diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs index affe84fbef91f..68f1fb4b526ad 100644 --- a/src/libcore/slice/sort.rs +++ b/src/libcore/slice/sort.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Slice sorting //! //! 
This module contains an sort algorithm based on Orson Peters' pattern-defeating quicksort, @@ -226,14 +216,14 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut block_l = BLOCK; let mut start_l = ptr::null_mut(); let mut end_l = ptr::null_mut(); - let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized(); + let mut offsets_l: [MaybeUninit; BLOCK] = uninitialized_array![u8; BLOCK]; // The current block on the right side (from `r.sub(block_r)` to `r`). let mut r = unsafe { l.add(v.len()) }; let mut block_r = BLOCK; let mut start_r = ptr::null_mut(); let mut end_r = ptr::null_mut(); - let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized(); + let mut offsets_r: [MaybeUninit; BLOCK] = uninitialized_array![u8; BLOCK]; // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient. @@ -272,8 +262,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize if start_l == end_l { // Trace `block_l` elements from the left side. - start_l = offsets_l.as_mut_ptr() as *mut u8; - end_l = offsets_l.as_mut_ptr() as *mut u8; + start_l = MaybeUninit::first_ptr_mut(&mut offsets_l); + end_l = MaybeUninit::first_ptr_mut(&mut offsets_l); let mut elem = l; for i in 0..block_l { @@ -288,8 +278,8 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize if start_r == end_r { // Trace `block_r` elements from the right side. - start_r = offsets_r.as_mut_ptr() as *mut u8; - end_r = offsets_r.as_mut_ptr() as *mut u8; + start_r = MaybeUninit::first_ptr_mut(&mut offsets_r); + end_r = MaybeUninit::first_ptr_mut(&mut offsets_r); let mut elem = r; for i in 0..block_r { @@ -701,3 +691,92 @@ pub fn quicksort(v: &mut [T], mut is_less: F) recurse(v, &mut is_less, None, limit); } + +fn partition_at_index_loop<'a, T, F>( mut v: &'a mut [T], mut index: usize, is_less: &mut F + , mut pred: Option<&'a T>) where F: FnMut(&T, &T) -> bool +{ + loop { + // For slices of up to this length it's probably faster to simply sort them. + const MAX_INSERTION: usize = 10; + if v.len() <= MAX_INSERTION { + insertion_sort(v, is_less); + return; + } + + // Choose a pivot + let (pivot, _) = choose_pivot(v, is_less); + + // If the chosen pivot is equal to the predecessor, then it's the smallest element in the + // slice. Partition the slice into elements equal to and elements greater than the pivot. + // This case is usually hit when the slice contains many duplicate elements. + if let Some(p) = pred { + if !is_less(p, &v[pivot]) { + let mid = partition_equal(v, pivot, is_less); + + // If we've passed our index, then we're good. + if mid > index { + return; + } + + // Otherwise, continue sorting elements greater than the pivot. + v = &mut v[mid..]; + index = index - mid; + pred = None; + continue; + } + } + + let (mid, _) = partition(v, pivot, is_less); + + // Split the slice into `left`, `pivot`, and `right`. + let (left, right) = {v}.split_at_mut(mid); + let (pivot, right) = right.split_at_mut(1); + let pivot = &pivot[0]; + + if mid < index { + v = right; + index = index - mid - 1; + pred = Some(pivot); + } else if mid > index { + v = left; + } else { + // If mid == index, then we're done, since partition() guaranteed that all elements + // after mid are greater than or equal to mid. 
+ return; + } + } +} + +pub fn partition_at_index(v: &mut [T], index: usize, mut is_less: F) + -> (&mut [T], &mut T, &mut [T]) where F: FnMut(&T, &T) -> bool +{ + use cmp::Ordering::Less; + use cmp::Ordering::Greater; + + if index >= v.len() { + panic!("partition_at_index index {} greater than length of slice {}", index, v.len()); + } + + if mem::size_of::() == 0 { + // Sorting has no meaningful behavior on zero-sized types. Do nothing. + } else if index == v.len() - 1 { + // Find max element and place it in the last position of the array. We're free to use + // `unwrap()` here because we know v must not be empty. + let (max_index, _) = v.iter().enumerate().max_by( + |&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater }).unwrap(); + v.swap(max_index, index); + } else if index == 0 { + // Find min element and place it in the first position of the array. We're free to use + // `unwrap()` here because we know v must not be empty. + let (min_index, _) = v.iter().enumerate().min_by( + |&(_, x), &(_, y)| if is_less(x, y) { Less } else { Greater }).unwrap(); + v.swap(min_index, index); + } else { + partition_at_index_loop(v, index, &mut is_less, None); + } + + let (left, right) = v.split_at_mut(index); + let (pivot, right) = right.split_at_mut(1); + let pivot = &mut pivot[0]; + (left, pivot, right) +} diff --git a/src/libcore/str/lossy.rs b/src/libcore/str/lossy.rs index 52abd8f99529b..b3e8527c4ae0b 100644 --- a/src/libcore/str/lossy.rs +++ b/src/libcore/str/lossy.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use char; use str as core_str; use fmt; diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index 4a22d929fede0..f54d7badc3ae0 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! String manipulation +//! String manipulation. //! -//! For more details, see std::str +//! For more details, see the `std::str` module. #![stable(feature = "rust1", since = "1.0.0")] @@ -18,11 +8,13 @@ use self::pattern::Pattern; use self::pattern::{Searcher, ReverseSearcher, DoubleEndedSearcher}; use char; -use fmt; -use iter::{Map, Cloned, FusedIterator, TrustedLen, Filter}; -use iter_private::TrustedRandomAccess; +use fmt::{self, Write}; +use iter::{Map, Cloned, FusedIterator, TrustedLen, TrustedRandomAccess, Filter}; +use iter::{Flatten, FlatMap, Chain}; use slice::{self, SliceIndex, Split as SliceSplit}; use mem; +use ops::Try; +use option; pub mod pattern; @@ -30,8 +22,7 @@ pub mod pattern; #[allow(missing_docs)] pub mod lossy; -/// A trait to abstract the idea of creating a new instance of a type from a -/// string. +/// Parse a value from a string /// /// `FromStr`'s [`from_str`] method is often used implicitly, through /// [`str`]'s [`parse`] method. See [`parse`]'s documentation for examples. 
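Going back to the `partition_at_index` plumbing added in `slice/sort.rs` above: the public methods wired to it return the triplet described in their docs. A nightly-only usage sketch under the `slice_partition_at_index` feature gate introduced by this change:

```rust
#![feature(slice_partition_at_index)]

fn main() {
    let mut v = [-5i32, 4, 1, -3, 2];
    // The element with sorted rank 2 ends up at index 2; everything below
    // and above it is handed back as the two surrounding subslices.
    let (lesser, median, greater) = v.partition_at_index(2);
    assert_eq!(*median, 1);
    assert!(lesser.iter().all(|&x| x <= 1));
    assert!(greater.iter().all(|&x| x >= 1));
}
```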
@@ -40,6 +31,11 @@ pub mod lossy; /// [`str`]: ../../std/primitive.str.html /// [`parse`]: ../../std/primitive.str.html#method.parse /// +/// `FromStr` does not have a lifetime parameter, and so you can only parse types +/// that do not contain a lifetime parameter themselves. In other words, you can +/// parse an `i32` with `FromStr`, but not a `&i32`. You can parse a struct that +/// contains an `i32`, but not one that contains an `&i32`. +/// /// # Examples /// /// Basic implementation of `FromStr` on an example `Point` type: @@ -233,7 +229,7 @@ impl Utf8Error { #[stable(feature = "utf8_error", since = "1.5.0")] pub fn valid_up_to(&self) -> usize { self.valid_up_to } - /// Provide more information about the failure: + /// Provides more information about the failure: /// /// * `None`: the end of the input was reached unexpectedly. /// `self.valid_up_to()` is 1 to 3 bytes from the end of the input. @@ -619,7 +615,7 @@ impl<'a> DoubleEndedIterator for Chars<'a> { impl FusedIterator for Chars<'_> {} impl<'a> Chars<'a> { - /// View the underlying data as a subslice of the original data. + /// Views the underlying data as a subslice of the original data. /// /// This has the same lifetime as the original slice, and so the /// iterator can continue to be used while this exists. @@ -709,7 +705,7 @@ impl<'a> DoubleEndedIterator for CharIndices<'a> { impl FusedIterator for CharIndices<'_> {} impl<'a> CharIndices<'a> { - /// View the underlying data as a subslice of the original data. + /// Views the underlying data as a subslice of the original data. /// /// This has the same lifetime as the original slice, and so the /// iterator can continue to be used while this exists. @@ -827,7 +823,7 @@ impl FusedIterator for Bytes<'_> {} unsafe impl TrustedLen for Bytes<'_> {} #[doc(hidden)] -unsafe impl<'a> TrustedRandomAccess for Bytes<'a> { +unsafe impl TrustedRandomAccess for Bytes<'_> { unsafe fn get_unchecked(&mut self, i: usize) -> u8 { self.0.get_unchecked(i) } @@ -1352,33 +1348,14 @@ impl FusedIterator for Lines<'_> {} #[allow(deprecated)] pub struct LinesAny<'a>(Lines<'a>); -/// A nameable, cloneable fn type -#[derive(Clone)] -struct LinesAnyMap; - -impl<'a> Fn<(&'a str,)> for LinesAnyMap { - #[inline] - extern "rust-call" fn call(&self, (line,): (&'a str,)) -> &'a str { +impl_fn_for_zst! { + /// A nameable, cloneable fn type + #[derive(Clone)] + struct LinesAnyMap impl<'a> Fn = |line: &'a str| -> &'a str { let l = line.len(); if l > 0 && line.as_bytes()[l - 1] == b'\r' { &line[0 .. l - 1] } else { line } - } -} - -impl<'a> FnMut<(&'a str,)> for LinesAnyMap { - #[inline] - extern "rust-call" fn call_mut(&mut self, (line,): (&'a str,)) -> &'a str { - Fn::call(&*self, (line,)) - } -} - -impl<'a> FnOnce<(&'a str,)> for LinesAnyMap { - type Output = &'a str; - - #[inline] - extern "rust-call" fn call_once(self, (line,): (&'a str,)) -> &'a str { - Fn::call(&self, (line,)) - } + }; } #[stable(feature = "rust1", since = "1.0.0")] @@ -1586,9 +1563,9 @@ mod traits { /// Implements ordering of strings. /// - /// Strings are ordered lexicographically by their byte values. This orders Unicode code - /// points based on their positions in the code charts. This is not necessarily the same as - /// "alphabetical" order, which varies by language and locale. Sorting strings according to + /// Strings are ordered lexicographically by their byte values. This orders Unicode code + /// points based on their positions in the code charts. 
This is not necessarily the same as + /// "alphabetical" order, which varies by language and locale. Sorting strings according to /// culturally-accepted standards requires locale-specific data that is outside the scope of /// the `str` type. #[stable(feature = "rust1", since = "1.0.0")] @@ -1614,9 +1591,9 @@ mod traits { /// Implements comparison operations on strings. /// - /// Strings are compared lexicographically by their byte values. This compares Unicode code - /// points based on their positions in the code charts. This is not necessarily the same as - /// "alphabetical" order, which varies by language and locale. Comparing strings according to + /// Strings are compared lexicographically by their byte values. This compares Unicode code + /// points based on their positions in the code charts. This is not necessarily the same as + /// "alphabetical" order, which varies by language and locale. Comparing strings according to /// culturally-accepted standards requires locale-specific data that is outside the scope of /// the `str` type. #[stable(feature = "rust1", since = "1.0.0")] @@ -1627,190 +1604,26 @@ mod traits { } } - /// Implements substring slicing with syntax `&self[begin .. end]`. - /// - /// Returns a slice of the given string from the byte range - /// [`begin`..`end`). - /// - /// This operation is `O(1)`. - /// - /// # Panics - /// - /// Panics if `begin` or `end` does not point to the starting - /// byte offset of a character (as defined by `is_char_boundary`). - /// Requires that `begin <= end` and `end <= len` where `len` is the - /// length of the string. - /// - /// # Examples - /// - /// ``` - /// let s = "Löwe 老虎 Léopard"; - /// assert_eq!(&s[0 .. 1], "L"); - /// - /// assert_eq!(&s[1 .. 9], "öwe 老"); - /// - /// // these will panic: - /// // byte 2 lies within `ö`: - /// // &s[2 ..3]; - /// - /// // byte 8 lies within `老` - /// // &s[1 .. 8]; - /// - /// // byte 100 is outside the string - /// // &s[3 .. 100]; - /// ``` - #[stable(feature = "rust1", since = "1.0.0")] - impl ops::Index> for str { - type Output = str; - #[inline] - fn index(&self, index: ops::Range) -> &str { - index.index(self) - } - } - - /// Implements mutable substring slicing with syntax - /// `&mut self[begin .. end]`. - /// - /// Returns a mutable slice of the given string from the byte range - /// [`begin`..`end`). - /// - /// This operation is `O(1)`. - /// - /// # Panics - /// - /// Panics if `begin` or `end` does not point to the starting - /// byte offset of a character (as defined by `is_char_boundary`). - /// Requires that `begin <= end` and `end <= len` where `len` is the - /// length of the string. - #[stable(feature = "derefmut_for_string", since = "1.3.0")] - impl ops::IndexMut> for str { - #[inline] - fn index_mut(&mut self, index: ops::Range) -> &mut str { - index.index_mut(self) - } - } - - /// Implements substring slicing with syntax `&self[.. end]`. - /// - /// Returns a slice of the string from the beginning to byte offset - /// `end`. - /// - /// Equivalent to `&self[0 .. end]`. #[stable(feature = "rust1", since = "1.0.0")] - impl ops::Index> for str { - type Output = str; - - #[inline] - fn index(&self, index: ops::RangeTo) -> &str { - index.index(self) - } - } - - /// Implements mutable substring slicing with syntax `&mut self[.. end]`. - /// - /// Returns a mutable slice of the string from the beginning to byte offset - /// `end`. - /// - /// Equivalent to `&mut self[0 .. end]`. 
- #[stable(feature = "derefmut_for_string", since = "1.3.0")] - impl ops::IndexMut> for str { - #[inline] - fn index_mut(&mut self, index: ops::RangeTo) -> &mut str { - index.index_mut(self) - } - } - - /// Implements substring slicing with syntax `&self[begin ..]`. - /// - /// Returns a slice of the string from byte offset `begin` - /// to the end of the string. - /// - /// Equivalent to `&self[begin .. len]`. - #[stable(feature = "rust1", since = "1.0.0")] - impl ops::Index> for str { - type Output = str; + impl ops::Index for str + where + I: SliceIndex, + { + type Output = I::Output; #[inline] - fn index(&self, index: ops::RangeFrom) -> &str { + fn index(&self, index: I) -> &I::Output { index.index(self) } } - /// Implements mutable substring slicing with syntax `&mut self[begin ..]`. - /// - /// Returns a mutable slice of the string from byte offset `begin` - /// to the end of the string. - /// - /// Equivalent to `&mut self[begin .. len]`. - #[stable(feature = "derefmut_for_string", since = "1.3.0")] - impl ops::IndexMut> for str { - #[inline] - fn index_mut(&mut self, index: ops::RangeFrom) -> &mut str { - index.index_mut(self) - } - } - - /// Implements substring slicing with syntax `&self[..]`. - /// - /// Returns a slice of the whole string. This operation can - /// never panic. - /// - /// Equivalent to `&self[0 .. len]`. #[stable(feature = "rust1", since = "1.0.0")] - impl ops::Index for str { - type Output = str; - - #[inline] - fn index(&self, _index: ops::RangeFull) -> &str { - self - } - } - - /// Implements mutable substring slicing with syntax `&mut self[..]`. - /// - /// Returns a mutable slice of the whole string. This operation can - /// never panic. - /// - /// Equivalent to `&mut self[0 .. len]`. - #[stable(feature = "derefmut_for_string", since = "1.3.0")] - impl ops::IndexMut for str { - #[inline] - fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str { - self - } - } - - #[stable(feature = "inclusive_range", since = "1.26.0")] - impl ops::Index> for str { - type Output = str; - - #[inline] - fn index(&self, index: ops::RangeInclusive) -> &str { - index.index(self) - } - } - - #[stable(feature = "inclusive_range", since = "1.26.0")] - impl ops::Index> for str { - type Output = str; - - #[inline] - fn index(&self, index: ops::RangeToInclusive) -> &str { - index.index(self) - } - } - - #[stable(feature = "inclusive_range", since = "1.26.0")] - impl ops::IndexMut> for str { - #[inline] - fn index_mut(&mut self, index: ops::RangeInclusive) -> &mut str { - index.index_mut(self) - } - } - #[stable(feature = "inclusive_range", since = "1.26.0")] - impl ops::IndexMut> for str { + impl ops::IndexMut for str + where + I: SliceIndex, + { #[inline] - fn index_mut(&mut self, index: ops::RangeToInclusive) -> &mut str { + fn index_mut(&mut self, index: I) -> &mut I::Output { index.index_mut(self) } } @@ -1821,6 +1634,18 @@ mod traits { panic!("attempted to index str up to maximum usize"); } + /// Implements substring slicing with syntax `&self[..]` or `&mut self[..]`. + /// + /// Returns a slice of the whole string, i.e., returns `&self` or `&mut + /// self`. Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`. Unlike + /// other indexing operations, this can never panic. + /// + /// This operation is `O(1)`. + /// + /// Prior to 1.20.0, these indexing operations were still supported by + /// direct implementation of `Index` and `IndexMut`. + /// + /// Equivalent to `&self[0 .. len]` or `&mut self[0 .. len]`. 
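The rewrite above collapses the per-range `Index`/`IndexMut` impls for `str` into one generic impl over `SliceIndex<str>`; existing indexing syntax is unchanged, and `get` remains the non-panicking form. A quick illustration (byte offsets assume the sample string shown):

```rust
fn main() {
    let s = "Löwe 老虎 Léopard";
    // Every range form routes through the single generic `Index` impl.
    assert_eq!(&s[..4], "Löw");
    assert_eq!(&s[13..], "Léopard");
    // `get` is the non-panicking counterpart; byte 2 falls inside `ö`.
    assert_eq!(s.get(0..2), None);
}
```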
#[stable(feature = "str_checked_slicing", since = "1.20.0")] impl SliceIndex for ops::RangeFull { type Output = str; @@ -1850,6 +1675,41 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[begin .. end]` or `&mut + /// self[begin .. end]`. + /// + /// Returns a slice of the given string from the byte range + /// [`begin`, `end`). + /// + /// This operation is `O(1)`. + /// + /// Prior to 1.20.0, these indexing operations were still supported by + /// direct implementation of `Index` and `IndexMut`. + /// + /// # Panics + /// + /// Panics if `begin` or `end` does not point to the starting byte offset of + /// a character (as defined by `is_char_boundary`), if `begin > end`, or if + /// `end > len`. + /// + /// # Examples + /// + /// ``` + /// let s = "Löwe 老虎 Léopard"; + /// assert_eq!(&s[0 .. 1], "L"); + /// + /// assert_eq!(&s[1 .. 9], "öwe 老"); + /// + /// // these will panic: + /// // byte 2 lies within `ö`: + /// // &s[2 ..3]; + /// + /// // byte 8 lies within `老` + /// // &s[1 .. 8]; + /// + /// // byte 100 is outside the string + /// // &s[3 .. 100]; + /// ``` #[stable(feature = "str_checked_slicing", since = "1.20.0")] impl SliceIndex for ops::Range { type Output = str; @@ -1881,9 +1741,9 @@ mod traits { } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output { - let ptr = slice.as_ptr().add(self.start); + let ptr = slice.as_mut_ptr().add(self.start); let len = self.end - self.start; - super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len)) + super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) } #[inline] fn index(self, slice: &str) -> &Self::Output { @@ -1904,6 +1764,21 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[.. end]` or `&mut + /// self[.. end]`. + /// + /// Returns a slice of the given string from the byte range [`0`, `end`). + /// Equivalent to `&self[0 .. end]` or `&mut self[0 .. end]`. + /// + /// This operation is `O(1)`. + /// + /// Prior to 1.20.0, these indexing operations were still supported by + /// direct implementation of `Index` and `IndexMut`. + /// + /// # Panics + /// + /// Panics if `end` does not point to the starting byte offset of a + /// character (as defined by `is_char_boundary`), or if `end > len`. #[stable(feature = "str_checked_slicing", since = "1.20.0")] impl SliceIndex for ops::RangeTo { type Output = str; @@ -1930,8 +1805,8 @@ mod traits { } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output { - let ptr = slice.as_ptr(); - super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, self.end)) + let ptr = slice.as_mut_ptr(); + super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, self.end)) } #[inline] fn index(self, slice: &str) -> &Self::Output { @@ -1949,6 +1824,22 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[begin ..]` or `&mut + /// self[begin ..]`. + /// + /// Returns a slice of the given string from the byte range [`begin`, + /// `len`). Equivalent to `&self[begin .. len]` or `&mut self[begin .. + /// len]`. + /// + /// This operation is `O(1)`. + /// + /// Prior to 1.20.0, these indexing operations were still supported by + /// direct implementation of `Index` and `IndexMut`. + /// + /// # Panics + /// + /// Panics if `begin` does not point to the starting byte offset of + /// a character (as defined by `is_char_boundary`), or if `begin >= len`. 
#[stable(feature = "str_checked_slicing", since = "1.20.0")] impl SliceIndex for ops::RangeFrom { type Output = str; @@ -1976,9 +1867,9 @@ mod traits { } #[inline] unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output { - let ptr = slice.as_ptr().add(self.start); + let ptr = slice.as_mut_ptr().add(self.start); let len = slice.len() - self.start; - super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr as *mut u8, len)) + super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) } #[inline] fn index(self, slice: &str) -> &Self::Output { @@ -1996,6 +1887,22 @@ mod traits { } } + /// Implements substring slicing with syntax `&self[begin ..= end]` or `&mut + /// self[begin ..= end]`. + /// + /// Returns a slice of the given string from the byte range + /// [`begin`, `end`]. Equivalent to `&self [begin .. end + 1]` or `&mut + /// self[begin .. end + 1]`, except if `end` has the maximum value for + /// `usize`. + /// + /// This operation is `O(1)`. + /// + /// # Panics + /// + /// Panics if `begin` does not point to the starting byte offset of + /// a character (as defined by `is_char_boundary`), if `end` does not point + /// to the ending byte offset of a character (`end + 1` is either a starting + /// byte offset or equal to `len`), if `begin > end`, or if `end >= len`. #[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex for ops::RangeInclusive { type Output = str; @@ -2029,8 +1936,20 @@ mod traits { } } - - + /// Implements substring slicing with syntax `&self[..= end]` or `&mut + /// self[..= end]`. + /// + /// Returns a slice of the given string from the byte range [0, `end`]. + /// Equivalent to `&self [0 .. end + 1]`, except if `end` has the maximum + /// value for `usize`. + /// + /// This operation is `O(1)`. + /// + /// # Panics + /// + /// Panics if `end` does not point to the ending byte offset of a character + /// (`end + 1` is either a starting byte offset as defined by + /// `is_char_boundary`, or equal to `len`), or if `end >= len`. #[stable(feature = "inclusive_range", since = "1.26.0")] impl SliceIndex for ops::RangeToInclusive { type Output = str; @@ -2278,6 +2197,22 @@ impl str { self as *const str as *const u8 } + /// Converts a mutable string slice to a raw pointer. + /// + /// As string slices are a slice of bytes, the raw pointer points to a + /// [`u8`]. This pointer will be pointing to the first byte of the string + /// slice. + /// + /// It is your responsibility to make sure that the string slice only gets + /// modified in a way that it remains valid UTF-8. + /// + /// [`u8`]: primitive.u8.html + #[unstable(feature = "str_as_mut_ptr", issue = "58215")] + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut u8 { + self as *mut str as *mut u8 + } + /// Returns a subslice of `str`. /// /// This is the non-panicking alternative to indexing the `str`. Returns @@ -2565,7 +2500,7 @@ impl str { // is_char_boundary checks that the index is in [0, .len()] if self.is_char_boundary(mid) { let len = self.len(); - let ptr = self.as_ptr() as *mut u8; + let ptr = self.as_mut_ptr(); unsafe { (from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, mid)), from_utf8_unchecked_mut(slice::from_raw_parts_mut( @@ -2708,7 +2643,7 @@ impl str { Bytes(self.as_bytes().iter().cloned()) } - /// Split a string slice by whitespace. + /// Splits a string slice by whitespace. /// /// The iterator returned will return string slices that are sub-slices of /// the original string slice, separated by any amount of whitespace. 
@@ -2751,7 +2686,7 @@ impl str { SplitWhitespace { inner: self.split(IsWhitespace).filter(IsNotEmpty) } } - /// Split a string slice by ASCII whitespace. + /// Splits a string slice by ASCII whitespace. /// /// The iterator returned will return string slices that are sub-slices of /// the original string slice, separated by any amount of ASCII whitespace. @@ -2765,7 +2700,6 @@ impl str { /// Basic usage: /// /// ``` - /// #![feature(split_ascii_whitespace)] /// let mut iter = "A few words".split_ascii_whitespace(); /// /// assert_eq!(Some("A"), iter.next()); @@ -2787,13 +2721,13 @@ impl str { /// /// assert_eq!(None, iter.next()); /// ``` - #[unstable(feature = "split_ascii_whitespace", issue = "48656")] + #[stable(feature = "split_ascii_whitespace", since = "1.34.0")] #[inline] pub fn split_ascii_whitespace(&self) -> SplitAsciiWhitespace { let inner = self .as_bytes() .split(IsAsciiWhitespace) - .filter(IsNotEmpty) + .filter(BytesIsNotEmpty) .map(UnsafeBytesToStr); SplitAsciiWhitespace { inner } } @@ -3027,14 +2961,14 @@ impl str { /// An iterator over substrings of this string slice, separated by /// characters matched by a pattern. /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// # Iterator behavior /// /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. + /// elements. This is true for, e.g., [`char`], but not for `&str`. /// /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html /// @@ -3144,8 +3078,8 @@ impl str { /// An iterator over substrings of the given string slice, separated by /// characters matched by a pattern and yielded in reverse order. /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// # Iterator behavior /// @@ -3194,8 +3128,8 @@ impl str { /// An iterator over substrings of the given string slice, separated by /// characters matched by a pattern. /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// Equivalent to [`split`], except that the trailing substring /// is skipped if empty. @@ -3209,7 +3143,7 @@ impl str { /// /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. + /// elements. This is true for, e.g., [`char`], but not for `&str`. /// /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html /// @@ -3241,8 +3175,8 @@ impl str { /// An iterator over substrings of `self`, separated by characters /// matched by a pattern and yielded in reverse order. /// - /// The pattern can be a simple `&str`, [`char`], or a closure that - /// determines the split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. 
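Since several of the doc comments in this hunk now point at the `Pattern` trait, here is a compact sketch of the three most common pattern kinds in action:

```
fn main() {
    let csv = "a, b; c";

    // The same splitting machinery accepts several pattern types:
    let by_str: Vec<&str> = csv.split(", ").collect();                          // &str pattern
    let by_char: Vec<&str> = csv.split(';').collect();                          // char pattern
    let by_fn: Vec<&str> = csv.split(|c: char| c == ',' || c == ';').collect(); // closure pattern

    assert_eq!(by_str, ["a", "b; c"]);
    assert_eq!(by_char, ["a, b", " c"]);
    assert_eq!(by_fn, ["a", " b", " c"]);
}
```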
/// Additional libraries might provide more complex patterns like /// regular expressions. /// @@ -3288,8 +3222,8 @@ impl str { /// If `n` substrings are returned, the last substring (the `n`th substring) /// will contain the remainder of the string. /// - /// The pattern can be a `&str`, [`char`], or a closure that determines the - /// split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// # Iterator behavior /// @@ -3341,8 +3275,8 @@ impl str { /// If `n` substrings are returned, the last substring (the `n`th substring) /// will contain the remainder of the string. /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines the split. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// # Iterator behavior /// @@ -3385,14 +3319,14 @@ impl str { /// An iterator over the disjoint matches of a pattern within the given string /// slice. /// - /// The pattern can be a `&str`, [`char`], or a closure that - /// determines if a character matches. + /// The pattern can be any type that implements the Pattern trait. Notable + /// examples are `&str`, [`char`], and closures that determines the split. /// /// # Iterator behavior /// /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. + /// elements. This is true for, e.g., [`char`], but not for `&str`. /// /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html /// @@ -3468,7 +3402,7 @@ impl str { /// /// The returned iterator will be a [`DoubleEndedIterator`] if the pattern /// allows a reverse search and forward/reverse search yields the same - /// elements. This is true for, eg, [`char`] but not for `&str`. + /// elements. This is true for, e.g., [`char`], but not for `&str`. /// /// [`DoubleEndedIterator`]: iter/trait.DoubleEndedIterator.html /// @@ -3554,6 +3488,8 @@ impl str { /// /// assert_eq!("Hello\tworld", s.trim()); /// ``` + #[must_use = "this returns the trimmed string as a slice, \ + without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] pub fn trim(&self) -> &str { self.trim_matches(|c: char| c.is_whitespace()) @@ -3568,7 +3504,7 @@ impl str { /// /// A string is a sequence of bytes. `start` in this context means the first /// position of that byte string; for a left-to-right language like English or - /// Russian, this will be left side; and for right-to-left languages like + /// Russian, this will be left side, and for right-to-left languages like /// like Arabic or Hebrew, this will be the right side. /// /// # Examples @@ -3589,6 +3525,8 @@ impl str { /// let s = " עברית "; /// assert!(Some('ע') == s.trim_start().chars().next()); /// ``` + #[must_use = "this returns the trimmed string as a new slice, \ + without modifying the original"] #[stable(feature = "trim_direction", since = "1.30.0")] pub fn trim_start(&self) -> &str { self.trim_start_matches(|c: char| c.is_whitespace()) @@ -3603,7 +3541,7 @@ impl str { /// /// A string is a sequence of bytes. 
`end` in this context means the last /// position of that byte string; for a left-to-right language like English or - /// Russian, this will be right side; and for right-to-left languages like + /// Russian, this will be right side, and for right-to-left languages like /// like Arabic or Hebrew, this will be the left side. /// /// # Examples @@ -3624,6 +3562,8 @@ impl str { /// let s = " עברית "; /// assert!(Some('ת') == s.trim_end().chars().rev().next()); /// ``` + #[must_use = "this returns the trimmed string as a new slice, \ + without modifying the original"] #[stable(feature = "trim_direction", since = "1.30.0")] pub fn trim_end(&self) -> &str { self.trim_end_matches(|c: char| c.is_whitespace()) @@ -3661,7 +3601,11 @@ impl str { /// assert!(Some('ע') == s.trim_left().chars().next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(reason = "superseded by `trim_start`", since = "1.33.0")] + #[rustc_deprecated( + since = "1.33.0", + reason = "superseded by `trim_start`", + suggestion = "trim_start", + )] pub fn trim_left(&self) -> &str { self.trim_start() } @@ -3698,7 +3642,11 @@ impl str { /// assert!(Some('ת') == s.trim_right().chars().rev().next()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(reason = "superseded by `trim_end`", since = "1.33.0")] + #[rustc_deprecated( + since = "1.33.0", + reason = "superseded by `trim_end`", + suggestion = "trim_end", + )] pub fn trim_right(&self) -> &str { self.trim_end() } @@ -3726,6 +3674,8 @@ impl str { /// ``` /// assert_eq!("1foo1barXX".trim_matches(|c| c == '1' || c == 'X'), "foo1bar"); /// ``` + #[must_use = "this returns the trimmed string as a new slice, \ + without modifying the original"] #[stable(feature = "rust1", since = "1.0.0")] pub fn trim_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: DoubleEndedSearcher<'a> @@ -3771,6 +3721,8 @@ impl str { /// let x: &[_] = &['1', '2']; /// assert_eq!("12foo1bar12".trim_start_matches(x), "foo1bar12"); /// ``` + #[must_use = "this returns the trimmed string as a new slice, \ + without modifying the original"] #[stable(feature = "trim_direction", since = "1.30.0")] pub fn trim_start_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { let mut i = self.len(); @@ -3814,6 +3766,8 @@ impl str { /// ``` /// assert_eq!("1fooX".trim_end_matches(|c| c == '1' || c == 'X'), "1foo"); /// ``` + #[must_use = "this returns the trimmed string as a new slice, \ + without modifying the original"] #[stable(feature = "trim_direction", since = "1.30.0")] pub fn trim_end_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: ReverseSearcher<'a> @@ -3839,10 +3793,10 @@ impl str { /// /// # Text directionality /// - /// A string is a sequence of bytes. 'Left' in this context means the first - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _right_ side, not the left. + /// A string is a sequence of bytes. `start` in this context means the first + /// position of that byte string; for a left-to-right language like English or + /// Russian, this will be left side, and for right-to-left languages like + /// like Arabic or Hebrew, this will be the right side. 
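To make the directional trimming and the pattern-based `*_matches` variants above concrete, a small self-contained sketch:

```
fn main() {
    // Directional trimming only touches one side of the string.
    let padded = "  hello  ";
    assert_eq!(padded.trim_start(), "hello  ");
    assert_eq!(padded.trim_end(), "  hello");
    assert_eq!(padded.trim(), "hello");

    // The *_matches variants accept any pattern, e.g. a char slice or a single char.
    let x: &[char] = &['1', 'X'];
    assert_eq!("1fooX".trim_matches(x), "foo");
    assert_eq!("11foo".trim_start_matches('1'), "foo");
}
```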
/// /// # Examples /// @@ -3856,7 +3810,11 @@ impl str { /// assert_eq!("12foo1bar12".trim_left_matches(x), "foo1bar12"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(reason = "superseded by `trim_start_matches`", since = "1.33.0")] + #[rustc_deprecated( + since = "1.33.0", + reason = "superseded by `trim_start_matches`", + suggestion = "trim_start_matches", + )] pub fn trim_left_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str { self.trim_start_matches(pat) } @@ -3871,10 +3829,10 @@ impl str { /// /// # Text directionality /// - /// A string is a sequence of bytes. 'Right' in this context means the last - /// position of that byte string; for a language like Arabic or Hebrew - /// which are 'right to left' rather than 'left to right', this will be - /// the _left_ side, not the right. + /// A string is a sequence of bytes. `end` in this context means the last + /// position of that byte string; for a left-to-right language like English or + /// Russian, this will be right side, and for right-to-left languages like + /// like Arabic or Hebrew, this will be the left side. /// /// # Examples /// @@ -3894,7 +3852,11 @@ impl str { /// assert_eq!("1fooX".trim_right_matches(|c| c == '1' || c == 'X'), "1foo"); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - #[rustc_deprecated(reason = "superseded by `trim_end_matches`", since = "1.33.0")] + #[rustc_deprecated( + since = "1.33.0", + reason = "superseded by `trim_end_matches`", + suggestion = "trim_end_matches", + )] pub fn trim_right_matches<'a, P: Pattern<'a>>(&'a self, pat: P) -> &'a str where P::Searcher: ReverseSearcher<'a> { @@ -4018,6 +3980,146 @@ impl str { let me = unsafe { self.as_bytes_mut() }; me.make_ascii_lowercase() } + + /// Return an iterator that escapes each char in `self` with [`char::escape_debug`]. + /// + /// Note: only extended grapheme codepoints that begin the string will be + /// escaped. + /// + /// [`char::escape_debug`]: ../std/primitive.char.html#method.escape_debug + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in "❤\n!".escape_debug() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", "❤\n!".escape_debug()); + /// ``` + /// + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("❤\\n!"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!("❤\n!".escape_debug().to_string(), "❤\\n!"); + /// ``` + #[stable(feature = "str_escape", since = "1.34.0")] + pub fn escape_debug(&self) -> EscapeDebug { + let mut chars = self.chars(); + EscapeDebug { + inner: chars.next() + .map(|first| first.escape_debug_ext(true)) + .into_iter() + .flatten() + .chain(chars.flat_map(CharEscapeDebugContinue)) + } + } + + /// Return an iterator that escapes each char in `self` with [`char::escape_default`]. 
+ /// + /// [`char::escape_default`]: ../std/primitive.char.html#method.escape_default + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in "❤\n!".escape_default() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", "❤\n!".escape_default()); + /// ``` + /// + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("\\u{{2764}}\n!"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!("❤\n!".escape_default().to_string(), "\\u{2764}\\n!"); + /// ``` + #[stable(feature = "str_escape", since = "1.34.0")] + pub fn escape_default(&self) -> EscapeDefault { + EscapeDefault { inner: self.chars().flat_map(CharEscapeDefault) } + } + + /// Return an iterator that escapes each char in `self` with [`char::escape_unicode`]. + /// + /// [`char::escape_unicode`]: ../std/primitive.char.html#method.escape_unicode + /// + /// # Examples + /// + /// As an iterator: + /// + /// ``` + /// for c in "❤\n!".escape_unicode() { + /// print!("{}", c); + /// } + /// println!(); + /// ``` + /// + /// Using `println!` directly: + /// + /// ``` + /// println!("{}", "❤\n!".escape_unicode()); + /// ``` + /// + /// + /// Both are equivalent to: + /// + /// ``` + /// println!("\\u{{2764}}\\u{{a}}\\u{{21}}"); + /// ``` + /// + /// Using `to_string`: + /// + /// ``` + /// assert_eq!("❤\n!".escape_unicode().to_string(), "\\u{2764}\\u{a}\\u{21}"); + /// ``` + #[stable(feature = "str_escape", since = "1.34.0")] + pub fn escape_unicode(&self) -> EscapeUnicode { + EscapeUnicode { inner: self.chars().flat_map(CharEscapeUnicode) } + } +} + +impl_fn_for_zst! { + #[derive(Clone)] + struct CharEscapeDebugContinue impl Fn = |c: char| -> char::EscapeDebug { + c.escape_debug_ext(false) + }; + + #[derive(Clone)] + struct CharEscapeUnicode impl Fn = |c: char| -> char::EscapeUnicode { + c.escape_unicode() + }; + #[derive(Clone)] + struct CharEscapeDefault impl Fn = |c: char| -> char::EscapeDefault { + c.escape_default() + }; } #[stable(feature = "rust1", since = "1.0.0")] @@ -4062,105 +4164,39 @@ pub struct SplitWhitespace<'a> { /// /// [`split_ascii_whitespace`]: ../../std/primitive.str.html#method.split_ascii_whitespace /// [`str`]: ../../std/primitive.str.html -#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[stable(feature = "split_ascii_whitespace", since = "1.34.0")] #[derive(Clone, Debug)] pub struct SplitAsciiWhitespace<'a> { - inner: Map, IsNotEmpty>, UnsafeBytesToStr>, -} - -#[derive(Clone)] -struct IsWhitespace; - -impl FnOnce<(char, )> for IsWhitespace { - type Output = bool; - - #[inline] - extern "rust-call" fn call_once(mut self, arg: (char, )) -> bool { - self.call_mut(arg) - } -} - -impl FnMut<(char, )> for IsWhitespace { - #[inline] - extern "rust-call" fn call_mut(&mut self, arg: (char, )) -> bool { - arg.0.is_whitespace() - } -} - -#[derive(Clone)] -struct IsAsciiWhitespace; - -impl<'a> FnOnce<(&'a u8, )> for IsAsciiWhitespace { - type Output = bool; - - #[inline] - extern "rust-call" fn call_once(mut self, arg: (&u8, )) -> bool { - self.call_mut(arg) - } -} - -impl<'a> FnMut<(&'a u8, )> for IsAsciiWhitespace { - #[inline] - extern "rust-call" fn call_mut(&mut self, arg: (&u8, )) -> bool { - arg.0.is_ascii_whitespace() - } -} - -#[derive(Clone)] -struct IsNotEmpty; - -impl<'a, 'b> FnOnce<(&'a &'b str, )> for IsNotEmpty { - type Output = bool; - - #[inline] - extern "rust-call" fn call_once(mut self, arg: (&'a &'b str, )) -> bool { - self.call_mut(arg) - } -} - 
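The `impl_fn_for_zst!` invocations above replace the hand-written `Fn*` impls deleted in this hunk. The macro itself is not shown in this diff; the following is only a rough, nightly-only sketch of the pattern it stands in for, a named zero-sized type implementing the `Fn` traits so it can appear in iterator adapter types (names are illustrative, not the real expansion):

```
#![feature(unboxed_closures, fn_traits)]

// Mirrors the hand-written impls removed above: a zero-sized struct that
// behaves like a non-capturing closure with a nameable type.
#[derive(Clone)]
struct IsWhitespace;

impl FnOnce<(char,)> for IsWhitespace {
    type Output = bool;
    extern "rust-call" fn call_once(self, arg: (char,)) -> bool {
        self.call(arg)
    }
}

impl FnMut<(char,)> for IsWhitespace {
    extern "rust-call" fn call_mut(&mut self, arg: (char,)) -> bool {
        self.call(arg)
    }
}

impl Fn<(char,)> for IsWhitespace {
    extern "rust-call" fn call(&self, arg: (char,)) -> bool {
        arg.0.is_whitespace()
    }
}

fn main() {
    // With `fn_traits` enabled, the ZST can be called like a closure.
    let is_ws = IsWhitespace;
    assert!(is_ws(' '));
    assert!(!is_ws('x'));
}
```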
-impl<'a, 'b> FnMut<(&'a &'b str, )> for IsNotEmpty { - #[inline] - extern "rust-call" fn call_mut(&mut self, arg: (&'a &'b str, )) -> bool { - !arg.0.is_empty() - } -} - -impl<'a, 'b> FnOnce<(&'a &'b [u8], )> for IsNotEmpty { - type Output = bool; - - #[inline] - extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u8], )) -> bool { - self.call_mut(arg) - } + inner: Map, BytesIsNotEmpty>, UnsafeBytesToStr>, } -impl<'a, 'b> FnMut<(&'a &'b [u8], )> for IsNotEmpty { - #[inline] - extern "rust-call" fn call_mut(&mut self, arg: (&'a &'b [u8], )) -> bool { - !arg.0.is_empty() - } -} +impl_fn_for_zst! { + #[derive(Clone)] + struct IsWhitespace impl Fn = |c: char| -> bool { + c.is_whitespace() + }; -#[derive(Clone)] -struct UnsafeBytesToStr; + #[derive(Clone)] + struct IsAsciiWhitespace impl Fn = |byte: &u8| -> bool { + byte.is_ascii_whitespace() + }; -impl<'a> FnOnce<(&'a [u8], )> for UnsafeBytesToStr { - type Output = &'a str; + #[derive(Clone)] + struct IsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b str| -> bool { + !s.is_empty() + }; - #[inline] - extern "rust-call" fn call_once(mut self, arg: (&'a [u8], )) -> &'a str { - self.call_mut(arg) - } -} + #[derive(Clone)] + struct BytesIsNotEmpty impl<'a, 'b> Fn = |s: &'a &'b [u8]| -> bool { + !s.is_empty() + }; -impl<'a> FnMut<(&'a [u8], )> for UnsafeBytesToStr { - #[inline] - extern "rust-call" fn call_mut(&mut self, arg: (&'a [u8], )) -> &'a str { - unsafe { from_utf8_unchecked(arg.0) } - } + #[derive(Clone)] + struct UnsafeBytesToStr impl<'a> Fn = |bytes: &'a [u8]| -> &'a str { + unsafe { from_utf8_unchecked(bytes) } + }; } - #[stable(feature = "split_whitespace", since = "1.1.0")] impl<'a> Iterator for SplitWhitespace<'a> { type Item = &'a str; @@ -4187,7 +4223,7 @@ impl<'a> DoubleEndedIterator for SplitWhitespace<'a> { #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for SplitWhitespace<'_> {} -#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[stable(feature = "split_ascii_whitespace", since = "1.34.0")] impl<'a> Iterator for SplitAsciiWhitespace<'a> { type Item = &'a str; @@ -4202,7 +4238,7 @@ impl<'a> Iterator for SplitAsciiWhitespace<'a> { } } -#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[stable(feature = "split_ascii_whitespace", since = "1.34.0")] impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> { #[inline] fn next_back(&mut self) -> Option<&'a str> { @@ -4210,7 +4246,7 @@ impl<'a> DoubleEndedIterator for SplitAsciiWhitespace<'a> { } } -#[unstable(feature = "split_ascii_whitespace", issue = "48656")] +#[stable(feature = "split_ascii_whitespace", since = "1.34.0")] impl FusedIterator for SplitAsciiWhitespace<'_> {} /// An iterator of [`u16`] over the string encoded as UTF-16. @@ -4270,3 +4306,74 @@ impl<'a> Iterator for EncodeUtf16<'a> { #[stable(feature = "fused", since = "1.26.0")] impl FusedIterator for EncodeUtf16<'_> {} + +/// The return type of [`str::escape_debug`]. +/// +/// [`str::escape_debug`]: ../../std/primitive.str.html#method.escape_debug +#[stable(feature = "str_escape", since = "1.34.0")] +#[derive(Clone, Debug)] +pub struct EscapeDebug<'a> { + inner: Chain< + Flatten>, + FlatMap, char::EscapeDebug, CharEscapeDebugContinue> + >, +} + +/// The return type of [`str::escape_default`]. 
+/// +/// [`str::escape_default`]: ../../std/primitive.str.html#method.escape_default +#[stable(feature = "str_escape", since = "1.34.0")] +#[derive(Clone, Debug)] +pub struct EscapeDefault<'a> { + inner: FlatMap, char::EscapeDefault, CharEscapeDefault>, +} + +/// The return type of [`str::escape_unicode`]. +/// +/// [`str::escape_unicode`]: ../../std/primitive.str.html#method.escape_unicode +#[stable(feature = "str_escape", since = "1.34.0")] +#[derive(Clone, Debug)] +pub struct EscapeUnicode<'a> { + inner: FlatMap, char::EscapeUnicode, CharEscapeUnicode>, +} + +macro_rules! escape_types_impls { + ($( $Name: ident ),+) => {$( + #[stable(feature = "str_escape", since = "1.34.0")] + impl<'a> fmt::Display for $Name<'a> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.clone().try_for_each(|c| f.write_char(c)) + } + } + + #[stable(feature = "str_escape", since = "1.34.0")] + impl<'a> Iterator for $Name<'a> { + type Item = char; + + #[inline] + fn next(&mut self) -> Option { self.inner.next() } + + #[inline] + fn size_hint(&self) -> (usize, Option) { self.inner.size_hint() } + + #[inline] + fn try_fold(&mut self, init: Acc, fold: Fold) -> R where + Self: Sized, Fold: FnMut(Acc, Self::Item) -> R, R: Try + { + self.inner.try_fold(init, fold) + } + + #[inline] + fn fold(self, init: Acc, fold: Fold) -> Acc + where Fold: FnMut(Acc, Self::Item) -> Acc, + { + self.inner.fold(init, fold) + } + } + + #[stable(feature = "str_escape", since = "1.34.0")] + impl<'a> FusedIterator for $Name<'a> {} + )+} +} + +escape_types_impls!(EscapeDebug, EscapeDefault, EscapeUnicode); diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index 2059160ddfe67..2571780ad0bab 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -1,17 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The string Pattern API. //! -//! For more details, see the traits `Pattern`, `Searcher`, -//! `ReverseSearcher` and `DoubleEndedSearcher`. +//! For more details, see the traits [`Pattern`], [`Searcher`], +//! [`ReverseSearcher`], and [`DoubleEndedSearcher`]. #![unstable(feature = "pattern", reason = "API not fully fleshed out and ready to be stabilized", @@ -127,7 +117,7 @@ pub unsafe trait Searcher<'a> { /// `[Reject(0, 1), Reject(1, 2), Match(2, 5), Reject(5, 8)]` fn next(&mut self) -> SearchStep; - /// Find the next `Match` result. See `next()` + /// Finds the next `Match` result. See `next()` /// /// Unlike next(), there is no guarantee that the returned ranges /// of this and next_reject will overlap. This will return (start_match, end_match), @@ -144,7 +134,7 @@ pub unsafe trait Searcher<'a> { } } - /// Find the next `Reject` result. See `next()` and `next_match()` + /// Finds the next `Reject` result. See `next()` and `next_match()` /// /// Unlike next(), there is no guarantee that the returned ranges /// of this and next_match will overlap. @@ -195,7 +185,7 @@ pub unsafe trait ReverseSearcher<'a>: Searcher<'a> { /// `[Reject(7, 8), Match(4, 7), Reject(1, 4), Reject(0, 1)]` fn next_back(&mut self) -> SearchStep; - /// Find the next `Match` result. See `next_back()` + /// Finds the next `Match` result. 
See `next_back()` #[inline] fn next_match_back(&mut self) -> Option<(usize, usize)>{ loop { @@ -207,7 +197,7 @@ pub unsafe trait ReverseSearcher<'a>: Searcher<'a> { } } - /// Find the next `Reject` result. See `next_back()` + /// Finds the next `Reject` result. See `next_back()` #[inline] fn next_reject_back(&mut self) -> Option<(usize, usize)>{ loop { @@ -435,8 +425,7 @@ impl<'a> Pattern<'a> for char { #[inline] fn into_searcher(self, haystack: &'a str) -> Self::Searcher { let mut utf8_encoded = [0; 4]; - self.encode_utf8(&mut utf8_encoded); - let utf8_size = self.len_utf8(); + let utf8_size = self.encode_utf8(&mut utf8_encoded).len(); CharSearcher { haystack, finger: 0, diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index d2683e31eefb8..26b59969e18af 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Atomic types //! //! Atomic types provide primitive shared-memory communication between @@ -15,13 +5,16 @@ //! types. //! //! This module defines atomic versions of a select number of primitive -//! types, including [`AtomicBool`], [`AtomicIsize`], and [`AtomicUsize`]. +//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`], +//! [`AtomicI8`], [`AtomicU16`], etc. //! Atomic types present operations that, when used correctly, synchronize //! updates between threads. //! //! [`AtomicBool`]: struct.AtomicBool.html //! [`AtomicIsize`]: struct.AtomicIsize.html //! [`AtomicUsize`]: struct.AtomicUsize.html +//! [`AtomicI8`]: struct.AtomicI8.html +//! [`AtomicU16`]: struct.AtomicU16.html //! //! Each method takes an [`Ordering`] which represents the strength of //! the memory barrier for that operation. These orderings are the @@ -41,11 +34,46 @@ //! [`Sync`]: ../../marker/trait.Sync.html //! [arc]: ../../../std/sync/struct.Arc.html //! -//! Most atomic types may be stored in static variables, initialized using -//! the provided static initializers like [`ATOMIC_BOOL_INIT`]. Atomic statics +//! Atomic types may be stored in static variables, initialized using +//! the constant initializers like [`AtomicBool::new`]. Atomic statics //! are often used for lazy global initialization. //! -//! [`ATOMIC_BOOL_INIT`]: constant.ATOMIC_BOOL_INIT.html +//! [`AtomicBool::new`]: struct.AtomicBool.html#method.new +//! +//! # Portability +//! +//! All atomic types in this module are guaranteed to be [lock-free] if they're +//! available. This means they don't internally acquire a global mutex. Atomic +//! types and operations are not guaranteed to be wait-free. This means that +//! operations like `fetch_or` may be implemented with a compare-and-swap loop. +//! +//! Atomic operations may be implemented at the instruction layer with +//! larger-size atomics. For example some platforms use 4-byte atomic +//! instructions to implement `AtomicI8`. Note that this emulation should not +//! have an impact on correctness of code, it's just something to be aware of. +//! +//! The atomic types in this module may not be available on all platforms. The +//! atomic types here are all widely available, however, and can generally be +//! relied upon existing. Some notable exceptions are: +//! 
+//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or +//! `AtomicI64` types. +//! * ARM platforms like `armv5te` that aren't for Linux do not have any atomics +//! at all. +//! * ARM targets with `thumbv6m` do not have atomic operations at all. +//! +//! Note that future platforms may be added that also do not have support for +//! some atomic operations. Maximally portable code will want to be careful +//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are +//! generally the most portable, but even then they're not available everywhere. +//! For reference, the `std` library requires pointer-sized atomics, although +//! `core` does not. +//! +//! Currently you'll need to use `#[cfg(target_arch)]` primarily to +//! conditionally compile in code with atomics. There is an unstable +//! `#[cfg(target_has_atomic)]` as well which may be stabilized in the future. +//! +//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm //! //! # Examples //! @@ -76,9 +104,9 @@ //! Keep a global count of live threads: //! //! ``` -//! use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT}; +//! use std::sync::atomic::{AtomicUsize, Ordering}; //! -//! static GLOBAL_THREAD_COUNT: AtomicUsize = ATOMIC_USIZE_INIT; +//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0); //! //! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::SeqCst); //! println!("live threads: {}", old_thread_count + 1); @@ -94,27 +122,34 @@ use intrinsics; use cell::UnsafeCell; use fmt; -/// Save power or switch hyperthreads in a busy-wait spin-loop. +use hint::spin_loop; + +/// Signals the processor that it is entering a busy-wait spin-loop. +/// +/// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving +/// power or switching hyper-threads. /// -/// This function is deliberately more primitive than -/// [`std::thread::yield_now`](../../../std/thread/fn.yield_now.html) and -/// does not directly yield to the system's scheduler. -/// In some cases it might be useful to use a combination of both functions. -/// Careful benchmarking is advised. +/// This function is different than [`std::thread::yield_now`] which directly yields to the +/// system's scheduler, whereas `spin_loop_hint` only signals the processor that it is entering a +/// busy-wait spin-loop without yielding control to the system's scheduler. /// -/// On some platforms this function may not do anything at all. +/// Using a busy-wait spin-loop with `spin_loop_hint` is ideally used in situations where a +/// contended lock is held by another thread executed on a different CPU and where the waiting +/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's +/// scheduler, no overhead for switching threads occurs. However, if the thread holding the +/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice +/// before switching to the thread that holds the lock. If the contending lock is held by a thread +/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to +/// use [`std::thread::yield_now`]. +/// +/// **Note**: On platforms that do not support receiving spin-loop hints this function does not +/// do anything at all. 
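To make the busy-wait guidance above concrete, here is a minimal spin-flag built on `AtomicBool` and `spin_loop_hint`. The type and method names are illustrative only; a production lock would need RAII guards and far more care.

```
use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};

// Minimal sketch of the busy-wait pattern described above.
struct SpinFlag {
    locked: AtomicBool,
}

impl SpinFlag {
    const fn new() -> SpinFlag {
        SpinFlag { locked: AtomicBool::new(false) }
    }

    fn lock(&self) {
        // Keep trying to flip false -> true; hint the CPU on every failed attempt.
        while self
            .locked
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            spin_loop_hint();
        }
    }

    fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }
}

static FLAG: SpinFlag = SpinFlag::new();

fn main() {
    FLAG.lock();
    // ...short critical section...
    FLAG.unlock();
}
```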
+/// +/// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html #[inline] #[stable(feature = "spin_loop_hint", since = "1.24.0")] pub fn spin_loop_hint() { - #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] - unsafe { - asm!("pause" ::: "memory" : "volatile"); - } - - #[cfg(target_arch = "aarch64")] - unsafe { - asm!("yield" ::: "memory" : "volatile"); - } + spin_loop() } /// A boolean type which can be safely shared between threads. @@ -188,7 +223,7 @@ unsafe impl Sync for AtomicPtr {} /// [Ordering::Relaxed]: #variant.Relaxed /// [Ordering::SeqCst]: #variant.SeqCst #[stable(feature = "rust1", since = "1.0.0")] -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] #[non_exhaustive] pub enum Ordering { /// No ordering constraints, only atomic operations. @@ -268,6 +303,11 @@ pub enum Ordering { /// [`AtomicBool`]: struct.AtomicBool.html #[cfg(target_has_atomic = "8")] #[stable(feature = "rust1", since = "1.0.0")] +#[rustc_deprecated( + since = "1.34.0", + reason = "the `new` function is now preferred", + suggestion = "AtomicBool::new(false)", +)] pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false); #[cfg(target_has_atomic = "8")] @@ -1100,10 +1140,12 @@ macro_rules! atomic_int { $stable_access:meta, $stable_from:meta, $stable_nand:meta, + $stable_init_const:meta, $s_int_type:expr, $int_ref:expr, $extra_feature:expr, $min_fn:ident, $max_fn:ident, $align:expr, + $atomic_new:expr, $int_type:ident $atomic_type:ident $atomic_init:ident) => { /// An integer type which can be safely shared between threads. /// @@ -1113,7 +1155,8 @@ macro_rules! atomic_int { /// `]( #[doc = $int_ref] /// ). For more about the differences between atomic types and - /// non-atomic types, please see the [module-level documentation]. + /// non-atomic types as well as information about the portability of + /// this type, please see the [module-level documentation]. /// /// [module-level documentation]: index.html #[$stable] @@ -1123,7 +1166,12 @@ macro_rules! atomic_int { } /// An atomic integer initialized to `0`. - #[$stable] + #[$stable_init_const] + #[rustc_deprecated( + since = "1.34.0", + reason = "the `new` function is now preferred", + suggestion = $atomic_new, + )] pub const $atomic_init: $atomic_type = $atomic_type::new(0); #[$stable] @@ -1843,114 +1891,130 @@ assert_eq!(min_foo, 12); #[cfg(target_has_atomic = "8")] atomic_int! { - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i8", "../../../std/primitive.i8.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 1, + "AtomicI8::new(0)", i8 AtomicI8 ATOMIC_I8_INIT } #[cfg(target_has_atomic = "8")] atomic_int! 
{ - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u8", "../../../std/primitive.u8.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 1, + "AtomicU8::new(0)", u8 AtomicU8 ATOMIC_U8_INIT } #[cfg(target_has_atomic = "16")] atomic_int! { - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i16", "../../../std/primitive.i16.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 2, + "AtomicI16::new(0)", i16 AtomicI16 ATOMIC_I16_INIT } #[cfg(target_has_atomic = "16")] atomic_int! { - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u16", "../../../std/primitive.u16.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 2, + "AtomicU16::new(0)", u16 AtomicU16 ATOMIC_U16_INIT } #[cfg(target_has_atomic = "32")] atomic_int! 
{ - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i32", "../../../std/primitive.i32.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 4, + "AtomicI32::new(0)", i32 AtomicI32 ATOMIC_I32_INIT } #[cfg(target_has_atomic = "32")] atomic_int! { - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u32", "../../../std/primitive.u32.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 4, + "AtomicU32::new(0)", u32 AtomicU32 ATOMIC_U32_INIT } #[cfg(target_has_atomic = "64")] atomic_int! { - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "i64", "../../../std/primitive.i64.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 8, + "AtomicI64::new(0)", i64 AtomicI64 ATOMIC_I64_INIT } #[cfg(target_has_atomic = "64")] atomic_int! 
{ - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), - unstable(feature = "integer_atomics", issue = "32976"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), + stable(feature = "integer_atomics_stable", since = "1.34.0"), unstable(feature = "integer_atomics", issue = "32976"), "u64", "../../../std/primitive.u64.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 8, + "AtomicU64::new(0)", u64 AtomicU64 ATOMIC_U64_INIT } #[cfg(target_has_atomic = "128")] @@ -1961,10 +2025,12 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "i128", "../../../std/primitive.i128.html", "#![feature(integer_atomics)]\n\n", atomic_min, atomic_max, 16, + "AtomicI128::new(0)", i128 AtomicI128 ATOMIC_I128_INIT } #[cfg(target_has_atomic = "128")] @@ -1975,10 +2041,12 @@ atomic_int! { unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), unstable(feature = "integer_atomics", issue = "32976"), + unstable(feature = "integer_atomics", issue = "32976"), "u128", "../../../std/primitive.u128.html", "#![feature(integer_atomics)]\n\n", atomic_umin, atomic_umax, 16, + "AtomicU128::new(0)", u128 AtomicU128 ATOMIC_U128_INIT } #[cfg(target_pointer_width = "16")] @@ -2001,10 +2069,12 @@ atomic_int!{ stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), stable(feature = "atomic_nand", since = "1.27.0"), + stable(feature = "rust1", since = "1.0.0"), "isize", "../../../std/primitive.isize.html", "", atomic_min, atomic_max, ptr_width!(), + "AtomicIsize::new(0)", isize AtomicIsize ATOMIC_ISIZE_INIT } #[cfg(target_has_atomic = "ptr")] @@ -2015,10 +2085,12 @@ atomic_int!{ stable(feature = "atomic_access", since = "1.15.0"), stable(feature = "atomic_from", since = "1.23.0"), stable(feature = "atomic_nand", since = "1.27.0"), + stable(feature = "rust1", since = "1.0.0"), "usize", "../../../std/primitive.usize.html", "", atomic_umin, atomic_umax, ptr_width!(), + "AtomicUsize::new(0)", usize AtomicUsize ATOMIC_USIZE_INIT } @@ -2388,12 +2460,11 @@ pub fn fence(order: Ordering) { /// /// ``` /// use std::sync::atomic::{AtomicBool, AtomicUsize}; -/// use std::sync::atomic::{ATOMIC_BOOL_INIT, ATOMIC_USIZE_INIT}; /// use std::sync::atomic::Ordering; /// use std::sync::atomic::compiler_fence; /// -/// static IMPORTANT_VARIABLE: AtomicUsize = ATOMIC_USIZE_INIT; -/// static IS_READY: AtomicBool = ATOMIC_BOOL_INIT; +/// static IMPORTANT_VARIABLE: AtomicUsize = AtomicUsize::new(0); +/// static IS_READY: AtomicBool = AtomicBool::new(false); /// /// fn main() { /// IMPORTANT_VARIABLE.store(42, Ordering::Relaxed); diff --git a/src/libcore/sync/mod.rs b/src/libcore/sync/mod.rs index 0080e0b5e4353..b635bae0a47b4 100644 --- a/src/libcore/sync/mod.rs +++ b/src/libcore/sync/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Synchronization primitives #![stable(feature = "rust1", since = "1.0.0")] diff --git a/src/libcore/task/mod.rs b/src/libcore/task/mod.rs index 95c9cca292f82..29bae69ea83c1 100644 --- a/src/libcore/task/mod.rs +++ b/src/libcore/task/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "futures_api", reason = "futures in libcore are unstable", issue = "50547")] @@ -18,4 +8,4 @@ mod poll; pub use self::poll::Poll; mod wake; -pub use self::wake::{Waker, LocalWaker, UnsafeWake}; +pub use self::wake::{Context, Waker, RawWaker, RawWakerVTable}; diff --git a/src/libcore/task/poll.rs b/src/libcore/task/poll.rs index fb027efc6dca0..c811f96ace3ba 100644 --- a/src/libcore/task/poll.rs +++ b/src/libcore/task/poll.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "futures_api", reason = "futures in libcore are unstable", issue = "50547")] @@ -17,6 +7,7 @@ use result::Result; /// Indicates whether a value is available or if the current task has been /// scheduled to receive a wakeup instead. +#[must_use = "this `Poll` may be a `Pending` variant, which should be handled"] #[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)] pub enum Poll { /// Represents that a value is immediately ready. @@ -31,7 +22,7 @@ pub enum Poll { } impl Poll { - /// Change the ready value of this `Poll` with the closure provided + /// Changes the ready value of this `Poll` with the closure provided. pub fn map(self, f: F) -> Poll where F: FnOnce(T) -> U { @@ -41,7 +32,7 @@ impl Poll { } } - /// Returns whether this is `Poll::Ready` + /// Returns `true` if this is `Poll::Ready` #[inline] pub fn is_ready(&self) -> bool { match *self { @@ -50,7 +41,7 @@ impl Poll { } } - /// Returns whether this is `Poll::Pending` + /// Returns `true` if this is `Poll::Pending` #[inline] pub fn is_pending(&self) -> bool { !self.is_ready() @@ -58,7 +49,7 @@ impl Poll { } impl Poll> { - /// Change the success value of this `Poll` with the closure provided + /// Changes the success value of this `Poll` with the closure provided. pub fn map_ok(self, f: F) -> Poll> where F: FnOnce(T) -> U { @@ -69,7 +60,7 @@ impl Poll> { } } - /// Change the error value of this `Poll` with the closure provided + /// Changes the error value of this `Poll` with the closure provided. 
pub fn map_err(self, f: F) -> Poll> where F: FnOnce(E) -> U { diff --git a/src/libcore/task/wake.rs b/src/libcore/task/wake.rs index 8ea7abce67bfa..006cbbb6ce6bd 100644 --- a/src/libcore/task/wake.rs +++ b/src/libcore/task/wake.rs @@ -1,294 +1,293 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "futures_api", reason = "futures in libcore are unstable", issue = "50547")] use fmt; -use marker::Unpin; -use ptr::NonNull; +use marker::{PhantomData, Unpin}; -/// A `Waker` is a handle for waking up a task by notifying its executor that it -/// is ready to be run. +/// A `RawWaker` allows the implementor of a task executor to create a [`Waker`] +/// which provides customized wakeup behavior. /// -/// This handle contains a trait object pointing to an instance of the `UnsafeWake` -/// trait, allowing notifications to get routed through it. -#[repr(transparent)] -pub struct Waker { - inner: NonNull, +/// [vtable]: https://en.wikipedia.org/wiki/Virtual_method_table +/// +/// It consists of a data pointer and a [virtual function pointer table (vtable)][vtable] that +/// customizes the behavior of the `RawWaker`. +#[derive(PartialEq, Debug)] +pub struct RawWaker { + /// A data pointer, which can be used to store arbitrary data as required + /// by the executor. This could be e.g. a type-erased pointer to an `Arc` + /// that is associated with the task. + /// The value of this field gets passed to all functions that are part of + /// the vtable as the first parameter. + data: *const (), + /// Virtual function pointer table that customizes the behavior of this waker. + vtable: &'static RawWakerVTable, } -impl Unpin for Waker {} -unsafe impl Send for Waker {} -unsafe impl Sync for Waker {} +impl RawWaker { + /// Creates a new `RawWaker` from the provided `data` pointer and `vtable`. + /// + /// The `data` pointer can be used to store arbitrary data as required + /// by the executor. This could be e.g. a type-erased pointer to an `Arc` + /// that is associated with the task. + /// The value of this poiner will get passed to all functions that are part + /// of the `vtable` as the first parameter. + /// + /// The `vtable` customizes the behavior of a `Waker` which gets created + /// from a `RawWaker`. For each operation on the `Waker`, the associated + /// function in the `vtable` of the underlying `RawWaker` will be called. + #[rustc_promotable] + #[unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + pub const fn new(data: *const (), vtable: &'static RawWakerVTable) -> RawWaker { + RawWaker { + data, + vtable, + } + } +} -impl Waker { - /// Constructs a new `Waker` directly. +/// A virtual function pointer table (vtable) that specifies the behavior +/// of a [`RawWaker`]. +/// +/// The pointer passed to all functions inside the vtable is the `data` pointer +/// from the enclosing [`RawWaker`] object. +/// +/// The functions inside this struct are only intended be called on the `data` +/// pointer of a properly constructed [`RawWaker`] object from inside the +/// [`RawWaker`] implementation. Calling one of the contained functions using +/// any other `data` pointer will cause undefined behavior. 
+#[derive(PartialEq, Copy, Clone, Debug)] +pub struct RawWakerVTable { + /// This function will be called when the [`RawWaker`] gets cloned, e.g. when + /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned. + /// + /// The implementation of this function must retain all resources that are + /// required for this additional instance of a [`RawWaker`] and associated + /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup + /// of the same task that would have been awoken by the original [`RawWaker`]. + clone: unsafe fn(*const ()) -> RawWaker, + + /// This function will be called when `wake` is called on the [`Waker`]. + /// It must wake up the task associated with this [`RawWaker`]. /// - /// Note that most code will not need to call this. Implementers of the - /// `UnsafeWake` trait will typically provide a wrapper that calls this - /// but you otherwise shouldn't call it directly. + /// The implementation of this function must make sure to release any + /// resources that are associated with this instance of a [`RawWaker`] and + /// associated task. + wake: unsafe fn(*const ()), + + /// This function will be called when `wake_by_ref` is called on the [`Waker`]. + /// It must wake up the task associated with this [`RawWaker`]. /// - /// If you're working with the standard library then it's recommended to - /// use the `Waker::from` function instead which works with the safe - /// `Arc` type and the safe `Wake` trait. - #[inline] - pub unsafe fn new(inner: NonNull) -> Self { - Waker { inner } - } + /// This function is similar to `wake`, but must not consume the provided data + /// pointer. + wake_by_ref: unsafe fn(*const ()), - /// Wake up the task associated with this `Waker`. - #[inline] - pub fn wake(&self) { - unsafe { self.inner.as_ref().wake() } - } + /// This function gets called when a [`RawWaker`] gets dropped. + /// + /// The implementation of this function must make sure to release any + /// resources that are associated with this instance of a [`RawWaker`] and + /// associated task. + drop: unsafe fn(*const ()), +} - /// Returns whether or not this `Waker` and `other` awaken the same task. +impl RawWakerVTable { + /// Creates a new `RawWakerVTable` from the provided `clone`, `wake`, + /// `wake_by_ref`, and `drop` functions. /// - /// This function works on a best-effort basis, and may return false even - /// when the `Waker`s would awaken the same task. However, if this function - /// returns true, it is guaranteed that the `Waker`s will awaken the same - /// task. + /// # `clone` /// - /// This function is primarily used for optimization purposes. - #[inline] - pub fn will_wake(&self, other: &Waker) -> bool { - self.inner == other.inner - } - - /// Returns whether or not this `Waker` and `other` `LocalWaker` awaken - /// the same task. + /// This function will be called when the [`RawWaker`] gets cloned, e.g. when + /// the [`Waker`] in which the [`RawWaker`] is stored gets cloned. /// - /// This function works on a best-effort basis, and may return false even - /// when the `Waker`s would awaken the same task. However, if this function - /// returns true, it is guaranteed that the `Waker`s will awaken the same - /// task. + /// The implementation of this function must retain all resources that are + /// required for this additional instance of a [`RawWaker`] and associated + /// task. Calling `wake` on the resulting [`RawWaker`] should result in a wakeup + /// of the same task that would have been awoken by the original [`RawWaker`]. 
/// - /// This function is primarily used for optimization purposes. - #[inline] - pub fn will_wake_local(&self, other: &LocalWaker) -> bool { - self.will_wake(&other.0) + /// # `wake` + /// + /// This function will be called when `wake` is called on the [`Waker`]. + /// It must wake up the task associated with this [`RawWaker`]. + /// + /// The implementation of this function must make sure to release any + /// resources that are associated with this instance of a [`RawWaker`] and + /// associated task. + /// + /// # `wake_by_ref` + /// + /// This function will be called when `wake_by_ref` is called on the [`Waker`]. + /// It must wake up the task associated with this [`RawWaker`]. + /// + /// This function is similar to `wake`, but must not consume the provided data + /// pointer. + /// + /// # `drop` + /// + /// This function gets called when a [`RawWaker`] gets dropped. + /// + /// The implementation of this function must make sure to release any + /// resources that are associated with this instance of a [`RawWaker`] and + /// associated task. + #[rustc_promotable] + #[unstable(feature = "futures_api", + reason = "futures in libcore are unstable", + issue = "50547")] + pub const fn new( + clone: unsafe fn(*const ()) -> RawWaker, + wake: unsafe fn(*const ()), + wake_by_ref: unsafe fn(*const ()), + drop: unsafe fn(*const ()), + ) -> Self { + Self { + clone, + wake, + wake_by_ref, + drop, + } } } -impl Clone for Waker { +/// The `Context` of an asynchronous task. +/// +/// Currently, `Context` only serves to provide access to a `&Waker` +/// which can be used to wake the current task. +pub struct Context<'a> { + waker: &'a Waker, + // Ensure we future-proof against variance changes by forcing + // the lifetime to be invariant (argument-position lifetimes + // are contravariant while return-position lifetimes are + // covariant). + _marker: PhantomData &'a ()>, +} + +impl<'a> Context<'a> { + /// Create a new `Context` from a `&Waker`. #[inline] - fn clone(&self) -> Self { - unsafe { - self.inner.as_ref().clone_raw() + pub fn from_waker(waker: &'a Waker) -> Self { + Context { + waker, + _marker: PhantomData, } } -} -impl fmt::Debug for Waker { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Waker") - .finish() + /// Returns a reference to the `Waker` for the current task. + #[inline] + pub fn waker(&self) -> &'a Waker { + &self.waker } } -impl Drop for Waker { - #[inline] - fn drop(&mut self) { - unsafe { - self.inner.as_ref().drop_raw() - } +impl fmt::Debug for Context<'_> { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Context") + .field("waker", &self.waker) + .finish() } } -/// A `LocalWaker` is a handle for waking up a task by notifying its executor that it +/// A `Waker` is a handle for waking up a task by notifying its executor that it /// is ready to be run. /// -/// This is similar to the `Waker` type, but cannot be sent across threads. -/// Task executors can use this type to implement more optimized single-threaded wakeup -/// behavior. +/// This handle encapsulates a [`RawWaker`] instance, which defines the +/// executor-specific wakeup behavior. +/// +/// Implements [`Clone`], [`Send`], and [`Sync`]. 
#[repr(transparent)] -#[derive(Clone)] -pub struct LocalWaker(Waker); +pub struct Waker { + waker: RawWaker, +} -impl Unpin for LocalWaker {} -impl !Send for LocalWaker {} -impl !Sync for LocalWaker {} +impl Unpin for Waker {} +unsafe impl Send for Waker {} +unsafe impl Sync for Waker {} -impl LocalWaker { - /// Constructs a new `LocalWaker` directly. - /// - /// Note that most code will not need to call this. Implementers of the - /// `UnsafeWake` trait will typically provide a wrapper that calls this - /// but you otherwise shouldn't call it directly. - /// - /// If you're working with the standard library then it's recommended to - /// use the `local_waker_from_nonlocal` or `local_waker` to convert a `Waker` - /// into a `LocalWaker`. - /// - /// For this function to be used safely, it must be sound to call `inner.wake_local()` - /// on the current thread. +impl Waker { + /// Wake up the task associated with this `Waker`. #[inline] - pub unsafe fn new(inner: NonNull) -> Self { - LocalWaker(Waker::new(inner)) - } + pub fn wake(self) { + // The actual wakeup call is delegated through a virtual function call + // to the implementation which is defined by the executor. + let wake = self.waker.vtable.wake; + let data = self.waker.data; - /// Borrows this `LocalWaker` as a `Waker`. - /// - /// `Waker` is nearly identical to `LocalWaker`, but is threadsafe - /// (implements `Send` and `Sync`). - #[inline] - pub fn as_waker(&self) -> &Waker { - &self.0 + // Don't call `drop` -- the waker will be consumed by `wake`. + crate::mem::forget(self); + + // SAFETY: This is safe because `Waker::from_raw` is the only way + // to initialize `wake` and `data` requiring the user to acknowledge + // that the contract of `RawWaker` is upheld. + unsafe { (wake)(data) }; } - /// Converts this `LocalWaker` into a `Waker`. + /// Wake up the task associated with this `Waker` without consuming the `Waker`. /// - /// `Waker` is nearly identical to `LocalWaker`, but is threadsafe - /// (implements `Send` and `Sync`). + /// This is similar to `wake`, but may be slightly less efficient in the case + /// where an owned `Waker` is available. This method should be preferred to + /// calling `waker.clone().wake()`. #[inline] - pub fn into_waker(self) -> Waker { - self.0 - } + pub fn wake_by_ref(&self) { + // The actual wakeup call is delegated through a virtual function call + // to the implementation which is defined by the executor. - /// Wake up the task associated with this `LocalWaker`. - #[inline] - pub fn wake(&self) { - unsafe { self.0.inner.as_ref().wake_local() } + // SAFETY: see `wake` + unsafe { (self.waker.vtable.wake_by_ref)(self.waker.data) } } - /// Returns whether or not this `LocalWaker` and `other` `LocalWaker` awaken the same task. + /// Returns `true` if this `Waker` and another `Waker` have awoken the same task. /// /// This function works on a best-effort basis, and may return false even - /// when the `LocalWaker`s would awaken the same task. However, if this function - /// returns true, it is guaranteed that the `LocalWaker`s will awaken the same - /// task. + /// when the `Waker`s would awaken the same task. However, if this function + /// returns `true`, it is guaranteed that the `Waker`s will awaken the same task. /// /// This function is primarily used for optimization purposes. 
#[inline] - pub fn will_wake(&self, other: &LocalWaker) -> bool { - self.0.will_wake(&other.0) + pub fn will_wake(&self, other: &Waker) -> bool { + self.waker == other.waker } - /// Returns whether or not this `LocalWaker` and `other` `Waker` awaken the same task. - /// - /// This function works on a best-effort basis, and may return false even - /// when the `Waker`s would awaken the same task. However, if this function - /// returns true, it is guaranteed that the `LocalWaker`s will awaken the same - /// task. + /// Creates a new `Waker` from [`RawWaker`]. /// - /// This function is primarily used for optimization purposes. + /// The behavior of the returned `Waker` is undefined if the contract defined + /// in [`RawWaker`]'s and [`RawWakerVTable`]'s documentation is not upheld. + /// Therefore this method is unsafe. #[inline] - pub fn will_wake_nonlocal(&self, other: &Waker) -> bool { - self.0.will_wake(other) + pub unsafe fn from_raw(waker: RawWaker) -> Waker { + Waker { + waker, + } } } -impl From for Waker { - /// Converts a `LocalWaker` into a `Waker`. - /// - /// This conversion turns a `!Sync` `LocalWaker` into a `Sync` `Waker`, allowing a wakeup - /// object to be sent to another thread, but giving up its ability to do specialized - /// thread-local wakeup behavior. +impl Clone for Waker { #[inline] - fn from(local_waker: LocalWaker) -> Self { - local_waker.0 + fn clone(&self) -> Self { + Waker { + // SAFETY: This is safe because `Waker::from_raw` is the only way + // to initialize `clone` and `data` requiring the user to acknowledge + // that the contract of [`RawWaker`] is upheld. + waker: unsafe { (self.waker.vtable.clone)(self.waker.data) }, + } } } -impl fmt::Debug for LocalWaker { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("LocalWaker") - .finish() +impl Drop for Waker { + #[inline] + fn drop(&mut self) { + // SAFETY: This is safe because `Waker::from_raw` is the only way + // to initialize `drop` and `data` requiring the user to acknowledge + // that the contract of `RawWaker` is upheld. + unsafe { (self.waker.vtable.drop)(self.waker.data) } } } -/// An unsafe trait for implementing custom memory management for a `Waker` or `LocalWaker`. -/// -/// A `Waker` conceptually is a cloneable trait object for `Wake`, and is -/// most often essentially just `Arc`. However, in some contexts -/// (particularly `no_std`), it's desirable to avoid `Arc` in favor of some -/// custom memory management strategy. This trait is designed to allow for such -/// customization. -/// -/// When using `std`, a default implementation of the `UnsafeWake` trait is provided for -/// `Arc` where `T: Wake`. -pub unsafe trait UnsafeWake: Send + Sync { - /// Creates a clone of this `UnsafeWake` and stores it behind a `Waker`. - /// - /// This function will create a new uniquely owned handle that under the - /// hood references the same notification instance. In other words calls - /// to `wake` on the returned handle should be equivalent to calls to - /// `wake` on this handle. - /// - /// # Unsafety - /// - /// This function is unsafe to call because it's asserting the `UnsafeWake` - /// value is in a consistent state, i.e., hasn't been dropped. - unsafe fn clone_raw(&self) -> Waker; - - /// Drops this instance of `UnsafeWake`, deallocating resources - /// associated with it. 
- /// - /// FIXME(cramertj) - /// This method is intended to have a signature such as: - /// - /// ```ignore (not-a-doctest) - /// fn drop_raw(self: *mut Self); - /// ``` - /// - /// Unfortunately in Rust today that signature is not object safe. - /// Nevertheless it's recommended to implement this function *as if* that - /// were its signature. As such it is not safe to call on an invalid - /// pointer, nor is the validity of the pointer guaranteed after this - /// function returns. - /// - /// # Unsafety - /// - /// This function is unsafe to call because it's asserting the `UnsafeWake` - /// value is in a consistent state, i.e., hasn't been dropped. - unsafe fn drop_raw(&self); - - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake` should place - /// the associated task onto this queue. - /// - /// # Panics - /// - /// Implementations should avoid panicking, but clients should also be prepared - /// for panics. - /// - /// # Unsafety - /// - /// This function is unsafe to call because it's asserting the `UnsafeWake` - /// value is in a consistent state, i.e., hasn't been dropped. - unsafe fn wake(&self); - - /// Indicates that the associated task is ready to make progress and should - /// be `poll`ed. This function is the same as `wake`, but can only be called - /// from the thread that this `UnsafeWake` is "local" to. This allows for - /// implementors to provide specialized wakeup behavior specific to the current - /// thread. This function is called by `LocalWaker::wake`. - /// - /// Executors generally maintain a queue of "ready" tasks; `wake_local` should place - /// the associated task onto this queue. - /// - /// # Panics - /// - /// Implementations should avoid panicking, but clients should also be prepared - /// for panics. - /// - /// # Unsafety - /// - /// This function is unsafe to call because it's asserting the `UnsafeWake` - /// value is in a consistent state, i.e., hasn't been dropped, and that the - /// `UnsafeWake` hasn't moved from the thread on which it was created. - unsafe fn wake_local(&self) { - self.wake() +impl fmt::Debug for Waker { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let vtable_ptr = self.waker.vtable as *const RawWakerVTable; + f.debug_struct("Waker") + .field("data", &self.waker.data) + .field("vtable", &vtable_ptr) + .finish() } } diff --git a/src/libcore/tests/any.rs b/src/libcore/tests/any.rs index a80bf93953039..62bebcb03c96a 100644 --- a/src/libcore/tests/any.rs +++ b/src/libcore/tests/any.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::any::*; #[derive(PartialEq, Debug)] diff --git a/src/libcore/tests/array.rs b/src/libcore/tests/array.rs index 6278d5e23e0d6..9e133ac568811 100644 --- a/src/libcore/tests/array.rs +++ b/src/libcore/tests/array.rs @@ -1,12 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. use core::array::FixedSizeArray; use core::convert::TryFrom; diff --git a/src/libcore/tests/ascii.rs b/src/libcore/tests/ascii.rs index 950222dbcfa3f..ec98e0464c9e6 100644 --- a/src/libcore/tests/ascii.rs +++ b/src/libcore/tests/ascii.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::char::from_u32; #[test] diff --git a/src/libcore/tests/atomic.rs b/src/libcore/tests/atomic.rs index a3667b3f3fee0..05fe8460f324e 100644 --- a/src/libcore/tests/atomic.rs +++ b/src/libcore/tests/atomic.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::sync::atomic::*; use core::sync::atomic::Ordering::SeqCst; diff --git a/src/libcore/tests/cell.rs b/src/libcore/tests/cell.rs index 4b7243b9cfc79..7bd7d1874101a 100644 --- a/src/libcore/tests/cell.rs +++ b/src/libcore/tests/cell.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::cell::*; use core::default::Default; use std::mem::drop; @@ -15,15 +5,15 @@ use std::mem::drop; #[test] fn smoketest_cell() { let x = Cell::new(10); - assert!(x == Cell::new(10)); - assert!(x.get() == 10); + assert_eq!(x, Cell::new(10)); + assert_eq!(x.get(), 10); x.set(20); - assert!(x == Cell::new(20)); - assert!(x.get() == 20); + assert_eq!(x, Cell::new(20)); + assert_eq!(x.get(), 20); let y = Cell::new((30, 40)); - assert!(y == Cell::new((30, 40))); - assert!(y.get() == (30, 40)); + assert_eq!(y, Cell::new((30, 40))); + assert_eq!(y.get(), (30, 40)); } #[test] diff --git a/src/libcore/tests/char.rs b/src/libcore/tests/char.rs index 3d99c8ea9e229..57e9f4e384e0f 100644 --- a/src/libcore/tests/char.rs +++ b/src/libcore/tests/char.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
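Stepping back from the individual hunks: the wake.rs rewrite above replaces the old UnsafeWake trait object with a manually assembled vtable. The sketch below is not part of the patch; it shows one way an executor could build a no-op Waker from the new RawWaker/RawWakerVTable types and drive a future through Context::from_waker. The names noop_raw_waker and CountDown are invented for illustration, and the snippet assumes a toolchain where this API is usable (in this patch it is still gated behind the futures_api feature; on later stable releases it lives in std::task largely unchanged).

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll, RawWaker, RawWakerVTable, Waker};

// A waker that does nothing: the data pointer is never used, so `clone`
// just rebuilds the same RawWaker and the other vtable entries are no-ops.
fn noop_raw_waker() -> RawWaker {
    unsafe fn clone(_data: *const ()) -> RawWaker { noop_raw_waker() }
    unsafe fn wake(_data: *const ()) {}
    unsafe fn wake_by_ref(_data: *const ()) {}
    unsafe fn drop(_data: *const ()) {}
    static VTABLE: RawWakerVTable = RawWakerVTable::new(clone, wake, wake_by_ref, drop);
    RawWaker::new(std::ptr::null(), &VTABLE)
}

// A toy future that returns Pending once before completing.
struct CountDown(u32);

impl Future for CountDown {
    type Output = u32;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<u32> {
        if self.0 == 0 {
            Poll::Ready(42)
        } else {
            self.0 -= 1;
            // A real future would stash cx.waker().clone() somewhere and wake
            // it when progress becomes possible; here we wake immediately.
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}

fn main() {
    // SAFETY: the no-op vtable trivially upholds the RawWaker contract --
    // there are no resources to retain or release.
    let waker = unsafe { Waker::from_raw(noop_raw_waker()) };
    let mut cx = Context::from_waker(&waker);

    let mut fut = CountDown(1);
    let mut fut = Pin::new(&mut fut);
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Pending);
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Ready(42));
}

Note how wake(self) consumes the Waker while wake_by_ref(&self) does not; that split is exactly what the separate wake and wake_by_ref vtable entries encode.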
- use std::{char,str}; use std::convert::TryFrom; use std::str::FromStr; @@ -86,6 +76,8 @@ fn test_to_digit() { #[test] fn test_to_lowercase() { fn lower(c: char) -> String { + let to_lowercase = c.to_lowercase(); + assert_eq!(to_lowercase.len(), to_lowercase.count()); let iter: String = c.to_lowercase().collect(); let disp: String = c.to_lowercase().to_string(); assert_eq!(iter, disp); @@ -111,6 +103,8 @@ fn test_to_lowercase() { #[test] fn test_to_uppercase() { fn upper(c: char) -> String { + let to_uppercase = c.to_uppercase(); + assert_eq!(to_uppercase.len(), to_uppercase.count()); let iter: String = c.to_uppercase().collect(); let disp: String = c.to_uppercase().to_string(); assert_eq!(iter, disp); diff --git a/src/libcore/tests/clone.rs b/src/libcore/tests/clone.rs index 91d68ba33447a..c97a87aebce41 100644 --- a/src/libcore/tests/clone.rs +++ b/src/libcore/tests/clone.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #[test] fn test_borrowed_clone() { let x = 5; diff --git a/src/libcore/tests/cmp.rs b/src/libcore/tests/cmp.rs index 8c5179f59932b..4e624e5eb126e 100644 --- a/src/libcore/tests/cmp.rs +++ b/src/libcore/tests/cmp.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::cmp::Ordering::{Less, Greater, Equal}; #[test] diff --git a/src/libcore/tests/fmt/builders.rs b/src/libcore/tests/fmt/builders.rs index b7233658e9361..e4b75fe1fa265 100644 --- a/src/libcore/tests/fmt/builders.rs +++ b/src/libcore/tests/fmt/builders.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
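The char.rs hunk above adds a length check to the case-mapping tests. As an aside (not from the patch), here is a minimal standalone version of the property being exercised; it assumes a toolchain on which ToLowercase implements ExactSizeIterator, which is what the new assert relies on.

fn main() {
    // `char::to_lowercase` returns an iterator rather than a single char,
    // because a case mapping can expand to several code points.
    let lowered: String = 'İ'.to_lowercase().collect();
    assert_eq!(lowered.chars().count(), 2); // 'i' followed by U+0307 COMBINING DOT ABOVE

    // What the new asserts check: the iterator reports its exact length up
    // front, and that length matches the number of items it actually yields.
    let it = 'A'.to_lowercase();
    assert_eq!(it.len(), 'A'.to_lowercase().count());

    // `to_string` goes through the iterator's Display impl, as the test's
    // `disp` variable does.
    assert_eq!('A'.to_lowercase().to_string(), "a");
}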
- mod debug_struct { use std::fmt; @@ -40,7 +30,7 @@ mod debug_struct { assert_eq!("Foo { bar: true }", format!("{:?}", Foo)); assert_eq!( "Foo { - bar: true + bar: true, }", format!("{:#?}", Foo)); } @@ -62,7 +52,7 @@ mod debug_struct { assert_eq!( "Foo { bar: true, - baz: 10/20 + baz: 10/20, }", format!("{:#?}", Foo)); } @@ -97,9 +87,9 @@ mod debug_struct { "Bar { foo: Foo { bar: true, - baz: 10/20 + baz: 10/20, }, - hello: \"world\" + hello: \"world\", }", format!("{:#?}", Bar)); } @@ -137,7 +127,7 @@ mod debug_tuple { assert_eq!("Foo(true)", format!("{:?}", Foo)); assert_eq!( "Foo( - true + true, )", format!("{:#?}", Foo)); } @@ -159,7 +149,7 @@ mod debug_tuple { assert_eq!( "Foo( true, - 10/20 + 10/20, )", format!("{:#?}", Foo)); } @@ -194,9 +184,9 @@ mod debug_tuple { "Bar( Foo( true, - 10/20 + 10/20, ), - \"world\" + \"world\", )", format!("{:#?}", Bar)); } @@ -234,7 +224,7 @@ mod debug_map { assert_eq!("{\"bar\": true}", format!("{:?}", Foo)); assert_eq!( "{ - \"bar\": true + \"bar\": true, }", format!("{:#?}", Foo)); } @@ -256,7 +246,7 @@ mod debug_map { assert_eq!( "{ \"bar\": true, - 10: 10/20 + 10: 10/20, }", format!("{:#?}", Foo)); } @@ -292,12 +282,12 @@ mod debug_map { "{ \"foo\": { \"bar\": true, - 10: 10/20 + 10: 10/20, }, { \"bar\": true, - 10: 10/20 - }: \"world\" + 10: 10/20, + }: \"world\", }", format!("{:#?}", Bar)); } @@ -335,7 +325,7 @@ mod debug_set { assert_eq!("{true}", format!("{:?}", Foo)); assert_eq!( "{ - true + true, }", format!("{:#?}", Foo)); } @@ -357,7 +347,7 @@ mod debug_set { assert_eq!( "{ true, - 10/20 + 10/20, }", format!("{:#?}", Foo)); } @@ -392,9 +382,9 @@ mod debug_set { "{ { true, - 10/20 + 10/20, }, - \"world\" + \"world\", }", format!("{:#?}", Bar)); } @@ -432,7 +422,7 @@ mod debug_list { assert_eq!("[true]", format!("{:?}", Foo)); assert_eq!( "[ - true + true, ]", format!("{:#?}", Foo)); } @@ -454,7 +444,7 @@ mod debug_list { assert_eq!( "[ true, - 10/20 + 10/20, ]", format!("{:#?}", Foo)); } @@ -489,9 +479,9 @@ mod debug_list { "[ [ true, - 10/20 + 10/20, ], - \"world\" + \"world\", ]", format!("{:#?}", Bar)); } @@ -523,31 +513,31 @@ fn test_formatting_parameters_are_forwarded() { assert_eq!(format!("{:#03?}", struct_), " Foo { bar: 1024, - baz: 007 + baz: 007, } ".trim()); assert_eq!(format!("{:#03?}", tuple), " ( 1024, - 007 + 007, ) ".trim()); assert_eq!(format!("{:#03?}", list), " [ 1024, - 007 + 007, ] ".trim()); assert_eq!(format!("{:#03?}", map), r#" { "bar": 1024, - "baz": 007 + "baz": 007, } "#.trim()); assert_eq!(format!("{:#03?}", set), " { 007, - 1024 + 1024, } ".trim()); } diff --git a/src/libcore/tests/fmt/float.rs b/src/libcore/tests/fmt/float.rs index 138c3970e9087..bd0daf7a8eb84 100644 --- a/src/libcore/tests/fmt/float.rs +++ b/src/libcore/tests/fmt/float.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #[test] fn test_format_f64() { assert_eq!("1", format!("{:.0}", 1.0f64)); diff --git a/src/libcore/tests/fmt/mod.rs b/src/libcore/tests/fmt/mod.rs index 5d204c7d523d6..df1deeaeb97b7 100644 --- a/src/libcore/tests/fmt/mod.rs +++ b/src/libcore/tests/fmt/mod.rs @@ -1,18 +1,9 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod builders; mod float; mod num; #[test] +#[cfg(not(miri))] // Miri cannot print pointers fn test_format_flags() { // No residual flags left by pointer formatting let p = "".as_ptr(); @@ -22,6 +13,7 @@ fn test_format_flags() { } #[test] +#[cfg(not(miri))] // Miri cannot print pointers fn test_pointer_formats_data_pointer() { let b: &[u8] = b""; let s: &str = ""; diff --git a/src/libcore/tests/fmt/num.rs b/src/libcore/tests/fmt/num.rs index bc205ec0582ea..10fcf8b76ccb4 100644 --- a/src/libcore/tests/fmt/num.rs +++ b/src/libcore/tests/fmt/num.rs @@ -1,158 +1,148 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #[test] fn test_format_int() { // Formatting integers should select the right implementation based off // the type of the argument. Also, hex/octal/binary should be defined // for integers, but they shouldn't emit the negative sign. - assert!(format!("{}", 1isize) == "1"); - assert!(format!("{}", 1i8) == "1"); - assert!(format!("{}", 1i16) == "1"); - assert!(format!("{}", 1i32) == "1"); - assert!(format!("{}", 1i64) == "1"); - assert!(format!("{}", -1isize) == "-1"); - assert!(format!("{}", -1i8) == "-1"); - assert!(format!("{}", -1i16) == "-1"); - assert!(format!("{}", -1i32) == "-1"); - assert!(format!("{}", -1i64) == "-1"); - assert!(format!("{:?}", 1isize) == "1"); - assert!(format!("{:?}", 1i8) == "1"); - assert!(format!("{:?}", 1i16) == "1"); - assert!(format!("{:?}", 1i32) == "1"); - assert!(format!("{:?}", 1i64) == "1"); - assert!(format!("{:b}", 1isize) == "1"); - assert!(format!("{:b}", 1i8) == "1"); - assert!(format!("{:b}", 1i16) == "1"); - assert!(format!("{:b}", 1i32) == "1"); - assert!(format!("{:b}", 1i64) == "1"); - assert!(format!("{:x}", 1isize) == "1"); - assert!(format!("{:x}", 1i8) == "1"); - assert!(format!("{:x}", 1i16) == "1"); - assert!(format!("{:x}", 1i32) == "1"); - assert!(format!("{:x}", 1i64) == "1"); - assert!(format!("{:X}", 1isize) == "1"); - assert!(format!("{:X}", 1i8) == "1"); - assert!(format!("{:X}", 1i16) == "1"); - assert!(format!("{:X}", 1i32) == "1"); - assert!(format!("{:X}", 1i64) == "1"); - assert!(format!("{:o}", 1isize) == "1"); - assert!(format!("{:o}", 1i8) == "1"); - assert!(format!("{:o}", 1i16) == "1"); - assert!(format!("{:o}", 1i32) == "1"); - assert!(format!("{:o}", 1i64) == "1"); + assert_eq!(format!("{}", 1isize), "1"); + assert_eq!(format!("{}", 1i8), "1"); + assert_eq!(format!("{}", 1i16), "1"); + assert_eq!(format!("{}", 1i32), "1"); + assert_eq!(format!("{}", 1i64), "1"); + assert_eq!(format!("{}", -1isize), "-1"); + assert_eq!(format!("{}", -1i8), "-1"); + assert_eq!(format!("{}", -1i16), "-1"); + assert_eq!(format!("{}", -1i32), "-1"); + assert_eq!(format!("{}", -1i64), "-1"); + assert_eq!(format!("{:?}", 1isize), "1"); + assert_eq!(format!("{:?}", 1i8), "1"); + assert_eq!(format!("{:?}", 1i16), "1"); + assert_eq!(format!("{:?}", 1i32), "1"); + assert_eq!(format!("{:?}", 1i64), "1"); + 
assert_eq!(format!("{:b}", 1isize), "1"); + assert_eq!(format!("{:b}", 1i8), "1"); + assert_eq!(format!("{:b}", 1i16), "1"); + assert_eq!(format!("{:b}", 1i32), "1"); + assert_eq!(format!("{:b}", 1i64), "1"); + assert_eq!(format!("{:x}", 1isize), "1"); + assert_eq!(format!("{:x}", 1i8), "1"); + assert_eq!(format!("{:x}", 1i16), "1"); + assert_eq!(format!("{:x}", 1i32), "1"); + assert_eq!(format!("{:x}", 1i64), "1"); + assert_eq!(format!("{:X}", 1isize), "1"); + assert_eq!(format!("{:X}", 1i8), "1"); + assert_eq!(format!("{:X}", 1i16), "1"); + assert_eq!(format!("{:X}", 1i32), "1"); + assert_eq!(format!("{:X}", 1i64), "1"); + assert_eq!(format!("{:o}", 1isize), "1"); + assert_eq!(format!("{:o}", 1i8), "1"); + assert_eq!(format!("{:o}", 1i16), "1"); + assert_eq!(format!("{:o}", 1i32), "1"); + assert_eq!(format!("{:o}", 1i64), "1"); - assert!(format!("{}", 1usize) == "1"); - assert!(format!("{}", 1u8) == "1"); - assert!(format!("{}", 1u16) == "1"); - assert!(format!("{}", 1u32) == "1"); - assert!(format!("{}", 1u64) == "1"); - assert!(format!("{:?}", 1usize) == "1"); - assert!(format!("{:?}", 1u8) == "1"); - assert!(format!("{:?}", 1u16) == "1"); - assert!(format!("{:?}", 1u32) == "1"); - assert!(format!("{:?}", 1u64) == "1"); - assert!(format!("{:b}", 1usize) == "1"); - assert!(format!("{:b}", 1u8) == "1"); - assert!(format!("{:b}", 1u16) == "1"); - assert!(format!("{:b}", 1u32) == "1"); - assert!(format!("{:b}", 1u64) == "1"); - assert!(format!("{:x}", 1usize) == "1"); - assert!(format!("{:x}", 1u8) == "1"); - assert!(format!("{:x}", 1u16) == "1"); - assert!(format!("{:x}", 1u32) == "1"); - assert!(format!("{:x}", 1u64) == "1"); - assert!(format!("{:X}", 1usize) == "1"); - assert!(format!("{:X}", 1u8) == "1"); - assert!(format!("{:X}", 1u16) == "1"); - assert!(format!("{:X}", 1u32) == "1"); - assert!(format!("{:X}", 1u64) == "1"); - assert!(format!("{:o}", 1usize) == "1"); - assert!(format!("{:o}", 1u8) == "1"); - assert!(format!("{:o}", 1u16) == "1"); - assert!(format!("{:o}", 1u32) == "1"); - assert!(format!("{:o}", 1u64) == "1"); + assert_eq!(format!("{}", 1usize), "1"); + assert_eq!(format!("{}", 1u8), "1"); + assert_eq!(format!("{}", 1u16), "1"); + assert_eq!(format!("{}", 1u32), "1"); + assert_eq!(format!("{}", 1u64), "1"); + assert_eq!(format!("{:?}", 1usize), "1"); + assert_eq!(format!("{:?}", 1u8), "1"); + assert_eq!(format!("{:?}", 1u16), "1"); + assert_eq!(format!("{:?}", 1u32), "1"); + assert_eq!(format!("{:?}", 1u64), "1"); + assert_eq!(format!("{:b}", 1usize), "1"); + assert_eq!(format!("{:b}", 1u8), "1"); + assert_eq!(format!("{:b}", 1u16), "1"); + assert_eq!(format!("{:b}", 1u32), "1"); + assert_eq!(format!("{:b}", 1u64), "1"); + assert_eq!(format!("{:x}", 1usize), "1"); + assert_eq!(format!("{:x}", 1u8), "1"); + assert_eq!(format!("{:x}", 1u16), "1"); + assert_eq!(format!("{:x}", 1u32), "1"); + assert_eq!(format!("{:x}", 1u64), "1"); + assert_eq!(format!("{:X}", 1usize), "1"); + assert_eq!(format!("{:X}", 1u8), "1"); + assert_eq!(format!("{:X}", 1u16), "1"); + assert_eq!(format!("{:X}", 1u32), "1"); + assert_eq!(format!("{:X}", 1u64), "1"); + assert_eq!(format!("{:o}", 1usize), "1"); + assert_eq!(format!("{:o}", 1u8), "1"); + assert_eq!(format!("{:o}", 1u16), "1"); + assert_eq!(format!("{:o}", 1u32), "1"); + assert_eq!(format!("{:o}", 1u64), "1"); // Test a larger number - assert!(format!("{:b}", 55) == "110111"); - assert!(format!("{:o}", 55) == "67"); - assert!(format!("{}", 55) == "55"); - assert!(format!("{:x}", 55) == "37"); - assert!(format!("{:X}", 55) == "37"); + 
assert_eq!(format!("{:b}", 55), "110111"); + assert_eq!(format!("{:o}", 55), "67"); + assert_eq!(format!("{}", 55), "55"); + assert_eq!(format!("{:x}", 55), "37"); + assert_eq!(format!("{:X}", 55), "37"); } #[test] fn test_format_int_zero() { - assert!(format!("{}", 0) == "0"); - assert!(format!("{:?}", 0) == "0"); - assert!(format!("{:b}", 0) == "0"); - assert!(format!("{:o}", 0) == "0"); - assert!(format!("{:x}", 0) == "0"); - assert!(format!("{:X}", 0) == "0"); + assert_eq!(format!("{}", 0), "0"); + assert_eq!(format!("{:?}", 0), "0"); + assert_eq!(format!("{:b}", 0), "0"); + assert_eq!(format!("{:o}", 0), "0"); + assert_eq!(format!("{:x}", 0), "0"); + assert_eq!(format!("{:X}", 0), "0"); - assert!(format!("{}", 0u32) == "0"); - assert!(format!("{:?}", 0u32) == "0"); - assert!(format!("{:b}", 0u32) == "0"); - assert!(format!("{:o}", 0u32) == "0"); - assert!(format!("{:x}", 0u32) == "0"); - assert!(format!("{:X}", 0u32) == "0"); + assert_eq!(format!("{}", 0u32), "0"); + assert_eq!(format!("{:?}", 0u32), "0"); + assert_eq!(format!("{:b}", 0u32), "0"); + assert_eq!(format!("{:o}", 0u32), "0"); + assert_eq!(format!("{:x}", 0u32), "0"); + assert_eq!(format!("{:X}", 0u32), "0"); } #[test] fn test_format_int_flags() { - assert!(format!("{:3}", 1) == " 1"); - assert!(format!("{:>3}", 1) == " 1"); - assert!(format!("{:>+3}", 1) == " +1"); - assert!(format!("{:<3}", 1) == "1 "); - assert!(format!("{:#}", 1) == "1"); - assert!(format!("{:#x}", 10) == "0xa"); - assert!(format!("{:#X}", 10) == "0xA"); - assert!(format!("{:#5x}", 10) == " 0xa"); - assert!(format!("{:#o}", 10) == "0o12"); - assert!(format!("{:08x}", 10) == "0000000a"); - assert!(format!("{:8x}", 10) == " a"); - assert!(format!("{:<8x}", 10) == "a "); - assert!(format!("{:>8x}", 10) == " a"); - assert!(format!("{:#08x}", 10) == "0x00000a"); - assert!(format!("{:08}", -10) == "-0000010"); - assert!(format!("{:x}", !0u8) == "ff"); - assert!(format!("{:X}", !0u8) == "FF"); - assert!(format!("{:b}", !0u8) == "11111111"); - assert!(format!("{:o}", !0u8) == "377"); - assert!(format!("{:#x}", !0u8) == "0xff"); - assert!(format!("{:#X}", !0u8) == "0xFF"); - assert!(format!("{:#b}", !0u8) == "0b11111111"); - assert!(format!("{:#o}", !0u8) == "0o377"); + assert_eq!(format!("{:3}", 1), " 1"); + assert_eq!(format!("{:>3}", 1), " 1"); + assert_eq!(format!("{:>+3}", 1), " +1"); + assert_eq!(format!("{:<3}", 1), "1 "); + assert_eq!(format!("{:#}", 1), "1"); + assert_eq!(format!("{:#x}", 10), "0xa"); + assert_eq!(format!("{:#X}", 10), "0xA"); + assert_eq!(format!("{:#5x}", 10), " 0xa"); + assert_eq!(format!("{:#o}", 10), "0o12"); + assert_eq!(format!("{:08x}", 10), "0000000a"); + assert_eq!(format!("{:8x}", 10), " a"); + assert_eq!(format!("{:<8x}", 10), "a "); + assert_eq!(format!("{:>8x}", 10), " a"); + assert_eq!(format!("{:#08x}", 10), "0x00000a"); + assert_eq!(format!("{:08}", -10), "-0000010"); + assert_eq!(format!("{:x}", !0u8), "ff"); + assert_eq!(format!("{:X}", !0u8), "FF"); + assert_eq!(format!("{:b}", !0u8), "11111111"); + assert_eq!(format!("{:o}", !0u8), "377"); + assert_eq!(format!("{:#x}", !0u8), "0xff"); + assert_eq!(format!("{:#X}", !0u8), "0xFF"); + assert_eq!(format!("{:#b}", !0u8), "0b11111111"); + assert_eq!(format!("{:#o}", !0u8), "0o377"); } #[test] fn test_format_int_sign_padding() { - assert!(format!("{:+5}", 1) == " +1"); - assert!(format!("{:+5}", -1) == " -1"); - assert!(format!("{:05}", 1) == "00001"); - assert!(format!("{:05}", -1) == "-0001"); - assert!(format!("{:+05}", 1) == "+0001"); - assert!(format!("{:+05}", -1) 
== "-0001"); + assert_eq!(format!("{:+5}", 1), " +1"); + assert_eq!(format!("{:+5}", -1), " -1"); + assert_eq!(format!("{:05}", 1), "00001"); + assert_eq!(format!("{:05}", -1), "-0001"); + assert_eq!(format!("{:+05}", 1), "+0001"); + assert_eq!(format!("{:+05}", -1), "-0001"); } #[test] fn test_format_int_twos_complement() { - use core::{i8, i16, i32, i64}; - assert!(format!("{}", i8::MIN) == "-128"); - assert!(format!("{}", i16::MIN) == "-32768"); - assert!(format!("{}", i32::MIN) == "-2147483648"); - assert!(format!("{}", i64::MIN) == "-9223372036854775808"); + use core::{i16, i32, i64, i8}; + assert_eq!(format!("{}", i8::MIN), "-128"); + assert_eq!(format!("{}", i16::MIN), "-32768"); + assert_eq!(format!("{}", i32::MIN), "-2147483648"); + assert_eq!(format!("{}", i64::MIN), "-9223372036854775808"); } #[test] fn test_format_debug_hex() { - assert!(format!("{:02x?}", b"Foo\0") == "[46, 6f, 6f, 00]"); - assert!(format!("{:02X?}", b"Foo\0") == "[46, 6F, 6F, 00]"); + assert_eq!(format!("{:02x?}", b"Foo\0"), "[46, 6f, 6f, 00]"); + assert_eq!(format!("{:02X?}", b"Foo\0"), "[46, 6F, 6F, 00]"); } diff --git a/src/libcore/tests/hash/mod.rs b/src/libcore/tests/hash/mod.rs index 85c9d41b65b59..1000088e6b063 100644 --- a/src/libcore/tests/hash/mod.rs +++ b/src/libcore/tests/hash/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - mod sip; use std::hash::{Hash, Hasher}; @@ -83,9 +73,11 @@ fn test_writer_hasher() { let cs: &mut [u8] = &mut [1, 2, 3]; let ptr = cs.as_ptr(); let slice_ptr = cs as *const [u8]; + #[cfg(not(miri))] // Miri cannot hash pointers assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64); let slice_ptr = cs as *mut [u8]; + #[cfg(not(miri))] // Miri cannot hash pointers assert_eq!(hash(&slice_ptr), hash(&ptr) + cs.len() as u64); } diff --git a/src/libcore/tests/hash/sip.rs b/src/libcore/tests/hash/sip.rs index bad858011e960..b615cfd77ef1d 100644 --- a/src/libcore/tests/hash/sip.rs +++ b/src/libcore/tests/hash/sip.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(deprecated)] use core::hash::{Hash, Hasher}; diff --git a/src/libcore/tests/intrinsics.rs b/src/libcore/tests/intrinsics.rs index 9f3cba26a62db..7544c13dee4bf 100644 --- a/src/libcore/tests/intrinsics.rs +++ b/src/libcore/tests/intrinsics.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use core::any::TypeId; #[test] diff --git a/src/libcore/tests/iter.rs b/src/libcore/tests/iter.rs index 00b4aa4fa2d7a..d5b581d336d2f 100644 --- a/src/libcore/tests/iter.rs +++ b/src/libcore/tests/iter.rs @@ -1,13 +1,5 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use core::cell::Cell; +use core::convert::TryFrom; use core::iter::*; use core::{i8, i16, isize}; use core::usize; @@ -886,7 +878,7 @@ fn test_iterator_flat_map() { assert_eq!(i, ys.len()); } -/// Test `FlatMap::fold` with items already picked off the front and back, +/// Tests `FlatMap::fold` with items already picked off the front and back, /// to make sure all parts of the `FlatMap` are folded correctly. #[test] fn test_iterator_flat_map_fold() { @@ -924,7 +916,7 @@ fn test_iterator_flatten() { assert_eq!(i, ys.len()); } -/// Test `Flatten::fold` with items already picked off the front and back, +/// Tests `Flatten::fold` with items already picked off the front and back, /// to make sure all parts of the `Flatten` are folded correctly. #[test] fn test_iterator_flatten_fold() { @@ -1016,6 +1008,33 @@ fn test_iterator_nth() { assert_eq!(v.iter().nth(v.len()), None); } +#[test] +fn test_iterator_nth_back() { + let v: &[_] = &[0, 1, 2, 3, 4]; + for i in 0..v.len() { + assert_eq!(v.iter().nth_back(i).unwrap(), &v[v.len() - 1 - i]); + } + assert_eq!(v.iter().nth_back(v.len()), None); +} + +#[test] +fn test_iterator_rev_nth_back() { + let v: &[_] = &[0, 1, 2, 3, 4]; + for i in 0..v.len() { + assert_eq!(v.iter().rev().nth_back(i).unwrap(), &v[i]); + } + assert_eq!(v.iter().rev().nth_back(v.len()), None); +} + +#[test] +fn test_iterator_rev_nth() { + let v: &[_] = &[0, 1, 2, 3, 4]; + for i in 0..v.len() { + assert_eq!(v.iter().rev().nth(i).unwrap(), &v[v.len() - 1 - i]); + } + assert_eq!(v.iter().rev().nth(v.len()), None); +} + #[test] fn test_iterator_last() { let v: &[_] = &[0, 1, 2, 3, 4]; @@ -1063,12 +1082,39 @@ fn test_iterator_product_result() { assert_eq!(v.iter().cloned().product::>(), Err(())); } +/// A wrapper struct that implements `Eq` and `Ord` based on the wrapped +/// integer modulo 3. Used to test that `Iterator::max` and `Iterator::min` +/// return the correct element if some of them are equal. 
+#[derive(Debug)] +struct Mod3(i32); + +impl PartialEq for Mod3 { + fn eq(&self, other: &Self) -> bool { + self.0 % 3 == other.0 % 3 + } +} + +impl Eq for Mod3 {} + +impl PartialOrd for Mod3 { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Mod3 { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + (self.0 % 3).cmp(&(other.0 % 3)) + } +} + #[test] fn test_iterator_max() { let v: &[_] = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]; assert_eq!(v[..4].iter().cloned().max(), Some(3)); assert_eq!(v.iter().cloned().max(), Some(10)); assert_eq!(v[..0].iter().cloned().max(), None); + assert_eq!(v.iter().cloned().map(Mod3).max().map(|x| x.0), Some(8)); } #[test] @@ -1077,6 +1123,7 @@ fn test_iterator_min() { assert_eq!(v[..4].iter().cloned().min(), Some(0)); assert_eq!(v.iter().cloned().min(), Some(0)); assert_eq!(v[..0].iter().cloned().min(), None); + assert_eq!(v.iter().cloned().map(Mod3).min().map(|x| x.0), Some(0)); } #[test] @@ -1236,6 +1283,23 @@ fn test_rev() { vec![16, 14, 12, 10, 8, 6]); } +#[test] +fn test_copied() { + let xs = [2, 4, 6, 8]; + + let mut it = xs.iter().copied(); + assert_eq!(it.len(), 4); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.len(), 3); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.len(), 2); + assert_eq!(it.next_back(), Some(8)); + assert_eq!(it.len(), 1); + assert_eq!(it.next_back(), Some(6)); + assert_eq!(it.len(), 0); + assert_eq!(it.next_back(), None); +} + #[test] fn test_cloned() { let xs = [2, 4, 6, 8]; @@ -1704,19 +1768,97 @@ fn test_range_inclusive_folds() { assert_eq!((1..=10).sum::(), 55); assert_eq!((1..=10).rev().sum::(), 55); - let mut it = 40..=50; + let mut it = 44..=50; + assert_eq!(it.try_fold(0, i8::checked_add), None); + assert_eq!(it, 47..=50); assert_eq!(it.try_fold(0, i8::checked_add), None); - assert_eq!(it, 44..=50); + assert_eq!(it, 50..=50); + assert_eq!(it.try_fold(0, i8::checked_add), Some(50)); + assert!(it.is_empty()); + assert_eq!(it.try_fold(0, i8::checked_add), Some(0)); + assert!(it.is_empty()); + + let mut it = 40..=47; assert_eq!(it.try_rfold(0, i8::checked_add), None); - assert_eq!(it, 44..=47); + assert_eq!(it, 40..=44); + assert_eq!(it.try_rfold(0, i8::checked_add), None); + assert_eq!(it, 40..=41); + assert_eq!(it.try_rfold(0, i8::checked_add), Some(81)); + assert!(it.is_empty()); + assert_eq!(it.try_rfold(0, i8::checked_add), Some(0)); + assert!(it.is_empty()); let mut it = 10..=20; assert_eq!(it.try_fold(0, |a,b| Some(a+b)), Some(165)); assert!(it.is_empty()); + assert_eq!(it.try_fold(0, |a,b| Some(a+b)), Some(0)); + assert!(it.is_empty()); let mut it = 10..=20; assert_eq!(it.try_rfold(0, |a,b| Some(a+b)), Some(165)); assert!(it.is_empty()); + assert_eq!(it.try_rfold(0, |a,b| Some(a+b)), Some(0)); + assert!(it.is_empty()); +} + +#[test] +fn test_range_size_hint() { + use core::usize::MAX as UMAX; + assert_eq!((0..0usize).size_hint(), (0, Some(0))); + assert_eq!((0..100usize).size_hint(), (100, Some(100))); + assert_eq!((0..UMAX).size_hint(), (UMAX, Some(UMAX))); + + let umax = u128::try_from(UMAX).unwrap(); + assert_eq!((0..0u128).size_hint(), (0, Some(0))); + assert_eq!((0..100u128).size_hint(), (100, Some(100))); + assert_eq!((0..umax).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((0..umax + 1).size_hint(), (UMAX, None)); + + use core::isize::{MAX as IMAX, MIN as IMIN}; + assert_eq!((0..0isize).size_hint(), (0, Some(0))); + assert_eq!((-100..100isize).size_hint(), (200, Some(200))); + assert_eq!((IMIN..IMAX).size_hint(), (UMAX, Some(UMAX))); + + let imin = 
i128::try_from(IMIN).unwrap(); + let imax = i128::try_from(IMAX).unwrap(); + assert_eq!((0..0i128).size_hint(), (0, Some(0))); + assert_eq!((-100..100i128).size_hint(), (200, Some(200))); + assert_eq!((imin..imax).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((imin..imax + 1).size_hint(), (UMAX, None)); +} + +#[test] +fn test_range_inclusive_size_hint() { + use core::usize::MAX as UMAX; + assert_eq!((1..=0usize).size_hint(), (0, Some(0))); + assert_eq!((0..=0usize).size_hint(), (1, Some(1))); + assert_eq!((0..=100usize).size_hint(), (101, Some(101))); + assert_eq!((0..=UMAX - 1).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((0..=UMAX).size_hint(), (UMAX, None)); + + let umax = u128::try_from(UMAX).unwrap(); + assert_eq!((1..=0u128).size_hint(), (0, Some(0))); + assert_eq!((0..=0u128).size_hint(), (1, Some(1))); + assert_eq!((0..=100u128).size_hint(), (101, Some(101))); + assert_eq!((0..=umax - 1).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((0..=umax).size_hint(), (UMAX, None)); + assert_eq!((0..=umax + 1).size_hint(), (UMAX, None)); + + use core::isize::{MAX as IMAX, MIN as IMIN}; + assert_eq!((0..=-1isize).size_hint(), (0, Some(0))); + assert_eq!((0..=0isize).size_hint(), (1, Some(1))); + assert_eq!((-100..=100isize).size_hint(), (201, Some(201))); + assert_eq!((IMIN..=IMAX - 1).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((IMIN..=IMAX).size_hint(), (UMAX, None)); + + let imin = i128::try_from(IMIN).unwrap(); + let imax = i128::try_from(IMAX).unwrap(); + assert_eq!((0..=-1i128).size_hint(), (0, Some(0))); + assert_eq!((0..=0i128).size_hint(), (1, Some(1))); + assert_eq!((-100..=100i128).size_hint(), (201, Some(201))); + assert_eq!((imin..=imax - 1).size_hint(), (UMAX, Some(UMAX))); + assert_eq!((imin..=imax).size_hint(), (UMAX, None)); + assert_eq!((imin..=imax + 1).size_hint(), (UMAX, None)); } #[test] @@ -1872,6 +2014,23 @@ fn test_once() { assert_eq!(it.next(), None); } +#[test] +fn test_once_with() { + let count = Cell::new(0); + let mut it = once_with(|| { + count.set(count.get() + 1); + 42 + }); + + assert_eq!(count.get(), 0); + assert_eq!(it.next(), Some(42)); + assert_eq!(count.get(), 1); + assert_eq!(it.next(), None); + assert_eq!(count.get(), 1); + assert_eq!(it.next(), None); + assert_eq!(count.get(), 1); +} + #[test] fn test_empty() { let mut it = empty::(); @@ -2183,3 +2342,16 @@ fn test_monad_laws_associativity() { assert_eq!((0..10).flat_map(f).flat_map(g).sum::(), (0..10).flat_map(|x| f(x).flat_map(g)).sum::()); } + +#[test] +fn test_is_sorted() { + assert!([1, 2, 2, 9].iter().is_sorted()); + assert!(![1, 3, 2].iter().is_sorted()); + assert!([0].iter().is_sorted()); + assert!(std::iter::empty::().is_sorted()); + assert!(![0.0, 1.0, std::f32::NAN].iter().is_sorted()); + assert!([-2, -1, 0, 3].iter().is_sorted()); + assert!(![-2i32, -1, 0, 3].iter().is_sorted_by_key(|n| n.abs())); + assert!(!["c", "bb", "aaa"].iter().is_sorted()); + assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len())); +} diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs index 7d62b4fa90f20..392a0ffabe3d6 100644 --- a/src/libcore/tests/lib.rs +++ b/src/libcore/tests/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
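The Mod3 wrapper added to iter.rs above pins down a subtle guarantee about ties: when several elements compare equal, Iterator::max returns the last of them and Iterator::min returns the first. Below is a condensed, self-contained restatement of that check (not part of the patch):

use std::cmp::Ordering;

// Orders integers by their value modulo 3, so distinct numbers can compare equal.
#[derive(Debug, Clone, Copy)]
struct Mod3(i32);

impl PartialEq for Mod3 {
    fn eq(&self, other: &Self) -> bool {
        self.0 % 3 == other.0 % 3
    }
}
impl Eq for Mod3 {}

impl PartialOrd for Mod3 {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Mod3 {
    fn cmp(&self, other: &Self) -> Ordering {
        (self.0 % 3).cmp(&(other.0 % 3))
    }
}

fn main() {
    let v = 0..=10;
    // 2, 5, and 8 all have the maximal key (2 mod 3); `max` keeps the last one.
    assert_eq!(v.clone().map(Mod3).max().map(|x| x.0), Some(8));
    // 0, 3, 6, and 9 share the minimal key (0 mod 3); `min` keeps the first one.
    assert_eq!(v.map(Mod3).min().map(|x| x.0), Some(0));
}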
- #![feature(box_syntax)] #![feature(cell_update)] #![feature(core_private_bignum)] @@ -19,20 +9,21 @@ #![feature(flt2dec)] #![feature(fmt_internals)] #![feature(hashmap_internals)] -#![feature(iter_unfold)] +#![feature(is_sorted)] +#![feature(iter_copied)] +#![feature(iter_nth_back)] +#![feature(iter_once_with)] #![feature(pattern)] #![feature(range_is_empty)] #![feature(raw)] -#![feature(refcell_map_split)] -#![feature(refcell_replace_swap)] #![feature(slice_patterns)] #![feature(sort_internals)] +#![feature(slice_partition_at_index)] #![feature(specialization)] #![feature(step_trait)] #![feature(str_internals)] #![feature(test)] #![feature(trusted_len)] -#![feature(try_from)] #![feature(try_trait)] #![feature(align_offset)] #![feature(reverse_bits)] @@ -40,6 +31,7 @@ #![feature(slice_internals)] #![feature(slice_partition_dedup)] #![feature(copy_within)] +#![feature(int_error_matching)] extern crate core; extern crate test; diff --git a/src/libcore/tests/manually_drop.rs b/src/libcore/tests/manually_drop.rs index 82dfb8d4c0b2a..49a1c187ea6cd 100644 --- a/src/libcore/tests/manually_drop.rs +++ b/src/libcore/tests/manually_drop.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::mem::ManuallyDrop; #[test] diff --git a/src/libcore/tests/mem.rs b/src/libcore/tests/mem.rs index 714f2babbdff6..f5b241959fdd2 100644 --- a/src/libcore/tests/mem.rs +++ b/src/libcore/tests/mem.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::mem::*; #[test] diff --git a/src/libcore/tests/nonzero.rs b/src/libcore/tests/nonzero.rs index bbb1ef76bccec..77e484601bc22 100644 --- a/src/libcore/tests/nonzero.rs +++ b/src/libcore/tests/nonzero.rs @@ -1,16 +1,5 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
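The lib.rs hunk above swaps in the feature gates the new iterator tests rely on (is_sorted, iter_copied, iter_nth_back, iter_once_with). The sketch below exercises the same adapters; it is not from the patch and assumes a toolchain recent enough that these methods have since been stabilized, so no feature attributes are needed.

use std::iter;

fn main() {
    let xs = [2, 4, 6, 8];

    // `copied()` turns an iterator of &i32 into an iterator of i32.
    let doubled: Vec<i32> = xs.iter().copied().map(|x| x * 2).collect();
    assert_eq!(doubled, vec![4, 8, 12, 16]);

    // `nth_back(n)` skips n elements from the back and yields the next one.
    assert_eq!(xs.iter().nth_back(0), Some(&8));
    assert_eq!(xs.iter().nth_back(3), Some(&2));
    assert_eq!(xs.iter().nth_back(4), None);

    // `once_with` defers computing its single item until `next` is called.
    let mut it = iter::once_with(|| 6 * 7);
    assert_eq!(it.next(), Some(42));
    assert_eq!(it.next(), None);

    // `is_sorted` / `is_sorted_by_key` check ordering without collecting.
    assert!([1, 2, 2, 9].iter().is_sorted());
    assert!(["c", "bb", "aaa"].iter().is_sorted_by_key(|s| s.len()));
}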
- -use core::num::NonZeroU32; -use core::option::Option; -use core::option::Option::{Some, None}; +use core::num::{IntErrorKind, NonZeroI32, NonZeroI8, NonZeroU32, NonZeroU8}; +use core::option::Option::{self, None, Some}; use std::mem::size_of; #[test] @@ -23,6 +12,7 @@ fn test_create_nonzero_instance() { #[test] fn test_size_nonzero_in_option() { assert_eq!(size_of::(), size_of::>()); + assert_eq!(size_of::(), size_of::>()); } #[test] @@ -128,3 +118,31 @@ fn test_from_nonzero() { let num: u32 = nz.into(); assert_eq!(num, 1u32); } + +#[test] +fn test_from_signed_nonzero() { + let nz = NonZeroI32::new(1).unwrap(); + let num: i32 = nz.into(); + assert_eq!(num, 1i32); +} + +#[test] +fn test_from_str() { + assert_eq!("123".parse::(), Ok(NonZeroU8::new(123).unwrap())); + assert_eq!( + "0".parse::().err().map(|e| e.kind().clone()), + Some(IntErrorKind::Zero) + ); + assert_eq!( + "-1".parse::().err().map(|e| e.kind().clone()), + Some(IntErrorKind::InvalidDigit) + ); + assert_eq!( + "-129".parse::().err().map(|e| e.kind().clone()), + Some(IntErrorKind::Underflow) + ); + assert_eq!( + "257".parse::().err().map(|e| e.kind().clone()), + Some(IntErrorKind::Overflow) + ); +} diff --git a/src/libcore/tests/num/bignum.rs b/src/libcore/tests/num/bignum.rs index 58a9dd1b128ce..b873f1dd0652f 100644 --- a/src/libcore/tests/num/bignum.rs +++ b/src/libcore/tests/num/bignum.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::prelude::v1::*; use core::num::bignum::tests::Big8x3 as Big; diff --git a/src/libcore/tests/num/dec2flt/mod.rs b/src/libcore/tests/num/dec2flt/mod.rs index 879a41b4b770d..faeaabbf95ada 100644 --- a/src/libcore/tests/num/dec2flt/mod.rs +++ b/src/libcore/tests/num/dec2flt/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(overflowing_literals)] use std::{i64, f32, f64}; @@ -62,6 +52,7 @@ fn large() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn subnormals() { test_literal!(5e-324); test_literal!(91e-324); @@ -73,6 +64,7 @@ fn subnormals() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn infinity() { test_literal!(1e400); test_literal!(1e309); diff --git a/src/libcore/tests/num/dec2flt/parse.rs b/src/libcore/tests/num/dec2flt/parse.rs index 3ad694e38adb0..1eac484119170 100644 --- a/src/libcore/tests/num/dec2flt/parse.rs +++ b/src/libcore/tests/num/dec2flt/parse.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
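The nonzero.rs tests above exercise two properties of the NonZero types: the niche that makes Option<NonZeroT> the same size as T, and the new FromStr impls rejecting zero and out-of-range input. A stable-Rust sketch of both follows; the patch additionally classifies the failures via the unstable IntErrorKind API, which is omitted here:

use std::mem::size_of;
use std::num::{NonZeroI32, NonZeroU8};

fn main() {
    // Zero is the unused bit pattern, so Option can reuse it as its None value.
    assert_eq!(size_of::<Option<NonZeroU8>>(), size_of::<u8>());
    assert_eq!(size_of::<Option<NonZeroI32>>(), size_of::<i32>());

    // Parsing goes through FromStr for the underlying integer, then rejects zero.
    assert_eq!("123".parse::<NonZeroU8>(), Ok(NonZeroU8::new(123).unwrap()));
    assert!("0".parse::<NonZeroU8>().is_err());    // in range, but zero
    assert!("-1".parse::<NonZeroU8>().is_err());   // invalid digit for an unsigned type
    assert!("257".parse::<NonZeroU8>().is_err());  // overflows u8

    // Conversion back to the primitive is a plain From impl.
    let nz = NonZeroI32::new(1).unwrap();
    let n: i32 = nz.into();
    assert_eq!(n, 1);
}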
- use core::num::dec2flt::parse::{Decimal, parse_decimal}; use core::num::dec2flt::parse::ParseResult::{Valid, Invalid}; diff --git a/src/libcore/tests/num/dec2flt/rawfp.rs b/src/libcore/tests/num/dec2flt/rawfp.rs index c9cd2bf5a9ae7..747c1bfa3f9c2 100644 --- a/src/libcore/tests/num/dec2flt/rawfp.rs +++ b/src/libcore/tests/num/dec2flt/rawfp.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::f32; use std::f64; use core::num::diy_float::Fp; diff --git a/src/libcore/tests/num/flt2dec/estimator.rs b/src/libcore/tests/num/flt2dec/estimator.rs index 857aae72c8a5b..fb0888e2720a9 100644 --- a/src/libcore/tests/num/flt2dec/estimator.rs +++ b/src/libcore/tests/num/flt2dec/estimator.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::num::flt2dec::estimator::*; #[test] diff --git a/src/libcore/tests/num/flt2dec/mod.rs b/src/libcore/tests/num/flt2dec/mod.rs index 04567e25e25ba..d362c7994d806 100644 --- a/src/libcore/tests/num/flt2dec/mod.rs +++ b/src/libcore/tests/num/flt2dec/mod.rs @@ -1,12 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +#![cfg(not(miri))] // Miri does not implement ldexp, which most tests here need use std::prelude::v1::*; use std::{str, i16, f32, f64, fmt}; diff --git a/src/libcore/tests/num/flt2dec/random.rs b/src/libcore/tests/num/flt2dec/random.rs index 21a7c9fc6b337..1c36af6af0ee4 100644 --- a/src/libcore/tests/num/flt2dec/random.rs +++ b/src/libcore/tests/num/flt2dec/random.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![cfg(not(target_arch = "wasm32"))] use std::i16; diff --git a/src/libcore/tests/num/flt2dec/strategy/dragon.rs b/src/libcore/tests/num/flt2dec/strategy/dragon.rs index 03772a765cc6a..1803e39b46df3 100644 --- a/src/libcore/tests/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/tests/num/flt2dec/strategy/dragon.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
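Several hunks above (fmt/mod.rs, hash/mod.rs, dec2flt, flt2dec) gate tests out of Miri runs with cfg(not(miri)). The pattern is ordinary conditional compilation on the miri cfg that the interpreter sets; a minimal sketch with an invented test is shown below (not from the patch):

// An expensive or unsupported test can be opted out when the crate is being
// interpreted by Miri, which sets the `miri` cfg.
#[test]
#[cfg(not(miri))] // hypothetical example: far too slow under the interpreter
fn exhaustive_subnormal_check() {
    for bits in 0..=0x000f_ffffu32 {
        let x = f32::from_bits(bits);
        assert!(x == 0.0 || x.is_subnormal());
    }
}

// A whole test module can be skipped instead with an inner attribute,
//     #![cfg(not(miri))]
// at the top of the file, which is what flt2dec/mod.rs does above.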
- use std::prelude::v1::*; use super::super::*; use core::num::bignum::Big32x40 as Big; diff --git a/src/libcore/tests/num/flt2dec/strategy/grisu.rs b/src/libcore/tests/num/flt2dec/strategy/grisu.rs index 286b39d8cf3b3..53e9f12ae0f14 100644 --- a/src/libcore/tests/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/tests/num/flt2dec/strategy/grisu.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::super::*; use core::num::flt2dec::strategy::grisu::*; diff --git a/src/libcore/tests/num/i16.rs b/src/libcore/tests/num/i16.rs index 7435831ac6dba..f5544b914b73d 100644 --- a/src/libcore/tests/num/i16.rs +++ b/src/libcore/tests/num/i16.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - int_module!(i16, i16); diff --git a/src/libcore/tests/num/i32.rs b/src/libcore/tests/num/i32.rs index 3b3407e1ada52..39250ee84bce6 100644 --- a/src/libcore/tests/num/i32.rs +++ b/src/libcore/tests/num/i32.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - int_module!(i32, i32); diff --git a/src/libcore/tests/num/i64.rs b/src/libcore/tests/num/i64.rs index 9e1aec256eed0..fa4d2ab6638d7 100644 --- a/src/libcore/tests/num/i64.rs +++ b/src/libcore/tests/num/i64.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - int_module!(i64, i64); diff --git a/src/libcore/tests/num/i8.rs b/src/libcore/tests/num/i8.rs index f72244239b260..ccec6915fe090 100644 --- a/src/libcore/tests/num/i8.rs +++ b/src/libcore/tests/num/i8.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - int_module!(i8, i8); diff --git a/src/libcore/tests/num/int_macros.rs b/src/libcore/tests/num/int_macros.rs index 71d2e7945389b..4881f79ec248a 100644 --- a/src/libcore/tests/num/int_macros.rs +++ b/src/libcore/tests/num/int_macros.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - macro_rules! int_module { ($T:ident, $T_i:ident) => ( #[cfg(test)] mod tests { @@ -22,7 +12,7 @@ mod tests { fn test_overflows() { assert!(MAX > 0); assert!(MIN <= 0); - assert!(MIN + MAX + 1 == 0); + assert_eq!(MIN + MAX + 1, 0); } #[test] @@ -31,23 +21,23 @@ mod tests { } #[test] - fn test_mod_euc() { - assert!((-1 as $T).mod_euc(MIN) == MAX); + fn test_rem_euclid() { + assert_eq!((-1 as $T).rem_euclid(MIN), MAX); } #[test] pub fn test_abs() { - assert!((1 as $T).abs() == 1 as $T); - assert!((0 as $T).abs() == 0 as $T); - assert!((-1 as $T).abs() == 1 as $T); + assert_eq!((1 as $T).abs(), 1 as $T); + assert_eq!((0 as $T).abs(), 0 as $T); + assert_eq!((-1 as $T).abs(), 1 as $T); } #[test] fn test_signum() { - assert!((1 as $T).signum() == 1 as $T); - assert!((0 as $T).signum() == 0 as $T); - assert!((-0 as $T).signum() == 0 as $T); - assert!((-1 as $T).signum() == -1 as $T); + assert_eq!((1 as $T).signum(), 1 as $T); + assert_eq!((0 as $T).signum(), 0 as $T); + assert_eq!((-0 as $T).signum(), 0 as $T); + assert_eq!((-1 as $T).signum(), -1 as $T); } #[test] @@ -68,12 +58,12 @@ mod tests { #[test] fn test_bitwise_operators() { - assert!(0b1110 as $T == (0b1100 as $T).bitor(0b1010 as $T)); - assert!(0b1000 as $T == (0b1100 as $T).bitand(0b1010 as $T)); - assert!(0b0110 as $T == (0b1100 as $T).bitxor(0b1010 as $T)); - assert!(0b1110 as $T == (0b0111 as $T).shl(1)); - assert!(0b0111 as $T == (0b1110 as $T).shr(1)); - assert!(-(0b11 as $T) - (1 as $T) == (0b11 as $T).not()); + assert_eq!(0b1110 as $T, (0b1100 as $T).bitor(0b1010 as $T)); + assert_eq!(0b1000 as $T, (0b1100 as $T).bitand(0b1010 as $T)); + assert_eq!(0b0110 as $T, (0b1100 as $T).bitxor(0b1010 as $T)); + assert_eq!(0b1110 as $T, (0b0111 as $T).shl(1)); + assert_eq!(0b0111 as $T, (0b1110 as $T).shr(1)); + assert_eq!(-(0b11 as $T) - (1 as $T), (0b11 as $T).not()); } const A: $T = 0b0101100; @@ -85,17 +75,17 @@ mod tests { #[test] fn test_count_ones() { - assert!(A.count_ones() == 3); - assert!(B.count_ones() == 2); - assert!(C.count_ones() == 5); + assert_eq!(A.count_ones(), 3); + assert_eq!(B.count_ones(), 2); + assert_eq!(C.count_ones(), 5); } #[test] fn test_count_zeros() { let bits = mem::size_of::<$T>() * 8; - assert!(A.count_zeros() == bits as u32 - 3); - assert!(B.count_zeros() == bits as u32 - 2); - assert!(C.count_zeros() == bits as u32 - 5); + assert_eq!(A.count_zeros(), bits as u32 - 3); + assert_eq!(B.count_zeros(), bits as u32 - 2); + assert_eq!(C.count_zeros(), bits as u32 - 5); } #[test] @@ -158,9 +148,9 @@ mod tests { #[test] fn test_signed_checked_div() { - assert!((10 as $T).checked_div(2) == Some(5)); - assert!((5 as $T).checked_div(0) == None); - assert!(isize::MIN.checked_div(-1) == None); + assert_eq!((10 as $T).checked_div(2), Some(5)); + assert_eq!((5 as $T).checked_div(0), None); + assert_eq!(isize::MIN.checked_div(-1), None); } #[test] diff --git a/src/libcore/tests/num/mod.rs b/src/libcore/tests/num/mod.rs index 0928f7560e175..a17c094679ea8 100644 --- a/src/libcore/tests/num/mod.rs +++ b/src/libcore/tests/num/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - use core::convert::{TryFrom, TryInto}; use core::cmp::PartialEq; use core::fmt::Debug; @@ -694,23 +684,23 @@ macro_rules! test_float { assert!(($nan as $fty).max($nan).is_nan()); } #[test] - fn mod_euc() { + fn rem_euclid() { let a: $fty = 42.0; - assert!($inf.mod_euc(a).is_nan()); - assert_eq!(a.mod_euc($inf), a); - assert!(a.mod_euc($nan).is_nan()); - assert!($inf.mod_euc($inf).is_nan()); - assert!($inf.mod_euc($nan).is_nan()); - assert!($nan.mod_euc($inf).is_nan()); + assert!($inf.rem_euclid(a).is_nan()); + assert_eq!(a.rem_euclid($inf), a); + assert!(a.rem_euclid($nan).is_nan()); + assert!($inf.rem_euclid($inf).is_nan()); + assert!($inf.rem_euclid($nan).is_nan()); + assert!($nan.rem_euclid($inf).is_nan()); } #[test] - fn div_euc() { + fn div_euclid() { let a: $fty = 42.0; - assert_eq!(a.div_euc($inf), 0.0); - assert!(a.div_euc($nan).is_nan()); - assert!($inf.div_euc($inf).is_nan()); - assert!($inf.div_euc($nan).is_nan()); - assert!($nan.div_euc($inf).is_nan()); + assert_eq!(a.div_euclid($inf), 0.0); + assert!(a.div_euclid($nan).is_nan()); + assert!($inf.div_euclid($inf).is_nan()); + assert!($inf.div_euclid($nan).is_nan()); + assert!($nan.div_euclid($inf).is_nan()); } } } } diff --git a/src/libcore/tests/num/u16.rs b/src/libcore/tests/num/u16.rs index 8455207583cc1..435b914224c5a 100644 --- a/src/libcore/tests/num/u16.rs +++ b/src/libcore/tests/num/u16.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - uint_module!(u16, u16); diff --git a/src/libcore/tests/num/u32.rs b/src/libcore/tests/num/u32.rs index b44e60f652979..71dc005dea370 100644 --- a/src/libcore/tests/num/u32.rs +++ b/src/libcore/tests/num/u32.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - uint_module!(u32, u32); diff --git a/src/libcore/tests/num/u64.rs b/src/libcore/tests/num/u64.rs index ffcd1015d58d6..b498ebc52042e 100644 --- a/src/libcore/tests/num/u64.rs +++ b/src/libcore/tests/num/u64.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - uint_module!(u64, u64); diff --git a/src/libcore/tests/num/u8.rs b/src/libcore/tests/num/u8.rs index 4ee14e22f2d57..68e938be704ac 100644 --- a/src/libcore/tests/num/u8.rs +++ b/src/libcore/tests/num/u8.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - uint_module!(u8, u8); diff --git a/src/libcore/tests/num/uint_macros.rs b/src/libcore/tests/num/uint_macros.rs index ca6906f731047..6e81542b6ec88 100644 --- a/src/libcore/tests/num/uint_macros.rs +++ b/src/libcore/tests/num/uint_macros.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - macro_rules! uint_module { ($T:ident, $T_i:ident) => ( #[cfg(test)] mod tests { diff --git a/src/libcore/tests/ops.rs b/src/libcore/tests/ops.rs index d66193b1687c8..78cf07119e729 100644 --- a/src/libcore/tests/ops.rs +++ b/src/libcore/tests/ops.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::ops::{Range, RangeFull, RangeFrom, RangeTo, RangeInclusive}; // Test the Range structs without the syntactic sugar. @@ -17,11 +7,11 @@ fn test_range() { let r = Range { start: 2, end: 10 }; let mut count = 0; for (i, ri) in r.enumerate() { - assert!(ri == i + 2); + assert_eq!(ri, i + 2); assert!(ri >= 2 && ri < 10); count += 1; } - assert!(count == 8); + assert_eq!(count, 8); } #[test] @@ -29,11 +19,11 @@ fn test_range_from() { let r = RangeFrom { start: 2 }; let mut count = 0; for (i, ri) in r.take(10).enumerate() { - assert!(ri == i + 2); + assert_eq!(ri, i + 2); assert!(ri >= 2 && ri < 12); count += 1; } - assert!(count == 10); + assert_eq!(count, 10); } #[test] diff --git a/src/libcore/tests/option.rs b/src/libcore/tests/option.rs index 1324ba2d9a9c3..b059b134868d9 100644 --- a/src/libcore/tests/option.rs +++ b/src/libcore/tests/option.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::option::*; use core::mem; use core::clone::Clone; @@ -248,6 +238,27 @@ fn test_collect() { assert!(v == None); } +#[test] +fn test_copied() { + let val = 1; + let val_ref = &val; + let opt_none: Option<&'static u32> = None; + let opt_ref = Some(&val); + let opt_ref_ref = Some(&val_ref); + + // None works + assert_eq!(opt_none.clone(), None); + assert_eq!(opt_none.copied(), None); + + // Immutable ref works + assert_eq!(opt_ref.clone(), Some(&val)); + assert_eq!(opt_ref.copied(), Some(1)); + + // Double Immutable ref works + assert_eq!(opt_ref_ref.clone(), Some(&val_ref)); + assert_eq!(opt_ref_ref.clone().copied(), Some(&val)); + assert_eq!(opt_ref_ref.copied().copied(), Some(1)); +} #[test] fn test_cloned() { diff --git a/src/libcore/tests/pattern.rs b/src/libcore/tests/pattern.rs index cfa3b7ee6640f..b78ed0210770f 100644 --- a/src/libcore/tests/pattern.rs +++ b/src/libcore/tests/pattern.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::str::pattern::*; // This macro makes it easier to write diff --git a/src/libcore/tests/ptr.rs b/src/libcore/tests/ptr.rs index 92160910d8f70..03fe1fe5a7cf8 100644 --- a/src/libcore/tests/ptr.rs +++ b/src/libcore/tests/ptr.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::ptr::*; use core::cell::RefCell; @@ -54,13 +44,13 @@ fn test_is_null() { let p: *const isize = null(); assert!(p.is_null()); - let q = unsafe { p.offset(1) }; + let q = p.wrapping_offset(1); assert!(!q.is_null()); let mp: *mut isize = null_mut(); assert!(mp.is_null()); - let mq = unsafe { mp.offset(1) }; + let mq = mp.wrapping_offset(1); assert!(!mq.is_null()); // Pointers to unsized types -- slices @@ -155,6 +145,7 @@ fn test_as_ref() { } #[test] +#[cfg(not(miri))] // This test is UB according to Stacked Borrows fn test_as_mut() { unsafe { let p: *mut isize = null_mut(); @@ -231,8 +222,11 @@ fn test_ptr_subtraction() { let m_start = xs_mut.as_mut_ptr(); let mut m_ptr = m_start.offset(9); - while m_ptr >= m_start { + loop { *m_ptr += *m_ptr; + if m_ptr == m_start { + break; + } m_ptr = m_ptr.offset(-1); } @@ -259,6 +253,7 @@ fn test_unsized_nonnull() { #[test] #[allow(warnings)] +#[cfg(not(miri))] // Miri cannot hash pointers // Have a symbol for the test below. It doesn’t need to be an actual variadic function, match the // ABI, or even point to an actual executable code, because the function itself is never invoked. #[no_mangle] @@ -298,6 +293,7 @@ fn write_unaligned_drop() { } #[test] +#[cfg(not(miri))] // Miri cannot compute actual alignment of an allocation fn align_offset_zst() { // For pointers of stride = 0, the pointer is already aligned or it cannot be aligned at // all, because no amount of elements will align the pointer. @@ -312,6 +308,7 @@ fn align_offset_zst() { } #[test] +#[cfg(not(miri))] // Miri cannot compute actual alignment of an allocation fn align_offset_stride1() { // For pointers of stride = 1, the pointer can always be aligned. The offset is equal to // number of bytes. @@ -328,6 +325,7 @@ fn align_offset_stride1() { } #[test] +#[cfg(not(miri))] // Miri is too slow fn align_offset_weird_strides() { #[repr(packed)] struct A3(u16, u8); diff --git a/src/libcore/tests/result.rs b/src/libcore/tests/result.rs index 0c00992ffd84e..1fab07526a07f 100644 --- a/src/libcore/tests/result.rs +++ b/src/libcore/tests/result.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use core::option::*; fn op1() -> Result { Ok(666) } diff --git a/src/libcore/tests/slice.rs b/src/libcore/tests/slice.rs index 4f00ebee1d227..007283b5f69c8 100644 --- a/src/libcore/tests/slice.rs +++ b/src/libcore/tests/slice.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::result::Result::{Ok, Err}; #[test] @@ -588,6 +578,19 @@ fn test_windows_nth() { assert_eq!(c2.next(), None); } +#[test] +fn test_windows_nth_back() { + let v: &[i32] = &[0, 1, 2, 3, 4, 5]; + let mut c = v.windows(2); + assert_eq!(c.nth_back(2).unwrap()[0], 2); + assert_eq!(c.next_back().unwrap()[1], 2); + + let v2: &[i32] = &[0, 1, 2, 3, 4]; + let mut c2 = v2.windows(4); + assert_eq!(c2.nth_back(1).unwrap()[1], 1); + assert_eq!(c2.next_back(), None); +} + #[test] fn test_windows_last() { let v: &[i32] = &[0, 1, 2, 3, 4, 5]; @@ -1021,6 +1024,7 @@ fn test_rotate_right() { #[test] #[cfg(not(target_arch = "wasm32"))] +#[cfg(not(miri))] // Miri does not support entropy fn sort_unstable() { use core::cmp::Ordering::{Equal, Greater, Less}; use core::slice::heapsort; @@ -1089,6 +1093,124 @@ fn sort_unstable() { assert!(v == [0xDEADBEEF]); } +#[test] +#[cfg(not(target_arch = "wasm32"))] +#[cfg(not(miri))] // Miri does not support entropy +fn partition_at_index() { + use core::cmp::Ordering::{Equal, Greater, Less}; + use rand::rngs::SmallRng; + use rand::seq::SliceRandom; + use rand::{FromEntropy, Rng}; + + let mut rng = SmallRng::from_entropy(); + + for len in (2..21).chain(500..501) { + let mut orig = vec![0; len]; + + for &modulus in &[5, 10, 1000] { + for _ in 0..10 { + for i in 0..len { + orig[i] = rng.gen::() % modulus; + } + + let v_sorted = { + let mut v = orig.clone(); + v.sort(); + v + }; + + // Sort in default order. + for pivot in 0..len { + let mut v = orig.clone(); + v.partition_at_index(pivot); + + assert_eq!(v_sorted[pivot], v[pivot]); + for i in 0..pivot { + for j in pivot..len { + assert!(v[i] <= v[j]); + } + } + } + + // Sort in ascending order. + for pivot in 0..len { + let mut v = orig.clone(); + let (left, pivot, right) = v.partition_at_index_by(pivot, |a, b| a.cmp(b)); + + assert_eq!(left.len() + right.len(), len - 1); + + for l in left { + assert!(l <= pivot); + for r in right.iter_mut() { + assert!(l <= r); + assert!(pivot <= r); + } + } + } + + // Sort in descending order. + let sort_descending_comparator = |a: &i32, b: &i32| b.cmp(a); + let v_sorted_descending = { + let mut v = orig.clone(); + v.sort_by(sort_descending_comparator); + v + }; + + for pivot in 0..len { + let mut v = orig.clone(); + v.partition_at_index_by(pivot, sort_descending_comparator); + + assert_eq!(v_sorted_descending[pivot], v[pivot]); + for i in 0..pivot { + for j in pivot..len { + assert!(v[j] <= v[i]); + } + } + } + } + } + } + + // Sort at index using a completely random comparison function. + // This will reorder the elements *somehow*, but won't panic. + let mut v = [0; 500]; + for i in 0..v.len() { + v[i] = i as i32; + } + + for pivot in 0..v.len() { + v.partition_at_index_by(pivot, |_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap()); + v.sort(); + for i in 0..v.len() { + assert_eq!(v[i], i as i32); + } + } + + // Should not panic. 
+ [(); 10].partition_at_index(0); + [(); 10].partition_at_index(5); + [(); 10].partition_at_index(9); + [(); 100].partition_at_index(0); + [(); 100].partition_at_index(50); + [(); 100].partition_at_index(99); + + let mut v = [0xDEADBEEFu64]; + v.partition_at_index(0); + assert!(v == [0xDEADBEEF]); +} + +#[test] +#[should_panic(expected = "index 0 greater than length of slice")] +fn partition_at_index_zero_length() { + [0i32; 0].partition_at_index(0); +} + +#[test] +#[should_panic(expected = "index 20 greater than length of slice")] +fn partition_at_index_past_length() { + [0i32; 10].partition_at_index(20); +} + pub mod memchr { use core::slice::memchr::{memchr, memrchr}; @@ -1176,6 +1298,7 @@ pub mod memchr { } #[test] +#[cfg(not(miri))] // Miri cannot compute actual alignment of an allocation fn test_align_to_simple() { let bytes = [1u8, 2, 3, 4, 5, 6, 7]; let (prefix, aligned, suffix) = unsafe { bytes.align_to::() }; @@ -1199,6 +1322,7 @@ fn test_align_to_zst() { } #[test] +#[cfg(not(miri))] // Miri cannot compute actual alignment of an allocation fn test_align_to_non_trivial() { #[repr(align(8))] struct U64(u64, u64); #[repr(align(8))] struct U64U64U32(u64, u64, u32); @@ -1327,3 +1451,18 @@ fn test_copy_within_panics_src_inverted() { // 2 is greater than 1, so this range is invalid. bytes.copy_within(2..1, 0); } + +#[test] +fn test_is_sorted() { + let empty: [i32; 0] = []; + + assert!([1, 2, 2, 9].is_sorted()); + assert!(![1, 3, 2].is_sorted()); + assert!([0].is_sorted()); + assert!(empty.is_sorted()); + assert!(![0.0, 1.0, std::f32::NAN].is_sorted()); + assert!([-2, -1, 0, 3].is_sorted()); + assert!(![-2i32, -1, 0, 3].is_sorted_by_key(|n| n.abs())); + assert!(!["c", "bb", "aaa"].is_sorted()); + assert!(["c", "bb", "aaa"].is_sorted_by_key(|s| s.len())); +} diff --git a/src/libcore/tests/str.rs b/src/libcore/tests/str.rs index 343c9596c5383..ed939ca7139a5 100644 --- a/src/libcore/tests/str.rs +++ b/src/libcore/tests/str.rs @@ -1,11 +1 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // All `str` tests live in liballoc/tests diff --git a/src/libcore/tests/str_lossy.rs b/src/libcore/tests/str_lossy.rs index 56ef3f070c1fa..f9fd333cca712 100644 --- a/src/libcore/tests/str_lossy.rs +++ b/src/libcore/tests/str_lossy.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::str::lossy::*; #[test] diff --git a/src/libcore/tests/time.rs b/src/libcore/tests/time.rs index 466f28f0ef0aa..6efd22572dc18 100644 --- a/src/libcore/tests/time.rs +++ b/src/libcore/tests/time.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use core::time::Duration; #[test] diff --git a/src/libcore/tests/tuple.rs b/src/libcore/tests/tuple.rs index 4fe5e0a740bf7..c7ed1612dd5ea 100644 --- a/src/libcore/tests/tuple.rs +++ b/src/libcore/tests/tuple.rs @@ -1,14 +1,5 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::cmp::Ordering::{Equal, Less, Greater}; +use std::f64::NAN; #[test] fn test_clone() { @@ -18,18 +9,18 @@ fn test_clone() { } #[test] -fn test_tuple_cmp() { +fn test_partial_eq() { let (small, big) = ((1, 2, 3), (3, 2, 1)); - - let nan = 0.0f64/0.0; - - // PartialEq assert_eq!(small, small); assert_eq!(big, big); - assert!(small != big); - assert!(big != small); + assert_ne!(small, big); + assert_ne!(big, small); +} + +#[test] +fn test_partial_ord() { + let (small, big) = ((1, 2, 3), (3, 2, 1)); - // PartialOrd assert!(small < big); assert!(!(small < small)); assert!(!(big < small)); @@ -43,18 +34,21 @@ fn test_tuple_cmp() { assert!(big >= small); assert!(big >= big); - assert!(!((1.0f64, 2.0f64) < (nan, 3.0))); - assert!(!((1.0f64, 2.0f64) <= (nan, 3.0))); - assert!(!((1.0f64, 2.0f64) > (nan, 3.0))); - assert!(!((1.0f64, 2.0f64) >= (nan, 3.0))); - assert!(((1.0f64, 2.0f64) < (2.0, nan))); - assert!(!((2.0f64, 2.0f64) < (2.0, nan))); + assert!(!((1.0f64, 2.0f64) < (NAN, 3.0))); + assert!(!((1.0f64, 2.0f64) <= (NAN, 3.0))); + assert!(!((1.0f64, 2.0f64) > (NAN, 3.0))); + assert!(!((1.0f64, 2.0f64) >= (NAN, 3.0))); + assert!(((1.0f64, 2.0f64) < (2.0, NAN))); + assert!(!((2.0f64, 2.0f64) < (2.0, NAN))); +} - // Ord - assert!(small.cmp(&small) == Equal); - assert!(big.cmp(&big) == Equal); - assert!(small.cmp(&big) == Less); - assert!(big.cmp(&small) == Greater); +#[test] +fn test_ord() { + let (small, big) = ((1, 2, 3), (3, 2, 1)); + assert_eq!(small.cmp(&small), Equal); + assert_eq!(big.cmp(&big), Equal); + assert_eq!(small.cmp(&big), Less); + assert_eq!(big.cmp(&small), Greater); } #[test] diff --git a/src/libcore/time.rs b/src/libcore/time.rs index 475bb721f23f7..ae6d8078fd236 100644 --- a/src/libcore/time.rs +++ b/src/libcore/time.rs @@ -1,12 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. #![stable(feature = "duration_core", since = "1.25.0")] //! Temporal quantification. @@ -30,13 +21,12 @@ const NANOS_PER_MILLI: u32 = 1_000_000; const NANOS_PER_MICRO: u32 = 1_000; const MILLIS_PER_SEC: u64 = 1_000; const MICROS_PER_SEC: u64 = 1_000_000; -const MAX_NANOS_F64: f64 = ((u64::MAX as u128 + 1)*(NANOS_PER_SEC as u128)) as f64; /// A `Duration` type to represent a span of time, typically used for system /// timeouts. /// /// Each `Duration` is composed of a whole number of seconds and a fractional part -/// represented in nanoseconds. If the underlying system does not support +/// represented in nanoseconds. If the underlying system does not support /// nanosecond-level precision, APIs binding a system timeout will typically round up /// the number of nanoseconds. 
/// @@ -68,6 +58,58 @@ pub struct Duration { } impl Duration { + /// The duration of one second. + /// + /// # Examples + /// + /// ``` + /// #![feature(duration_constants)] + /// use std::time::Duration; + /// + /// assert_eq!(Duration::SECOND, Duration::from_secs(1)); + /// ``` + #[unstable(feature = "duration_constants", issue = "57391")] + pub const SECOND: Duration = Duration::from_secs(1); + + /// The duration of one millisecond. + /// + /// # Examples + /// + /// ``` + /// #![feature(duration_constants)] + /// use std::time::Duration; + /// + /// assert_eq!(Duration::MILLISECOND, Duration::from_millis(1)); + /// ``` + #[unstable(feature = "duration_constants", issue = "57391")] + pub const MILLISECOND: Duration = Duration::from_millis(1); + + /// The duration of one microsecond. + /// + /// # Examples + /// + /// ``` + /// #![feature(duration_constants)] + /// use std::time::Duration; + /// + /// assert_eq!(Duration::MICROSECOND, Duration::from_micros(1)); + /// ``` + #[unstable(feature = "duration_constants", issue = "57391")] + pub const MICROSECOND: Duration = Duration::from_micros(1); + + /// The duration of one nanosecond. + /// + /// # Examples + /// + /// ``` + /// #![feature(duration_constants)] + /// use std::time::Duration; + /// + /// assert_eq!(Duration::NANOSECOND, Duration::from_nanos(1)); + /// ``` + #[unstable(feature = "duration_constants", issue = "57391")] + pub const NANOSECOND: Duration = Duration::from_nanos(1); + /// Creates a new `Duration` from the specified number of whole seconds and /// additional nanoseconds. /// @@ -274,13 +316,12 @@ impl Duration { /// # Examples /// /// ``` - /// # #![feature(duration_as_u128)] /// use std::time::Duration; /// /// let duration = Duration::new(5, 730023852); /// assert_eq!(duration.as_millis(), 5730); /// ``` - #[unstable(feature = "duration_as_u128", issue = "50202")] + #[stable(feature = "duration_as_u128", since = "1.33.0")] #[inline] pub const fn as_millis(&self) -> u128 { self.secs as u128 * MILLIS_PER_SEC as u128 + (self.nanos / NANOS_PER_MILLI) as u128 @@ -291,13 +332,12 @@ impl Duration { /// # Examples /// /// ``` - /// # #![feature(duration_as_u128)] /// use std::time::Duration; /// /// let duration = Duration::new(5, 730023852); /// assert_eq!(duration.as_micros(), 5730023); /// ``` - #[unstable(feature = "duration_as_u128", issue = "50202")] + #[stable(feature = "duration_as_u128", since = "1.33.0")] #[inline] pub const fn as_micros(&self) -> u128 { self.secs as u128 * MICROS_PER_SEC as u128 + (self.nanos / NANOS_PER_MICRO) as u128 @@ -308,13 +348,12 @@ impl Duration { /// # Examples /// /// ``` - /// # #![feature(duration_as_u128)] /// use std::time::Duration; /// /// let duration = Duration::new(5, 730023852); /// assert_eq!(duration.as_nanos(), 5730023852); /// ``` - #[unstable(feature = "duration_as_u128", issue = "50202")] + #[stable(feature = "duration_as_u128", since = "1.33.0")] #[inline] pub const fn as_nanos(&self) -> u128 { self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos as u128 @@ -470,15 +509,34 @@ impl Duration { /// use std::time::Duration; /// /// let dur = Duration::new(2, 700_000_000); - /// assert_eq!(dur.as_float_secs(), 2.7); + /// assert_eq!(dur.as_secs_f64(), 2.7); /// ``` #[unstable(feature = "duration_float", issue = "54361")] #[inline] - pub const fn as_float_secs(&self) -> f64 { + pub const fn as_secs_f64(&self) -> f64 { (self.secs as f64) + (self.nanos as f64) / (NANOS_PER_SEC as f64) } - /// Creates a new `Duration` from the specified number of seconds. 
+ /// Returns the number of seconds contained by this `Duration` as `f32`. + /// + /// The returned value does include the fractional (nanosecond) part of the duration. + /// + /// # Examples + /// ``` + /// #![feature(duration_float)] + /// use std::time::Duration; + /// + /// let dur = Duration::new(2, 700_000_000); + /// assert_eq!(dur.as_secs_f32(), 2.7); + /// ``` + #[unstable(feature = "duration_float", issue = "54361")] + #[inline] + pub const fn as_secs_f32(&self) -> f32 { + (self.secs as f32) + (self.nanos as f32) / (NANOS_PER_SEC as f32) + } + + /// Creates a new `Duration` from the specified number of seconds represented + /// as `f64`. /// /// # Panics /// This constructor will panic if `secs` is not finite, negative or overflows `Duration`. @@ -488,12 +546,14 @@ impl Duration { /// #![feature(duration_float)] /// use std::time::Duration; /// - /// let dur = Duration::from_float_secs(2.7); + /// let dur = Duration::from_secs_f64(2.7); /// assert_eq!(dur, Duration::new(2, 700_000_000)); /// ``` #[unstable(feature = "duration_float", issue = "54361")] #[inline] - pub fn from_float_secs(secs: f64) -> Duration { + pub fn from_secs_f64(secs: f64) -> Duration { + const MAX_NANOS_F64: f64 = + ((u64::MAX as u128 + 1)*(NANOS_PER_SEC as u128)) as f64; let nanos = secs * (NANOS_PER_SEC as f64); if !nanos.is_finite() { panic!("got non-finite value when converting float to duration"); @@ -511,7 +571,43 @@ impl Duration { } } - /// Multiply `Duration` by `f64`. + /// Creates a new `Duration` from the specified number of seconds represented + /// as `f32`. + /// + /// # Panics + /// This constructor will panic if `secs` is not finite, negative or overflows `Duration`. + /// + /// # Examples + /// ``` + /// #![feature(duration_float)] + /// use std::time::Duration; + /// + /// let dur = Duration::from_secs_f32(2.7); + /// assert_eq!(dur, Duration::new(2, 700_000_000)); + /// ``` + #[unstable(feature = "duration_float", issue = "54361")] + #[inline] + pub fn from_secs_f32(secs: f32) -> Duration { + const MAX_NANOS_F32: f32 = + ((u64::MAX as u128 + 1)*(NANOS_PER_SEC as u128)) as f32; + let nanos = secs * (NANOS_PER_SEC as f32); + if !nanos.is_finite() { + panic!("got non-finite value when converting float to duration"); + } + if nanos >= MAX_NANOS_F32 { + panic!("overflow when converting float to duration"); + } + if nanos < 0.0 { + panic!("underflow when converting float to duration"); + } + let nanos = nanos as u128; + Duration { + secs: (nanos / (NANOS_PER_SEC as u128)) as u64, + nanos: (nanos % (NANOS_PER_SEC as u128)) as u32, + } + } + + /// Multiplies `Duration` by `f64`. /// /// # Panics /// This method will panic if result is not finite, negative or overflows `Duration`. @@ -528,7 +624,29 @@ impl Duration { #[unstable(feature = "duration_float", issue = "54361")] #[inline] pub fn mul_f64(self, rhs: f64) -> Duration { - Duration::from_float_secs(rhs * self.as_float_secs()) + Duration::from_secs_f64(rhs * self.as_secs_f64()) + } + + /// Multiplies `Duration` by `f32`. + /// + /// # Panics + /// This method will panic if result is not finite, negative or overflows `Duration`. 
+ /// + /// # Examples + /// ``` + /// #![feature(duration_float)] + /// use std::time::Duration; + /// + /// let dur = Duration::new(2, 700_000_000); + /// // note that due to rounding errors result is slightly different + /// // from 8.478 and 847800.0 + /// assert_eq!(dur.mul_f32(3.14), Duration::new(8, 478_000_640)); + /// assert_eq!(dur.mul_f32(3.14e5), Duration::new(847799, 969_120_256)); + /// ``` + #[unstable(feature = "duration_float", issue = "54361")] + #[inline] + pub fn mul_f32(self, rhs: f32) -> Duration { + Duration::from_secs_f32(rhs * self.as_secs_f32()) } /// Divide `Duration` by `f64`. @@ -549,7 +667,30 @@ impl Duration { #[unstable(feature = "duration_float", issue = "54361")] #[inline] pub fn div_f64(self, rhs: f64) -> Duration { - Duration::from_float_secs(self.as_float_secs() / rhs) + Duration::from_secs_f64(self.as_secs_f64() / rhs) + } + + /// Divide `Duration` by `f32`. + /// + /// # Panics + /// This method will panic if result is not finite, negative or overflows `Duration`. + /// + /// # Examples + /// ``` + /// #![feature(duration_float)] + /// use std::time::Duration; + /// + /// let dur = Duration::new(2, 700_000_000); + /// // note that due to rounding errors result is slightly + /// // different from 0.859_872_611 + /// assert_eq!(dur.div_f32(3.14), Duration::new(0, 859_872_576)); + /// // note that truncation is used, not rounding + /// assert_eq!(dur.div_f32(3.14e5), Duration::new(0, 8_598)); + /// ``` + #[unstable(feature = "duration_float", issue = "54361")] + #[inline] + pub fn div_f32(self, rhs: f32) -> Duration { + Duration::from_secs_f32(self.as_secs_f32() / rhs) } /// Divide `Duration` by `Duration` and return `f64`. @@ -561,12 +702,29 @@ impl Duration { /// /// let dur1 = Duration::new(2, 700_000_000); /// let dur2 = Duration::new(5, 400_000_000); - /// assert_eq!(dur1.div_duration(dur2), 0.5); + /// assert_eq!(dur1.div_duration_f64(dur2), 0.5); + /// ``` + #[unstable(feature = "duration_float", issue = "54361")] + #[inline] + pub fn div_duration_f64(self, rhs: Duration) -> f64 { + self.as_secs_f64() / rhs.as_secs_f64() + } + + /// Divide `Duration` by `Duration` and return `f32`. + /// + /// # Examples + /// ``` + /// #![feature(duration_float)] + /// use std::time::Duration; + /// + /// let dur1 = Duration::new(2, 700_000_000); + /// let dur2 = Duration::new(5, 400_000_000); + /// assert_eq!(dur1.div_duration_f32(dur2), 0.5); /// ``` #[unstable(feature = "duration_float", issue = "54361")] #[inline] - pub fn div_duration(self, rhs: Duration) -> f64 { - self.as_float_secs() / rhs.as_float_secs() + pub fn div_duration_f32(self, rhs: Duration) -> f32 { + self.as_secs_f32() / rhs.as_secs_f32() } } diff --git a/src/libcore/tuple.rs b/src/libcore/tuple.rs index 4c5370194fecb..a82666d8f70f8 100644 --- a/src/libcore/tuple.rs +++ b/src/libcore/tuple.rs @@ -1,13 +1,3 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // See src/libstd/primitive_docs.rs for documentation. use cmp::*; diff --git a/src/libcore/unicode/bool_trie.rs b/src/libcore/unicode/bool_trie.rs index 0e6437fded594..39584d346e4a8 100644 --- a/src/libcore/unicode/bool_trie.rs +++ b/src/libcore/unicode/bool_trie.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// BoolTrie is a trie for representing a set of Unicode codepoints. It is /// implemented with postfix compression (sharing of identical child nodes), /// which gives both compact size and fast lookup. diff --git a/src/libcore/unicode/mod.rs b/src/libcore/unicode/mod.rs index e5cda880f8807..3b86246269f62 100644 --- a/src/libcore/unicode/mod.rs +++ b/src/libcore/unicode/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![unstable(feature = "unicode_internals", issue = "0")] #![allow(missing_docs)] diff --git a/src/libcore/unicode/printable.py b/src/libcore/unicode/printable.py index 9410dafbbc364..1288a784123cd 100644 --- a/src/libcore/unicode/printable.py +++ b/src/libcore/unicode/printable.py @@ -1,14 +1,4 @@ #!/usr/bin/env python -# -# Copyright 2011-2016 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # This script uses the following Unicode tables: # - UnicodeData.txt @@ -177,16 +167,6 @@ def main(): normal1 = compress_normal(normal1) print("""\ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // NOTE: The following code was generated by "src/libcore/unicode/printable.py", // do not edit directly! diff --git a/src/libcore/unicode/printable.rs b/src/libcore/unicode/printable.rs index 32e4b6b0fa512..a950e82cba241 100644 --- a/src/libcore/unicode/printable.rs +++ b/src/libcore/unicode/printable.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // NOTE: The following code was generated by "src/libcore/unicode/printable.py", // do not edit directly! diff --git a/src/libcore/unicode/tables.rs b/src/libcore/unicode/tables.rs index e525c0574002b..edef4ca361e4f 100644 --- a/src/libcore/unicode/tables.rs +++ b/src/libcore/unicode/tables.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - // NOTE: The following code was generated by "./unicode.py", do not edit directly #![allow(missing_docs, non_upper_case_globals, non_snake_case)] diff --git a/src/libcore/unicode/unicode.py b/src/libcore/unicode/unicode.py index 28a1e01805e46..ae356c3ff4459 100755 --- a/src/libcore/unicode/unicode.py +++ b/src/libcore/unicode/unicode.py @@ -1,14 +1,4 @@ #!/usr/bin/env python -# -# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT -# file at the top-level directory of this distribution and at -# http://rust-lang.org/COPYRIGHT. -# -# Licensed under the Apache License, Version 2.0 or the MIT license -# , at your -# option. This file may not be copied, modified, or distributed -# except according to those terms. # This script uses the following Unicode tables: # - DerivedCoreProperties.txt @@ -28,16 +18,7 @@ # The directory in which this file resides. fdir = os.path.dirname(os.path.realpath(__file__)) + "/" -preamble = '''// Copyright 2012-{year} The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +preamble = ''' // NOTE: The following code was generated by "./unicode.py", do not edit directly #![allow(missing_docs, non_upper_case_globals, non_snake_case)] diff --git a/src/libcore/unicode/version.rs b/src/libcore/unicode/version.rs index 59ebf5f501269..4d68d2e8c2ef7 100644 --- a/src/libcore/unicode/version.rs +++ b/src/libcore/unicode/version.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// Represents a Unicode Version. /// /// See also: diff --git a/src/libcore/unit.rs b/src/libcore/unit.rs index 087ddf9688ab7..540025d77bb4c 100644 --- a/src/libcore/unit.rs +++ b/src/libcore/unit.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use iter::FromIterator; /// Collapses all unit items from an iterator into one. diff --git a/src/libfmt_macros/Cargo.toml b/src/libfmt_macros/Cargo.toml index b3f4d2deae2fc..50779a2d9ad08 100644 --- a/src/libfmt_macros/Cargo.toml +++ b/src/libfmt_macros/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "fmt_macros" version = "0.0.0" +edition = "2018" [lib] name = "fmt_macros" diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index 7d87c1839d78c..2536121c7a324 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -1,32 +1,24 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Macro support for format strings //! //! These structures are used when parsing format strings for the compiler. //! Parsing does not happen at runtime: structures of `std::fmt::rt` are //! generated instead. -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))))] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] + #![feature(nll)] +#![feature(rustc_private)] -pub use self::Piece::*; -pub use self::Position::*; -pub use self::Alignment::*; -pub use self::Flag::*; -pub use self::Count::*; +pub use Piece::*; +pub use Position::*; +pub use Alignment::*; +pub use Flag::*; +pub use Count::*; use std::str; use std::string; @@ -82,6 +74,15 @@ pub enum Position<'a> { ArgumentNamed(&'a str), } +impl Position<'_> { + pub fn index(&self) -> Option<usize> { + match self { + ArgumentIs(i) | ArgumentImplicitlyIs(i) => Some(*i), + _ => None, + } + } +} + /// Enum of alignments which are supported. #[derive(Copy, Clone, PartialEq)] pub enum Alignment { @@ -133,8 +134,9 @@ pub struct ParseError { pub description: string::String, pub note: Option<string::String>, pub label: string::String, - pub start: usize, - pub end: usize, + pub start: SpanIndex, + pub end: SpanIndex, + pub secondary_label: Option<(string::String, SpanIndex, SpanIndex)>, } /// The parser structure for interpreting the input format string.
This is @@ -152,27 +154,43 @@ pub struct Parser<'a> { curarg: usize, /// `Some(raw count)` when the string is "raw", used to position spans correctly style: Option<usize>, - /// How many newlines have been seen in the string so far, to adjust the error spans - seen_newlines: usize, /// Start and end byte offset of every successfully parsed argument - pub arg_places: Vec<(usize, usize)>, + pub arg_places: Vec<(SpanIndex, SpanIndex)>, + /// Characters that need to be shifted + skips: Vec<usize>, + /// Span offset of the last opening brace seen, used for error reporting + last_opening_brace_pos: Option<SpanIndex>, + /// Whether the source string comes from `println!` as opposed to `format!` or `print!` + append_newline: bool, +} + +#[derive(Clone, Copy, Debug)] +pub struct SpanIndex(pub usize); + +impl SpanIndex { + pub fn unwrap(self) -> usize { + self.0 + } } impl<'a> Iterator for Parser<'a> { type Item = Piece<'a>; fn next(&mut self) -> Option<Piece<'a>> { - let raw = self.style.map(|raw| raw + self.seen_newlines).unwrap_or(0); if let Some(&(pos, c)) = self.cur.peek() { match c { '{' => { + let curr_last_brace = self.last_opening_brace_pos; + self.last_opening_brace_pos = Some(self.to_span_index(pos)); self.cur.next(); if self.consume('{') { + self.last_opening_brace_pos = curr_last_brace; + Some(String(self.string(pos + 1))) } else { let arg = self.argument(); if let Some(arg_pos) = self.must_consume('}').map(|end| { - (pos + raw + 1, end + raw + 2) + (self.to_span_index(pos), self.to_span_index(end + 1)) }) { self.arg_places.push(arg_pos); } @@ -184,7 +202,7 @@ impl<'a> Iterator for Parser<'a> { if self.consume('}') { Some(String(self.string(pos + 1))) } else { - let err_pos = pos + raw + 1; + let err_pos = self.to_span_index(pos); self.err_with_note( "unmatched `}` found", "unmatched `}`", @@ -196,7 +214,6 @@ impl<'a> Iterator for Parser<'a> { } } '\n' => { - self.seen_newlines += 1; Some(String(self.string(pos))) } _ => Some(String(self.string(pos))), @@ -209,15 +226,22 @@ impl<'a> Parser<'a> { /// Creates a new parser for the given format string - pub fn new(s: &'a str, style: Option<usize>) -> Parser<'a> { + pub fn new( + s: &'a str, + style: Option<usize>, + skips: Vec<usize>, + append_newline: bool, + ) -> Parser<'a> { Parser { input: s, cur: s.char_indices().peekable(), errors: vec![], curarg: 0, style, - seen_newlines: 0, arg_places: vec![], + skips, + last_opening_brace_pos: None, + append_newline, } } @@ -228,8 +252,8 @@ impl<'a> Parser<'a> { &mut self, description: S1, label: S2, - start: usize, - end: usize, + start: SpanIndex, + end: SpanIndex, ) { self.errors.push(ParseError { description: description.into(), @@ -237,6 +261,7 @@ impl<'a> Parser<'a> { label: label.into(), start, end, + secondary_label: None, }); } @@ -248,8 +273,8 @@ impl<'a> Parser<'a> { description: S1, label: S2, note: S3, - start: usize, - end: usize, + start: SpanIndex, + end: SpanIndex, ) { self.errors.push(ParseError { description: description.into(), @@ -257,6 +282,7 @@ impl<'a> Parser<'a> { label: label.into(), start, end, + secondary_label: None, }); } @@ -276,47 +302,86 @@ impl<'a> Parser<'a> { } } + fn raw(&self) -> usize { + self.style.map(|raw| raw + 1).unwrap_or(0) + } + + fn to_span_index(&self, pos: usize) -> SpanIndex { + let mut pos = pos; + for skip in &self.skips { + if pos > *skip { + pos += 1; + } else if pos == *skip && self.raw() == 0 { + pos += 1; + } else { + break; + } + } + SpanIndex(self.raw() + pos + 1) + } + /// Forces consumption of the specified character.
If the character is not /// found, an error is emitted. fn must_consume(&mut self, c: char) -> Option { self.ws(); - let raw = self.style.unwrap_or(0); - let padding = raw + self.seen_newlines; if let Some(&(pos, maybe)) = self.cur.peek() { if c == maybe { self.cur.next(); Some(pos) } else { - let pos = pos + raw + 1; - self.err(format!("expected `{:?}`, found `{:?}`", c, maybe), - format!("expected `{}`", c), - pos, - pos); + let pos = self.to_span_index(pos); + let description = format!("expected `'}}'`, found `{:?}`", maybe); + let label = "expected `}`".to_owned(); + let (note, secondary_label) = if c == '}' { + (Some("if you intended to print `{`, you can escape it using `{{`".to_owned()), + self.last_opening_brace_pos.map(|pos| { + ("because of this opening brace".to_owned(), pos, pos) + })) + } else { + (None, None) + }; + self.errors.push(ParseError { + description, + note, + label, + start: pos, + end: pos, + secondary_label, + }); None } } else { - let msg = format!("expected `{:?}` but string was terminated", c); - // point at closing `"`, unless the last char is `\n` to account for `println` - let pos = match self.input.chars().last() { - Some('\n') => self.input.len(), - _ => self.input.len() + 1, - }; + let description = format!("expected `{:?}` but string was terminated", c); + // point at closing `"` + let pos = self.input.len() - if self.append_newline { 1 } else { 0 }; + let pos = self.to_span_index(pos); if c == '}' { - self.err_with_note(msg, - format!("expected `{:?}`", c), - "if you intended to print `{`, you can escape it using `{{`", - pos + padding, - pos + padding); + let label = format!("expected `{:?}`", c); + let (note, secondary_label) = if c == '}' { + (Some("if you intended to print `{`, you can escape it using `{{`".to_owned()), + self.last_opening_brace_pos.map(|pos| { + ("because of this opening brace".to_owned(), pos, pos) + })) + } else { + (None, None) + }; + self.errors.push(ParseError { + description, + note, + label, + start: pos, + end: pos, + secondary_label, + }); } else { - self.err(msg, format!("expected `{:?}`", c), pos, pos); + self.err(description, format!("expected `{:?}`", c), pos, pos); } None } } - /// Consumes all whitespace characters until the first non-whitespace - /// character + /// Consumes all whitespace characters until the first non-whitespace character fn ws(&mut self) { while let Some(&(_, c)) = self.cur.peek() { if c.is_whitespace() { @@ -344,8 +409,7 @@ impl<'a> Parser<'a> { &self.input[start..self.input.len()] } - /// Parses an Argument structure, or what's contained within braces inside - /// the format string + /// Parses an Argument structure, or what's contained within braces inside the format string fn argument(&mut self) -> Argument<'a> { let pos = self.position(); let format = self.format(); @@ -381,8 +445,8 @@ impl<'a> Parser<'a> { self.err_with_note(format!("invalid argument name `{}`", invalid_name), "invalid argument name", "argument names cannot start with an underscore", - pos + 1, // add 1 to account for leading `{` - pos + 1 + invalid_name.len()); + self.to_span_index(pos), + self.to_span_index(pos + invalid_name.len())); Some(ArgumentNamed(invalid_name)) }, @@ -563,7 +627,7 @@ mod tests { use super::*; fn same(fmt: &'static str, p: &[Piece<'static>]) { - let parser = Parser::new(fmt, None); + let parser = Parser::new(fmt, None, vec![], false); assert!(parser.collect::>>() == p); } @@ -579,7 +643,7 @@ mod tests { } fn musterr(s: &str) { - let mut p = Parser::new(s, None); + let mut p = Parser::new(s, None, 
vec![], false); p.next(); assert!(!p.errors.is_empty()); } diff --git a/src/libgraphviz/Cargo.toml b/src/libgraphviz/Cargo.toml index 76ef3a1d188ce..a6a3c1a249d64 100644 --- a/src/libgraphviz/Cargo.toml +++ b/src/libgraphviz/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "graphviz" version = "0.0.0" +edition = "2018" [lib] name = "graphviz" diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index e3cf959beb8e5..489020d4ee778 100644 --- a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Generate files suitable for use with [Graphviz](http://www.graphviz.org/) //! //! The `render` function generates output (e.g., an `output.dot` file) for @@ -281,15 +271,14 @@ //! //! * [DOT language](http://www.graphviz.org/doc/info/lang.html) -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(allow(unused_variables), deny(warnings))))] +#![deny(rust_2018_idioms)] + #![feature(nll)] -#![feature(str_escape)] -use self::LabelText::*; +use LabelText::*; use std::borrow::Cow; use std::io::prelude::*; @@ -403,7 +392,7 @@ impl<'a> Id<'a> { /// digit (i.e., the regular expression `[a-zA-Z_][a-zA-Z_0-9]*`). /// /// (Note: this format is a strict subset of the `ID` format - /// defined by the DOT language. This function may change in the + /// defined by the DOT language. This function may change in the /// future to accept a broader subset, or the entirety, of DOT's /// `ID` format.) /// @@ -540,7 +529,7 @@ impl<'a> LabelText<'a> { } /// Decomposes content into string suitable for making EscStr that - /// yields same content as self. The result obeys the law + /// yields same content as self. The result obeys the law /// render(`lt`) == render(`EscStr(lt.pre_escaped_content())`) for /// all `lt: LabelText`. fn pre_escaped_content(self) -> Cow<'a, str> { @@ -548,7 +537,7 @@ impl<'a> LabelText<'a> { EscStr(s) => s, LabelStr(s) => { if s.contains('\\') { - (&*s).escape_default().into() + (&*s).escape_default().to_string().into() } else { s } @@ -558,12 +547,12 @@ impl<'a> LabelText<'a> { } /// Puts `prefix` on a line above this label, with a blank line separator. - pub fn prefix_line(self, prefix: LabelText) -> LabelText<'static> { + pub fn prefix_line(self, prefix: LabelText<'_>) -> LabelText<'static> { prefix.suffix_line(self) } /// Puts `suffix` on a line below this label, with a blank line separator. 
- pub fn suffix_line(self, suffix: LabelText) -> LabelText<'static> { + pub fn suffix_line(self, suffix: LabelText<'_>) -> LabelText<'static> { let mut prefix = self.pre_escaped_content().into_owned(); let suffix = suffix.pre_escaped_content(); prefix.push_str(r"\n\n"); @@ -696,7 +685,7 @@ pub fn render_opts<'a, N, E, G, W>(g: &'a G, #[cfg(test)] mod tests { - use self::NodeLabels::*; + use NodeLabels::*; use super::{Id, Labeller, Nodes, Edges, GraphWalk, render, Style}; use super::LabelText::{self, LabelStr, EscStr, HtmlStr}; use std::io; diff --git a/src/libpanic_abort/Cargo.toml b/src/libpanic_abort/Cargo.toml index e304e61c32936..2bee0b716c750 100644 --- a/src/libpanic_abort/Cargo.toml +++ b/src/libpanic_abort/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "panic_abort" version = "0.0.0" +edition = "2018" [lib] path = "lib.rs" diff --git a/src/libpanic_abort/lib.rs b/src/libpanic_abort/lib.rs index 95c3514185e2f..8c20a6ea55ad0 100644 --- a/src/libpanic_abort/lib.rs +++ b/src/libpanic_abort/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of Rust panics via process aborts //! //! When compared to the implementation via unwinding, this crate is *much* @@ -15,12 +5,12 @@ #![no_std] #![unstable(feature = "panic_abort", issue = "32837")] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] #![panic_runtime] + #![allow(unused_features)] +#![deny(rust_2018_idioms)] #![feature(core_intrinsics)] #![feature(libc)] @@ -56,7 +46,6 @@ pub unsafe extern fn __rust_start_panic(_payload: usize) -> u32 { #[cfg(any(unix, target_os = "cloudabi"))] unsafe fn abort() -> ! { - extern crate libc; libc::abort(); } @@ -67,10 +56,11 @@ pub unsafe extern fn __rust_start_panic(_payload: usize) -> u32 { core::intrinsics::abort(); } - #[cfg(target_env="sgx")] + #[cfg(all(target_vendor="fortanix", target_env="sgx"))] unsafe fn abort() -> ! { - extern "C" { pub fn panic_exit() -> !; } - panic_exit(); + // call std::sys::abort_internal + extern "C" { pub fn __rust_abort() -> !; } + __rust_abort(); } } diff --git a/src/libpanic_unwind/Cargo.toml b/src/libpanic_unwind/Cargo.toml index c9fce621608a2..1b3901ac11a96 100644 --- a/src/libpanic_unwind/Cargo.toml +++ b/src/libpanic_unwind/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "panic_unwind" version = "0.0.0" +edition = "2018" [lib] path = "lib.rs" diff --git a/src/libpanic_unwind/dummy.rs b/src/libpanic_unwind/dummy.rs index 7150560b4a13d..3a00d6376658c 100644 --- a/src/libpanic_unwind/dummy.rs +++ b/src/libpanic_unwind/dummy.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! Unwinding for wasm32 +//! Unwinding for *wasm32* target. //! -//! Right now we don't support this, so this is just stubs +//! Right now we don't support this, so this is just stubs. use alloc::boxed::Box; use core::any::Any; diff --git a/src/libpanic_unwind/dwarf/eh.rs b/src/libpanic_unwind/dwarf/eh.rs index a24c659689139..07fa2971847f6 100644 --- a/src/libpanic_unwind/dwarf/eh.rs +++ b/src/libpanic_unwind/dwarf/eh.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Parsing of GCC-style Language-Specific Data Area (LSDA) //! For details see: //! http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html @@ -16,12 +6,12 @@ //! http://www.airs.com/blog/archives/464 //! //! A reference implementation may be found in the GCC source tree -//! (/libgcc/unwind-c.c as of this writing) +//! (`/libgcc/unwind-c.c` as of this writing). #![allow(non_upper_case_globals)] #![allow(unused)] -use dwarf::DwarfReader; +use crate::dwarf::DwarfReader; use core::mem; pub const DW_EH_PE_omit: u8 = 0xFF; @@ -61,7 +51,7 @@ pub enum EHAction { pub const USING_SJLJ_EXCEPTIONS: bool = cfg!(all(target_os = "ios", target_arch = "arm")); -pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext) +pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) -> Result { if lsda.is_null() { @@ -155,7 +145,7 @@ fn round_up(unrounded: usize, align: usize) -> Result { } unsafe fn read_encoded_pointer(reader: &mut DwarfReader, - context: &EHContext, + context: &EHContext<'_>, encoding: u8) -> Result { if encoding == DW_EH_PE_omit { diff --git a/src/libpanic_unwind/dwarf/mod.rs b/src/libpanic_unwind/dwarf/mod.rs index c9ae87ade283d..0360696426dc9 100644 --- a/src/libpanic_unwind/dwarf/mod.rs +++ b/src/libpanic_unwind/dwarf/mod.rs @@ -1,15 +1,5 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Utilities for parsing DWARF-encoded data streams. -//! See http://www.dwarfstd.org, +//! See , //! DWARF-4 standard, Section 7 - "Data Representation" // This module is used only by x86_64-pc-windows-gnu for now, but we diff --git a/src/libpanic_unwind/emcc.rs b/src/libpanic_unwind/emcc.rs index 87efc23abc81d..18e9006468ef3 100644 --- a/src/libpanic_unwind/emcc.rs +++ b/src/libpanic_unwind/emcc.rs @@ -1,29 +1,19 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Unwinding for emscripten +//! Unwinding for *emscripten* target. //! //! Whereas Rust's usual unwinding implementation for Unix platforms -//! 
calls into the libunwind APIs directly, on emscripten we instead +//! calls into the libunwind APIs directly, on Emscripten we instead //! call into the C++ unwinding APIs. This is just an expedience since -//! emscripten's runtime always implements those APIs and does not +//! Emscripten's runtime always implements those APIs and does not //! implement libunwind. #![allow(private_no_mangle_fns)] use core::any::Any; use core::ptr; +use core::mem; use alloc::boxed::Box; use libc::{self, c_int}; use unwind as uw; -use core::mem; pub fn payload() -> *mut u8 { ptr::null_mut() diff --git a/src/libpanic_unwind/gcc.rs b/src/libpanic_unwind/gcc.rs index 441058c8d74ca..e2b743b379704 100644 --- a/src/libpanic_unwind/gcc.rs +++ b/src/libpanic_unwind/gcc.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Implementation of panics backed by libgcc/libunwind (in some form) +//! Implementation of panics backed by libgcc/libunwind (in some form). //! //! For background on exception handling and stack unwinding please see //! "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and @@ -33,14 +23,14 @@ //! //! In the search phase, the job of a personality routine is to examine //! exception object being thrown, and to decide whether it should be caught at -//! that stack frame. Once the handler frame has been identified, cleanup phase +//! that stack frame. Once the handler frame has been identified, cleanup phase //! begins. //! //! In the cleanup phase, the unwinder invokes each personality routine again. //! This time it decides which (if any) cleanup code needs to be run for -//! the current stack frame. If so, the control is transferred to a special +//! the current stack frame. If so, the control is transferred to a special //! branch in the function body, the "landing pad", which invokes destructors, -//! frees memory, etc. At the end of the landing pad, control is transferred +//! frees memory, etc. At the end of the landing pad, control is transferred //! back to the unwinder and unwinding resumes. //! //! Once stack has been unwound down to the handler frame level, unwinding stops @@ -49,7 +39,7 @@ //! ## `eh_personality` and `eh_unwind_resume` //! //! These language items are used by the compiler when generating unwind info. -//! The first one is the personality routine described above. The second one +//! The first one is the personality routine described above. The second one //! allows compilation target to customize the process of resuming unwind at the //! end of the landing pads. `eh_unwind_resume` is used only if //! `custom_unwind_resume` flag in the target options is set. @@ -62,7 +52,7 @@ use alloc::boxed::Box; use unwind as uw; use libc::{c_int, uintptr_t}; -use dwarf::eh::{self, EHContext, EHAction}; +use crate::dwarf::eh::{self, EHContext, EHAction}; #[repr(C)] struct Exception { diff --git a/src/libpanic_unwind/lib.rs b/src/libpanic_unwind/lib.rs index 49f8a429126b7..9d3d8f6185bb3 100644 --- a/src/libpanic_unwind/lib.rs +++ b/src/libpanic_unwind/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Implementation of panics via stack unwinding //! //! This crate is an implementation of panics in Rust using "most native" stack @@ -24,11 +14,11 @@ #![no_std] #![unstable(feature = "panic_unwind", issue = "32837")] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/")] +#![deny(rust_2018_idioms)] + #![feature(allocator_api)] #![feature(alloc)] #![feature(core_intrinsics)] @@ -44,11 +34,6 @@ #![panic_runtime] #![feature(panic_runtime)] -extern crate alloc; -extern crate libc; -#[cfg(not(any(target_env = "msvc", all(windows, target_arch = "x86_64", target_env = "gnu"))))] -extern crate unwind; - use alloc::boxed::Box; use core::intrinsics; use core::mem; @@ -62,7 +47,7 @@ cfg_if! { if #[cfg(target_os = "emscripten")] { #[path = "emcc.rs"] mod imp; - } else if #[cfg(any(target_arch = "wasm32", target_env = "sgx"))] { + } else if #[cfg(target_arch = "wasm32")] { #[path = "dummy.rs"] mod imp; } else if #[cfg(all(target_env = "msvc", target_arch = "aarch64"))] { @@ -99,7 +84,7 @@ pub unsafe extern "C" fn __rust_maybe_catch_panic(f: fn(*mut u8), vtable_ptr: *mut usize) -> u32 { let mut payload = imp::payload(); - if intrinsics::try(f, data, &mut payload as *mut _ as *mut _) == 0 { + if intrinsics::r#try(f, data, &mut payload as *mut _ as *mut _) == 0 { 0 } else { let obj = mem::transmute::<_, raw::TraitObject>(imp::cleanup(payload)); diff --git a/src/libpanic_unwind/macros.rs b/src/libpanic_unwind/macros.rs index 6ea79dc862bda..659e977285e35 100644 --- a/src/libpanic_unwind/macros.rs +++ b/src/libpanic_unwind/macros.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// A macro for defining `#[cfg]` if-else statements. /// /// This is similar to the `if/elif` C preprocessor macro by allowing definition diff --git a/src/libpanic_unwind/seh.rs b/src/libpanic_unwind/seh.rs index 9d24079d91e2d..996fdb931eff2 100644 --- a/src/libpanic_unwind/seh.rs +++ b/src/libpanic_unwind/seh.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Windows SEH //! //! On Windows (currently only on MSVC), the default exception handling @@ -62,7 +52,7 @@ use core::any::Any; use core::mem; use core::raw; -use windows as c; +use crate::windows as c; use libc::{c_int, c_uint}; // First up, a whole bunch of type definitions. 
There's a few platform-specific @@ -311,5 +301,5 @@ pub unsafe fn cleanup(payload: [u64; 2]) -> Box { #[lang = "eh_personality"] #[cfg(not(test))] fn rust_eh_personality() { - unsafe { ::core::intrinsics::abort() } + unsafe { core::intrinsics::abort() } } diff --git a/src/libpanic_unwind/seh64_gnu.rs b/src/libpanic_unwind/seh64_gnu.rs index 60e9829ef9eaf..457ffcd34f9c7 100644 --- a/src/libpanic_unwind/seh64_gnu.rs +++ b/src/libpanic_unwind/seh64_gnu.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Unwinding implementation of top of native Win64 SEH, //! however the unwind handler data (aka LSDA) uses GCC-compatible encoding. @@ -19,8 +9,8 @@ use alloc::boxed::Box; use core::any::Any; use core::intrinsics; use core::ptr; -use dwarf::eh::{EHContext, EHAction, find_eh_action}; -use windows as c; +use crate::dwarf::eh::{EHContext, EHAction, find_eh_action}; +use crate::windows as c; // Define our exception codes: // according to http://msdn.microsoft.com/en-us/library/het71c37(v=VS.80).aspx, diff --git a/src/libpanic_unwind/windows.rs b/src/libpanic_unwind/windows.rs index 0a1c9b3adf183..3257a9d25a51a 100644 --- a/src/libpanic_unwind/windows.rs +++ b/src/libpanic_unwind/windows.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(nonstandard_style)] #![allow(dead_code)] #![cfg(windows)] diff --git a/src/libproc_macro/Cargo.toml b/src/libproc_macro/Cargo.toml index f903f79f9afc0..b3d0ee94f0e12 100644 --- a/src/libproc_macro/Cargo.toml +++ b/src/libproc_macro/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "proc_macro" version = "0.0.0" +edition = "2018" [lib] path = "lib.rs" diff --git a/src/libproc_macro/bridge/buffer.rs b/src/libproc_macro/bridge/buffer.rs index f228841c1e80f..0d8cc552d61ab 100644 --- a/src/libproc_macro/bridge/buffer.rs +++ b/src/libproc_macro/bridge/buffer.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Buffer management for same-process client<->server communication. 
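Many hunks in this area (for example `EHContext<'_>`, `Reader<'_>`, `Slice<'_, T>`, `fmt::Formatter<'_>`) add an explicit `'_` because the newly enabled `#![deny(rust_2018_idioms)]` lint group includes `elided_lifetimes_in_paths`. A minimal standalone sketch of that lint, with illustrative names that are not from the diff:

```rust
#![deny(rust_2018_idioms)]

use std::fmt;

struct Reader<'a> {
    bytes: &'a [u8],
}

impl fmt::Debug for Reader<'_> {
    // Writing plain `fmt::Formatter` here would trip `elided_lifetimes_in_paths`;
    // the `'_` makes the hidden lifetime parameter visible at the use site.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Reader({} bytes)", self.bytes.len())
    }
}

fn main() {
    println!("{:?}", Reader { bytes: &[1, 2, 3] });
}
```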
use std::io::{self, Write}; @@ -16,7 +6,7 @@ use std::ops::{Deref, DerefMut}; use std::slice; #[repr(C)] -struct Slice<'a, T: 'a> { +struct Slice<'a, T> { data: &'a [T; 0], len: usize, } @@ -52,7 +42,7 @@ pub struct Buffer { data: *mut T, len: usize, capacity: usize, - extend_from_slice: extern "C" fn(Buffer, Slice) -> Buffer, + extend_from_slice: extern "C" fn(Buffer, Slice<'_, T>) -> Buffer, drop: extern "C" fn(Buffer), } @@ -149,7 +139,7 @@ impl From> for Buffer { } } - extern "C" fn extend_from_slice(b: Buffer, xs: Slice) -> Buffer { + extern "C" fn extend_from_slice(b: Buffer, xs: Slice<'_, T>) -> Buffer { let mut v = to_vec(b); v.extend_from_slice(&xs); Buffer::from(v) diff --git a/src/libproc_macro/bridge/client.rs b/src/libproc_macro/bridge/client.rs index f5e12713e4e72..6052b4a4d43f4 100644 --- a/src/libproc_macro/bridge/client.rs +++ b/src/libproc_macro/bridge/client.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Client-side types. use super::*; @@ -76,7 +66,7 @@ macro_rules! define_handles { impl DecodeMut<'_, '_, HandleStore>> for Marked { - fn decode(r: &mut Reader, s: &mut HandleStore>) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut HandleStore>) -> Self { s.$oty.take(handle::Handle::decode(r, &mut ())) } } @@ -90,7 +80,7 @@ macro_rules! define_handles { impl Decode<'_, 's, HandleStore>> for &'s Marked { - fn decode(r: &mut Reader, s: &'s HandleStore>) -> Self { + fn decode(r: &mut Reader<'_>, s: &'s HandleStore>) -> Self { &s.$oty[handle::Handle::decode(r, &mut ())] } } @@ -104,7 +94,10 @@ macro_rules! define_handles { impl DecodeMut<'_, 's, HandleStore>> for &'s mut Marked { - fn decode(r: &mut Reader, s: &'s mut HandleStore>) -> Self { + fn decode( + r: &mut Reader<'_>, + s: &'s mut HandleStore> + ) -> Self { &mut s.$oty[handle::Handle::decode(r, &mut ())] } } @@ -118,7 +111,7 @@ macro_rules! define_handles { } impl DecodeMut<'_, '_, S> for $oty { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { $oty(handle::Handle::decode(r, s)) } } @@ -140,7 +133,7 @@ macro_rules! define_handles { impl DecodeMut<'_, '_, HandleStore>> for Marked { - fn decode(r: &mut Reader, s: &mut HandleStore>) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut HandleStore>) -> Self { s.$ity.copy(handle::Handle::decode(r, &mut ())) } } @@ -154,7 +147,7 @@ macro_rules! define_handles { } impl DecodeMut<'_, '_, S> for $ity { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { $ity(handle::Handle::decode(r, s)) } } @@ -210,7 +203,7 @@ impl Clone for Literal { // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. impl fmt::Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.debug()) } } @@ -222,15 +215,15 @@ impl Clone for SourceFile { } impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.debug()) } } macro_rules! 
define_client_side { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* - }),* $(,)*) => { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)*;)* + }),* $(,)?) => { $(impl $name { $(pub(crate) fn $method($($arg: $arg_ty),*) $(-> $ret_ty)* { Bridge::with(|bridge| { @@ -285,7 +278,7 @@ impl BridgeState<'_> { /// /// N.B., while `f` is running, the thread-local state /// is `BridgeState::InUse`. - fn with(f: impl FnOnce(&mut BridgeState) -> R) -> R { + fn with(f: impl FnOnce(&mut BridgeState<'_>) -> R) -> R { BRIDGE_STATE.with(|state| { state.replace(BridgeState::InUse, |mut state| { // FIXME(#52812) pass `f` directly to `replace` when `RefMutL` is gone @@ -316,7 +309,7 @@ impl Bridge<'_> { BRIDGE_STATE.with(|state| state.set(BridgeState::Connected(self), f)) } - fn with(f: impl FnOnce(&mut Bridge) -> R) -> R { + fn with(f: impl FnOnce(&mut Bridge<'_>) -> R) -> R { BridgeState::with(|state| match state { BridgeState::NotConnected => { panic!("procedural macro API is used outside of a procedural macro"); @@ -341,15 +334,15 @@ impl Bridge<'_> { #[derive(Copy, Clone)] pub struct Client { pub(super) get_handle_counters: extern "C" fn() -> &'static HandleCounters, - pub(super) run: extern "C" fn(Bridge, F) -> Buffer, + pub(super) run: extern "C" fn(Bridge<'_>, F) -> Buffer, pub(super) f: F, } // FIXME(#53451) public to work around `Cannot create local mono-item` ICE, // affecting not only the function itself, but also the `BridgeState` `thread_local!`. pub extern "C" fn __run_expand1( - mut bridge: Bridge, - f: fn(::TokenStream) -> ::TokenStream, + mut bridge: Bridge<'_>, + f: fn(crate::TokenStream) -> crate::TokenStream, ) -> Buffer { // The initial `cached_buffer` contains the input. let mut b = bridge.cached_buffer.take(); @@ -362,7 +355,7 @@ pub extern "C" fn __run_expand1( // Put the `cached_buffer` back in the `Bridge`, for requests. Bridge::with(|bridge| bridge.cached_buffer = b.take()); - let output = f(::TokenStream(input)).0; + let output = f(crate::TokenStream(input)).0; // Take the `cached_buffer` back out, for the output value. b = Bridge::with(|bridge| bridge.cached_buffer.take()); @@ -388,8 +381,8 @@ pub extern "C" fn __run_expand1( b } -impl Client ::TokenStream> { - pub const fn expand1(f: fn(::TokenStream) -> ::TokenStream) -> Self { +impl Client crate::TokenStream> { + pub const fn expand1(f: fn(crate::TokenStream) -> crate::TokenStream) -> Self { Client { get_handle_counters: HandleCounters::get, run: __run_expand1, @@ -401,8 +394,8 @@ impl Client ::TokenStream> { // FIXME(#53451) public to work around `Cannot create local mono-item` ICE, // affecting not only the function itself, but also the `BridgeState` `thread_local!`. pub extern "C" fn __run_expand2( - mut bridge: Bridge, - f: fn(::TokenStream, ::TokenStream) -> ::TokenStream, + mut bridge: Bridge<'_>, + f: fn(crate::TokenStream, crate::TokenStream) -> crate::TokenStream, ) -> Buffer { // The initial `cached_buffer` contains the input. let mut b = bridge.cached_buffer.take(); @@ -416,7 +409,7 @@ pub extern "C" fn __run_expand2( // Put the `cached_buffer` back in the `Bridge`, for requests. Bridge::with(|bridge| bridge.cached_buffer = b.take()); - let output = f(::TokenStream(input), ::TokenStream(input2)).0; + let output = f(crate::TokenStream(input), crate::TokenStream(input2)).0; // Take the `cached_buffer` back out, for the output value. 
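The macro declarations above also switch their trailing-comma matcher from `$(,)*` to `$(,)?`: the `?` Kleene operator matches at most once, so a single trailing comma is accepted while runs of commas no longer are. An illustrative macro (not from the diff):

```rust
// `$(,)?` accepts at most one optional trailing comma.
macro_rules! sum {
    ($($x:expr),* $(,)?) => {
        0 $(+ $x)*
    };
}

fn main() {
    assert_eq!(sum!(1, 2, 3), 6);
    assert_eq!(sum!(1, 2, 3,), 6); // trailing comma allowed
    // `sum!(1, 2, 3,,)` fails to match, unlike with the old `$(,)*` pattern.
}
```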
b = Bridge::with(|bridge| bridge.cached_buffer.take()); @@ -442,8 +435,10 @@ pub extern "C" fn __run_expand2( b } -impl Client ::TokenStream> { - pub const fn expand2(f: fn(::TokenStream, ::TokenStream) -> ::TokenStream) -> Self { +impl Client crate::TokenStream> { + pub const fn expand2( + f: fn(crate::TokenStream, crate::TokenStream) -> crate::TokenStream + ) -> Self { Client { get_handle_counters: HandleCounters::get, run: __run_expand2, @@ -458,17 +453,17 @@ pub enum ProcMacro { CustomDerive { trait_name: &'static str, attributes: &'static [&'static str], - client: Client ::TokenStream>, + client: Client crate::TokenStream>, }, Attr { name: &'static str, - client: Client ::TokenStream>, + client: Client crate::TokenStream>, }, Bang { name: &'static str, - client: Client ::TokenStream>, + client: Client crate::TokenStream>, }, } @@ -476,7 +471,7 @@ impl ProcMacro { pub const fn custom_derive( trait_name: &'static str, attributes: &'static [&'static str], - expand: fn(::TokenStream) -> ::TokenStream, + expand: fn(crate::TokenStream) -> crate::TokenStream, ) -> Self { ProcMacro::CustomDerive { trait_name, @@ -487,7 +482,7 @@ impl ProcMacro { pub const fn attr( name: &'static str, - expand: fn(::TokenStream, ::TokenStream) -> ::TokenStream, + expand: fn(crate::TokenStream, crate::TokenStream) -> crate::TokenStream, ) -> Self { ProcMacro::Attr { name, @@ -495,7 +490,10 @@ impl ProcMacro { } } - pub const fn bang(name: &'static str, expand: fn(::TokenStream) -> ::TokenStream) -> Self { + pub const fn bang( + name: &'static str, + expand: fn(crate::TokenStream) -> crate::TokenStream + ) -> Self { ProcMacro::Bang { name, client: Client::expand1(expand), diff --git a/src/libproc_macro/bridge/closure.rs b/src/libproc_macro/bridge/closure.rs index 92fe7baae097c..8d8adfa1caada 100644 --- a/src/libproc_macro/bridge/closure.rs +++ b/src/libproc_macro/bridge/closure.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Closure type (equivalent to `&mut dyn FnMut(A) -> R`) that's `repr(C)`. #[repr(C)] diff --git a/src/libproc_macro/bridge/handle.rs b/src/libproc_macro/bridge/handle.rs index 5c91a1408a42f..66496ff3f1ad8 100644 --- a/src/libproc_macro/bridge/handle.rs +++ b/src/libproc_macro/bridge/handle.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Server-side handles and storage for per-handle data. use std::collections::{BTreeMap, HashMap}; diff --git a/src/libproc_macro/bridge/mod.rs b/src/libproc_macro/bridge/mod.rs index edb4d3fbdaabb..3c48466fffa28 100644 --- a/src/libproc_macro/bridge/mod.rs +++ b/src/libproc_macro/bridge/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! Internal interface for communicating between a `proc_macro` client //! (a proc macro crate) and a `proc_macro` server (a compiler front-end). //! @@ -27,7 +17,7 @@ use std::panic; use std::sync::atomic::AtomicUsize; use std::sync::Once; use std::thread; -use {Delimiter, Level, LineColumn, Spacing}; +use crate::{Delimiter, Level, LineColumn, Spacing}; /// Higher-order macro describing the server RPC API, allowing automatic /// generation of type-safe Rust APIs, both client-side and server-side. @@ -165,6 +155,7 @@ macro_rules! with_api { fn end($self: $S::Span) -> LineColumn; fn join($self: $S::Span, other: $S::Span) -> Option<$S::Span>; fn resolved_at($self: $S::Span, at: $S::Span) -> $S::Span; + fn source_text($self: $S::Span) -> Option; }, } }; @@ -206,9 +197,9 @@ mod scoped_cell; #[forbid(unsafe_code)] pub mod server; -use self::buffer::Buffer; -pub use self::rpc::PanicMessage; -use self::rpc::{Decode, DecodeMut, Encode, Reader, Writer}; +use buffer::Buffer; +pub use rpc::PanicMessage; +use rpc::{Decode, DecodeMut, Encode, Reader, Writer}; /// An active connection between a server and a client. /// The server creates the bridge (`Bridge::run_server` in `server.rs`), @@ -235,8 +226,8 @@ mod api_tags { macro_rules! declare_tags { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* - }),* $(,)*) => { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)*;)* + }),* $(,)?) => { $( pub(super) enum $name { $($method),* @@ -317,7 +308,7 @@ impl Unmark for Option { } macro_rules! mark_noop { - ($($ty:ty),* $(,)*) => { + ($($ty:ty),* $(,)?) => { $( impl Mark for $ty { type Unmarked = Self; diff --git a/src/libproc_macro/bridge/rpc.rs b/src/libproc_macro/bridge/rpc.rs index fafc3d0074065..5018be74f8997 100644 --- a/src/libproc_macro/bridge/rpc.rs +++ b/src/libproc_macro/bridge/rpc.rs @@ -1,14 +1,4 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Serialization for client<->server communication. +//! Serialization for client-server communication. use std::any::Any; use std::char; @@ -34,36 +24,26 @@ pub(super) trait DecodeMut<'a, 's, S>: Sized { } macro_rules! rpc_encode_decode { - (uleb128 $ty:ty) => { + (le $ty:ty) => { impl Encode for $ty { - fn encode(mut self, w: &mut Writer, s: &mut S) { - let mut byte = 0x80; - while byte & 0x80 != 0 { - byte = (self & 0x7f) as u8; - self >>= 7; - if self != 0 { - byte |= 0x80; - } - byte.encode(w, s); - } + fn encode(self, w: &mut Writer, _: &mut S) { + w.write_all(&self.to_le_bytes()).unwrap(); } } impl DecodeMut<'_, '_, S> for $ty { - fn decode(r: &mut Reader, s: &mut S) -> Self { - let mut byte = 0x80; - let mut v = 0; - let mut shift = 0; - while byte & 0x80 != 0 { - byte = u8::decode(r, s); - v |= ((byte & 0x7f) as Self) << shift; - shift += 7; - } - v + fn decode(r: &mut Reader<'_>, _: &mut S) -> Self { + const N: usize = ::std::mem::size_of::<$ty>(); + + let mut bytes = [0; N]; + bytes.copy_from_slice(&r[..N]); + *r = &r[N..]; + + Self::from_le_bytes(bytes) } } }; - (struct $name:ident { $($field:ident),* $(,)* }) => { + (struct $name:ident { $($field:ident),* $(,)? 
}) => { impl Encode for $name { fn encode(self, w: &mut Writer, s: &mut S) { $(self.$field.encode(w, s);)* @@ -71,25 +51,28 @@ macro_rules! rpc_encode_decode { } impl DecodeMut<'_, '_, S> for $name { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { $name { $($field: DecodeMut::decode(r, s)),* } } } }; - (enum $name:ident $(<$($T:ident),+>)* { $($variant:ident $(($field:ident))*),* $(,)* }) => { - impl),+)*> Encode for $name $(<$($T),+>)* { + (enum $name:ident $(<$($T:ident),+>)? { $($variant:ident $(($field:ident))*),* $(,)? }) => { + impl),+)?> Encode for $name $(<$($T),+>)* { fn encode(self, w: &mut Writer, s: &mut S) { - // HACK(eddyb) `Tag` enum duplicated between the + // HACK(eddyb): `Tag` enum duplicated between the // two impls as there's no other place to stash it. - #[repr(u8)] enum Tag { $($variant),* } #[allow(non_upper_case_globals)] - impl Tag { $(const $variant: u8 = Tag::$variant as u8;)* } + mod tag { + #[repr(u8)] enum Tag { $($variant),* } + + $(pub const $variant: u8 = Tag::$variant as u8;)* + } match self { $($name::$variant $(($field))* => { - ::$variant.encode(w, s); + tag::$variant.encode(w, s); $($field.encode(w, s);)* })* } @@ -100,14 +83,17 @@ macro_rules! rpc_encode_decode { for $name $(<$($T),+>)* { fn decode(r: &mut Reader<'a>, s: &mut S) -> Self { - // HACK(eddyb) `Tag` enum duplicated between the + // HACK(eddyb): `Tag` enum duplicated between the // two impls as there's no other place to stash it. - #[repr(u8)] enum Tag { $($variant),* } #[allow(non_upper_case_globals)] - impl Tag { $(const $variant: u8 = Tag::$variant as u8;)* } + mod tag { + #[repr(u8)] enum Tag { $($variant),* } + + $(pub const $variant: u8 = Tag::$variant as u8;)* + } match u8::decode(r, s) { - $(::$variant => { + $(tag::$variant => { $(let $field = DecodeMut::decode(r, s);)* $name::$variant $(($field))* })* @@ -123,7 +109,7 @@ impl Encode for () { } impl DecodeMut<'_, '_, S> for () { - fn decode(_: &mut Reader, _: &mut S) -> Self {} + fn decode(_: &mut Reader<'_>, _: &mut S) -> Self {} } impl Encode for u8 { @@ -133,15 +119,15 @@ impl Encode for u8 { } impl DecodeMut<'_, '_, S> for u8 { - fn decode(r: &mut Reader, _: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, _: &mut S) -> Self { let x = r[0]; *r = &r[1..]; x } } -rpc_encode_decode!(uleb128 u32); -rpc_encode_decode!(uleb128 usize); +rpc_encode_decode!(le u32); +rpc_encode_decode!(le usize); impl Encode for bool { fn encode(self, w: &mut Writer, s: &mut S) { @@ -150,7 +136,7 @@ impl Encode for bool { } impl DecodeMut<'_, '_, S> for bool { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { match u8::decode(r, s) { 0 => false, 1 => true, @@ -166,7 +152,7 @@ impl Encode for char { } impl DecodeMut<'_, '_, S> for char { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { char::from_u32(u32::decode(r, s)).unwrap() } } @@ -178,7 +164,7 @@ impl Encode for NonZeroU32 { } impl DecodeMut<'_, '_, S> for NonZeroU32 { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { Self::new(u32::decode(r, s)).unwrap() } } @@ -255,7 +241,7 @@ impl Encode for String { } impl DecodeMut<'_, '_, S> for String { - fn decode(r: &mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { <&str>::decode(r, s).to_string() } } @@ -310,7 +296,7 @@ impl Encode for PanicMessage { } impl DecodeMut<'_, '_, S> for PanicMessage { - fn decode(r: 
&mut Reader, s: &mut S) -> Self { + fn decode(r: &mut Reader<'_>, s: &mut S) -> Self { match Option::::decode(r, s) { Some(s) => PanicMessage::String(s), None => PanicMessage::Unknown, diff --git a/src/libproc_macro/bridge/scoped_cell.rs b/src/libproc_macro/bridge/scoped_cell.rs index c86d5fc309a39..6f7965095b638 100644 --- a/src/libproc_macro/bridge/scoped_cell.rs +++ b/src/libproc_macro/bridge/scoped_cell.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! `Cell` variant for (scoped) existential lifetimes. use std::cell::Cell; @@ -48,7 +38,7 @@ impl ScopedCell { ScopedCell(Cell::new(value)) } - /// Set the value in `self` to `replacement` while + /// Sets the value in `self` to `replacement` while /// running `f`, which gets the old value, mutably. /// The old value will be restored after `f` exits, even /// by panic, including modifications made to it by `f`. @@ -83,7 +73,7 @@ impl ScopedCell { f(RefMutL(put_back_on_drop.value.as_mut().unwrap())) } - /// Set the value in `self` to `value` while running `f`. + /// Sets the value in `self` to `value` while running `f`. pub fn set<'a, R>(&self, value: >::Out, f: impl FnOnce() -> R) -> R { self.replace(value, |_| f()) } diff --git a/src/libproc_macro/bridge/server.rs b/src/libproc_macro/bridge/server.rs index 0c1d4f7cc5069..f303e3e828834 100644 --- a/src/libproc_macro/bridge/server.rs +++ b/src/libproc_macro/bridge/server.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Server-side traits. use super::*; @@ -49,14 +39,14 @@ macro_rules! associated_item { macro_rules! declare_server_traits { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* - }),* $(,)*) => { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)?;)* + }),* $(,)?) => { pub trait Types { $(associated_item!(type $name);)* } $(pub trait $name: Types { - $(associated_item!(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)*);)* + $(associated_item!(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)?);)* })* pub trait Server: Types $(+ $name)* {} @@ -69,14 +59,14 @@ pub(super) struct MarkedTypes(S); macro_rules! define_mark_types_impls { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* - }),* $(,)*) => { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)?;)* + }),* $(,)?) => { impl Types for MarkedTypes { $(type $name = Marked;)* } $(impl $name for MarkedTypes { - $(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)* { + $(fn $method(&mut self, $($arg: $arg_ty),*) $(-> $ret_ty)? { <_>::mark($name::$method(&mut self.0, $($arg.unmark()),*)) })* })* @@ -91,8 +81,8 @@ struct Dispatcher { macro_rules! define_dispatcher_impl { ($($name:ident { - $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* - }),* $(,)*) => { + $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)?) 
$(-> $ret_ty:ty)?;)* + }),* $(,)?) => { // FIXME(eddyb) `pub` only for `ExecutionStrategy` below. pub trait DispatcherTrait { // HACK(eddyb) these are here to allow `Self::$name` to work below. @@ -141,7 +131,7 @@ pub trait ExecutionStrategy { &self, dispatcher: &mut impl DispatcherTrait, input: Buffer, - run_client: extern "C" fn(Bridge, D) -> Buffer, + run_client: extern "C" fn(Bridge<'_>, D) -> Buffer, client_data: D, ) -> Buffer; } @@ -153,7 +143,7 @@ impl ExecutionStrategy for SameThread { &self, dispatcher: &mut impl DispatcherTrait, input: Buffer, - run_client: extern "C" fn(Bridge, D) -> Buffer, + run_client: extern "C" fn(Bridge<'_>, D) -> Buffer, client_data: D, ) -> Buffer { let mut dispatch = |b| dispatcher.dispatch(b); @@ -178,7 +168,7 @@ impl ExecutionStrategy for CrossThread1 { &self, dispatcher: &mut impl DispatcherTrait, input: Buffer, - run_client: extern "C" fn(Bridge, D) -> Buffer, + run_client: extern "C" fn(Bridge<'_>, D) -> Buffer, client_data: D, ) -> Buffer { use std::sync::mpsc::channel; @@ -216,7 +206,7 @@ impl ExecutionStrategy for CrossThread2 { &self, dispatcher: &mut impl DispatcherTrait, input: Buffer, - run_client: extern "C" fn(Bridge, D) -> Buffer, + run_client: extern "C" fn(Bridge<'_>, D) -> Buffer, client_data: D, ) -> Buffer { use std::sync::{Arc, Mutex}; @@ -283,7 +273,7 @@ fn run_server< handle_counters: &'static client::HandleCounters, server: S, input: I, - run_client: extern "C" fn(Bridge, D) -> Buffer, + run_client: extern "C" fn(Bridge<'_>, D) -> Buffer, client_data: D, ) -> Result { let mut dispatcher = Dispatcher { @@ -299,7 +289,7 @@ fn run_server< Result::decode(&mut &b[..], &mut dispatcher.handle_store) } -impl client::Client ::TokenStream> { +impl client::Client crate::TokenStream> { pub fn run( &self, strategy: &impl ExecutionStrategy, @@ -323,7 +313,7 @@ impl client::Client ::TokenStream> { } } -impl client::Client ::TokenStream> { +impl client::Client crate::TokenStream> { pub fn run( &self, strategy: &impl ExecutionStrategy, diff --git a/src/libproc_macro/diagnostic.rs b/src/libproc_macro/diagnostic.rs index 4234f0bcd21c1..65eebb5ec3737 100644 --- a/src/libproc_macro/diagnostic.rs +++ b/src/libproc_macro/diagnostic.rs @@ -1,14 +1,4 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use Span; +use crate::Span; /// An enum representing a diagnostic level. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] @@ -66,7 +56,7 @@ pub struct Diagnostic { macro_rules! diagnostic_child_methods { ($spanned:ident, $regular:ident, $level:expr) => ( - /// Add a new child diagnostic message to `self` with the level + /// Adds a new child diagnostic message to `self` with the level /// identified by this method's name with the given `spans` and /// `message`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] @@ -77,7 +67,7 @@ macro_rules! diagnostic_child_methods { self } - /// Add a new child diagnostic message to `self` with the level + /// Adds a new child diagnostic message to `self` with the level /// identified by this method's name with the given `message`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn $regular>(mut self, message: T) -> Diagnostic { @@ -90,7 +80,7 @@ macro_rules! 
diagnostic_child_methods { /// Iterator over the children diagnostics of a `Diagnostic`. #[derive(Debug, Clone)] #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] -pub struct Children<'a>(::std::slice::Iter<'a, Diagnostic>); +pub struct Children<'a>(std::slice::Iter<'a, Diagnostic>); #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] impl<'a> Iterator for Children<'a> { @@ -103,7 +93,7 @@ impl<'a> Iterator for Children<'a> { #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] impl Diagnostic { - /// Create a new diagnostic with the given `level` and `message`. + /// Creates a new diagnostic with the given `level` and `message`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn new>(level: Level, message: T) -> Diagnostic { Diagnostic { @@ -114,7 +104,7 @@ impl Diagnostic { } } - /// Create a new diagnostic with the given `level` and `message` pointing to + /// Creates a new diagnostic with the given `level` and `message` pointing to /// the given set of `spans`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn spanned(spans: S, level: Level, message: T) -> Diagnostic @@ -171,22 +161,22 @@ impl Diagnostic { /// Returns an iterator over the children diagnostics of `self`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] - pub fn children(&self) -> Children { + pub fn children(&self) -> Children<'_> { Children(self.children.iter()) } /// Emit the diagnostic. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn emit(self) { - fn to_internal(spans: Vec) -> ::bridge::client::MultiSpan { - let mut multi_span = ::bridge::client::MultiSpan::new(); + fn to_internal(spans: Vec) -> crate::bridge::client::MultiSpan { + let mut multi_span = crate::bridge::client::MultiSpan::new(); for span in spans { multi_span.push(span.0); } multi_span } - let mut diag = ::bridge::client::Diagnostic::new( + let mut diag = crate::bridge::client::Diagnostic::new( self.level, &self.message[..], to_internal(self.spans), diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index f2b85832dac31..1e0f1ed578aae 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A support library for macro authors when defining new macros. //! //! This library, provided by the standard distribution, provides the types @@ -15,18 +5,20 @@ //! function-like macros `#[proc_macro]`, macro attributes `#[proc_macro_attribute]` and //! custom derive attributes`#[proc_macro_derive]`. //! -//! See [the book](../book/first-edition/procedural-macros.html) for more. +//! See [the book] for more. +//! +//! 
[the book]: ../book/ch19-06-macros.html#procedural-macros-for-generating-code-from-attributes #![stable(feature = "proc_macro_lib", since = "1.15.0")] #![deny(missing_docs)] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/", +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", html_playground_url = "https://play.rust-lang.org/", issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/", test(no_crate_inject, attr(deny(warnings))), test(attr(allow(dead_code, deprecated, unused_variables, unused_mut))))] +#![deny(rust_2018_idioms)] + #![feature(nll)] #![feature(staged_api)] #![feature(const_fn)] @@ -99,7 +91,7 @@ impl TokenStream { /// or characters not existing in the language. /// All tokens in the parsed stream get `Span::call_site()` spans. /// -/// NOTE: Some errors may cause panics instead of returning `LexError`. We reserve the right to +/// NOTE: some errors may cause panics instead of returning `LexError`. We reserve the right to /// change these errors into `LexError`s later. #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl FromStr for TokenStream { @@ -124,7 +116,7 @@ impl ToString for TokenStream { /// with `Delimiter::None` delimiters and negative numeric literals. #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl fmt::Display for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } @@ -132,7 +124,7 @@ impl fmt::Display for TokenStream { /// Prints token in a form convenient for debugging. #[stable(feature = "proc_macro_lib", since = "1.15.0")] impl fmt::Debug for TokenStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("TokenStream ")?; f.debug_list().entries(self.clone()).finish() } @@ -168,9 +160,7 @@ impl iter::FromIterator for TokenStream { impl iter::FromIterator for TokenStream { fn from_iter>(streams: I) -> Self { let mut builder = bridge::client::TokenStreamBuilder::new(); - for stream in streams { - builder.push(stream.0); - } + streams.into_iter().for_each(|stream| builder.push(stream.0)); TokenStream(builder.build()) } } @@ -193,7 +183,7 @@ impl Extend for TokenStream { /// Public implementation details for the `TokenStream` type, such as iterators. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub mod token_stream { - use {bridge, Group, Ident, Literal, Punct, TokenTree, TokenStream}; + use crate::{bridge, Group, Ident, Literal, Punct, TokenTree, TokenStream}; /// An iterator over `TokenStream`'s `TokenTree`s. /// The iteration is "shallow", e.g., the iterator doesn't recurse into delimited groups, @@ -255,7 +245,7 @@ impl !Sync for Span {} macro_rules! diagnostic_method { ($name:ident, $level:expr) => ( - /// Create a new `Diagnostic` with the given `message` at the span + /// Creates a new `Diagnostic` with the given `message` at the span /// `self`. #[unstable(feature = "proc_macro_diagnostic", issue = "54140")] pub fn $name>(self, message: T) -> Diagnostic { @@ -301,19 +291,19 @@ impl Span { Span(self.0.source()) } - /// Get the starting line/column in the source file for this span. + /// Gets the starting line/column in the source file for this span. 
#[unstable(feature = "proc_macro_span", issue = "54725")] pub fn start(&self) -> LineColumn { self.0.start() } - /// Get the ending line/column in the source file for this span. + /// Gets the ending line/column in the source file for this span. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn end(&self) -> LineColumn { self.0.end() } - /// Create a new span encompassing `self` and `other`. + /// Creates a new span encompassing `self` and `other`. /// /// Returns `None` if `self` and `other` are from different files. #[unstable(feature = "proc_macro_span", issue = "54725")] @@ -341,6 +331,18 @@ impl Span { self.0 == other.0 } + /// Returns the source text behind a span. This preserves the original source + /// code, including spaces and comments. It only returns a result if the span + /// corresponds to real source code. + /// + /// Note: The observable result of a macro should only rely on the tokens and + /// not on this source text. The result of this function is a best effort to + /// be used for diagnostics only. + #[unstable(feature = "proc_macro_span", issue = "54725")] + pub fn source_text(&self) -> Option { + self.0.source_text() + } + diagnostic_method!(error, Level::Error); diagnostic_method!(warning, Level::Warning); diagnostic_method!(note, Level::Note); @@ -350,7 +352,7 @@ impl Span { /// Prints a span in a form convenient for debugging. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Span { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) } } @@ -379,7 +381,7 @@ impl !Sync for LineColumn {} pub struct SourceFile(bridge::client::SourceFile); impl SourceFile { - /// Get the path to this source file. + /// Gets the path to this source file. /// /// ### Note /// If the code span associated with this `SourceFile` was generated by an external macro, this @@ -408,7 +410,7 @@ impl SourceFile { #[unstable(feature = "proc_macro_span", issue = "54725")] impl fmt::Debug for SourceFile { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SourceFile") .field("path", &self.path()) .field("is_real", &self.is_real()) @@ -493,7 +495,7 @@ impl TokenTree { /// Prints token tree in a form convenient for debugging. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // Each of these has the name in the struct type in the derived debug, // so don't bother with an extra layer of indirection match *self { @@ -552,7 +554,7 @@ impl ToString for TokenTree { /// with `Delimiter::None` delimiters and negative numeric literals. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for TokenTree { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } @@ -677,14 +679,14 @@ impl ToString for Group { /// with `Delimiter::None` delimiters. 
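The new `Span::source_text` method added above is unstable and best-effort. A hypothetical proc-macro crate using it might look like the sketch below; the crate setup, a nightly toolchain, and the `proc_macro_span` feature gate are assumptions, not part of this diff:

```rust
// lib.rs of a hypothetical proc-macro crate (`proc-macro = true` in Cargo.toml).
#![feature(proc_macro_span)]

extern crate proc_macro;
use proc_macro::TokenStream;

#[proc_macro]
pub fn show_source(input: TokenStream) -> TokenStream {
    for tree in input.clone() {
        // `source_text` returns `None` for spans that don't map to real source,
        // so diagnostics should not rely on it being present.
        if let Some(text) = tree.span().source_text() {
            eprintln!("token `{}` spelled as `{}` in the source", tree, text);
        }
    }
    input
}
```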
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Group { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Group { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Group") .field("delimiter", &self.delimiter()) .field("stream", &self.stream()) @@ -729,11 +731,6 @@ impl Punct { /// which can be further configured with the `set_span` method below. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn new(ch: char, spacing: Spacing) -> Punct { - const LEGAL_CHARS: &[char] = &['=', '<', '>', '!', '~', '+', '-', '*', '/', '%', '^', - '&', '|', '@', '.', ',', ';', ':', '#', '$', '?', '\'']; - if !LEGAL_CHARS.contains(&ch) { - panic!("unsupported character `{:?}`", ch) - } Punct(bridge::client::Punct::new(ch, spacing)) } @@ -778,14 +775,14 @@ impl ToString for Punct { /// back into the same character. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Punct { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Punct { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Punct") .field("ch", &self.as_char()) .field("spacing", &self.spacing()) @@ -800,16 +797,6 @@ impl fmt::Debug for Punct { pub struct Ident(bridge::client::Ident); impl Ident { - fn is_valid(string: &str) -> bool { - let mut chars = string.chars(); - if let Some(start) = chars.next() { - (start == '_' || start.is_xid_start()) - && chars.all(|cont| cont == '_' || cont.is_xid_continue()) - } else { - false - } - } - /// Creates a new `Ident` with the given `string` as well as the specified /// `span`. /// The `string` argument must be a valid identifier permitted by the @@ -831,18 +818,12 @@ impl Ident { /// tokens, requires a `Span` to be specified at construction. #[stable(feature = "proc_macro_lib2", since = "1.29.0")] pub fn new(string: &str, span: Span) -> Ident { - if !Ident::is_valid(string) { - panic!("`{:?}` is not a valid identifier", string) - } Ident(bridge::client::Ident::new(string, span.0, false)) } /// Same as `Ident::new`, but creates a raw identifier (`r#ident`). #[unstable(feature = "proc_macro_raw_ident", issue = "54723")] pub fn new_raw(string: &str, span: Span) -> Ident { - if !Ident::is_valid(string) { - panic!("`{:?}` is not a valid identifier", string) - } Ident(bridge::client::Ident::new(string, span.0, true)) } @@ -873,14 +854,14 @@ impl ToString for Ident { /// back into the same identifier. 
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Ident { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Ident") .field("ident", &self.to_string()) .field("span", &self.span()) @@ -1123,14 +1104,14 @@ impl ToString for Literal { /// back into the same literal (except for possible rounding for floating point literals). #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Display for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.to_string()) } } #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Literal { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. self.0.fmt(f) } diff --git a/src/libproc_macro/quote.rs b/src/libproc_macro/quote.rs index 0f454a79055a0..e3d31b78f4a09 100644 --- a/src/libproc_macro/quote.rs +++ b/src/libproc_macro/quote.rs @@ -1,20 +1,10 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # Quasiquoter //! This file contains the implementation internals of the quasiquoter provided by `quote!`. //! This quasiquoter uses macros 2.0 hygiene to reliably access //! items from `proc_macro`, to build a `proc_macro::TokenStream`. -use {Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; +use crate::{Delimiter, Group, Ident, Literal, Punct, Spacing, Span, TokenStream, TokenTree}; macro_rules! quote_tt { (($($t:tt)*)) => { Group::new(Delimiter::Parenthesis, quote!($($t)*)) }; @@ -73,7 +63,7 @@ macro_rules! 
quote { #[unstable(feature = "proc_macro_quote", issue = "54722")] pub fn quote(stream: TokenStream) -> TokenStream { if stream.is_empty() { - return quote!(::TokenStream::new()); + return quote!(crate::TokenStream::new()); } let mut after_dollar = false; let tokens = stream @@ -83,7 +73,7 @@ pub fn quote(stream: TokenStream) -> TokenStream { after_dollar = false; match tree { TokenTree::Ident(_) => { - return Some(quote!(Into::<::TokenStream>::into( + return Some(quote!(Into::::into( Clone::clone(&(@ tree))),)); } TokenTree::Punct(ref tt) if tt.as_char() == '$' => {} @@ -96,33 +86,33 @@ pub fn quote(stream: TokenStream) -> TokenStream { } } - Some(quote!(::TokenStream::from((@ match tree { - TokenTree::Punct(tt) => quote!(::TokenTree::Punct(::Punct::new( + Some(quote!(crate::TokenStream::from((@ match tree { + TokenTree::Punct(tt) => quote!(crate::TokenTree::Punct(crate::Punct::new( (@ TokenTree::from(Literal::character(tt.as_char()))), (@ match tt.spacing() { - Spacing::Alone => quote!(::Spacing::Alone), - Spacing::Joint => quote!(::Spacing::Joint), + Spacing::Alone => quote!(crate::Spacing::Alone), + Spacing::Joint => quote!(crate::Spacing::Joint), }), ))), - TokenTree::Group(tt) => quote!(::TokenTree::Group(::Group::new( + TokenTree::Group(tt) => quote!(crate::TokenTree::Group(crate::Group::new( (@ match tt.delimiter() { - Delimiter::Parenthesis => quote!(::Delimiter::Parenthesis), - Delimiter::Brace => quote!(::Delimiter::Brace), - Delimiter::Bracket => quote!(::Delimiter::Bracket), - Delimiter::None => quote!(::Delimiter::None), + Delimiter::Parenthesis => quote!(crate::Delimiter::Parenthesis), + Delimiter::Brace => quote!(crate::Delimiter::Brace), + Delimiter::Bracket => quote!(crate::Delimiter::Bracket), + Delimiter::None => quote!(crate::Delimiter::None), }), (@ quote(tt.stream())), ))), - TokenTree::Ident(tt) => quote!(::TokenTree::Ident(::Ident::new( + TokenTree::Ident(tt) => quote!(crate::TokenTree::Ident(crate::Ident::new( (@ TokenTree::from(Literal::string(&tt.to_string()))), (@ quote_span(tt.span())), ))), - TokenTree::Literal(tt) => quote!(::TokenTree::Literal({ + TokenTree::Literal(tt) => quote!(crate::TokenTree::Literal({ let mut iter = (@ TokenTree::from(Literal::string(&tt.to_string()))) - .parse::<::TokenStream>() + .parse::() .unwrap() .into_iter(); - if let (Some(::TokenTree::Literal(mut lit)), None) = + if let (Some(crate::TokenTree::Literal(mut lit)), None) = (iter.next(), iter.next()) { lit.set_span((@ quote_span(tt.span()))); @@ -139,12 +129,12 @@ pub fn quote(stream: TokenStream) -> TokenStream { panic!("unexpected trailing `$` in `quote!`"); } - quote!([(@ tokens)].iter().cloned().collect::<::TokenStream>()) + quote!([(@ tokens)].iter().cloned().collect::()) } /// Quote a `Span` into a `TokenStream`. /// This is needed to implement a custom quoter. 
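The quasiquoter rewrites every leading `::Foo` path as `crate::Foo` because in the 2018 edition a path beginning with `::` must name an external crate. A small standalone sketch of that rule, using an illustrative module that is not part of `proc_macro`:

```rust
// Rust 2018: `::tokens::...` would be rejected because `::` now introduces an
// external crate name; crate-local items are reached through `crate::`.
mod tokens {
    pub struct TokenStream;

    impl TokenStream {
        pub fn new() -> Self {
            TokenStream
        }
    }
}

fn make() -> crate::tokens::TokenStream {
    crate::tokens::TokenStream::new()
}

fn main() {
    let _stream = make();
}
```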
#[unstable(feature = "proc_macro_quote", issue = "54722")] pub fn quote_span(_: Span) -> TokenStream { - quote!(::Span::def_site()) + quote!(crate::Span::def_site()) } diff --git a/src/libprofiler_builtins/Cargo.toml b/src/libprofiler_builtins/Cargo.toml index 7c95cf0a0542a..0d36bd0b39d76 100644 --- a/src/libprofiler_builtins/Cargo.toml +++ b/src/libprofiler_builtins/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] build = "build.rs" name = "profiler_builtins" version = "0.0.0" +edition = "2018" [lib] name = "profiler_builtins" diff --git a/src/libprofiler_builtins/build.rs b/src/libprofiler_builtins/build.rs index db72ce77e720b..ff52a03d9dd97 100644 --- a/src/libprofiler_builtins/build.rs +++ b/src/libprofiler_builtins/build.rs @@ -1,19 +1,7 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Compiles the profiler part of the `compiler-rt` library. //! //! See the build.rs for libcompiler_builtins crate for details. -extern crate cc; - use std::env; use std::path::Path; diff --git a/src/libprofiler_builtins/lib.rs b/src/libprofiler_builtins/lib.rs index 8678330a5a09e..2ce1a110b44c0 100644 --- a/src/libprofiler_builtins/lib.rs +++ b/src/libprofiler_builtins/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
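The build-script hunk above can drop `extern crate cc;` because, in the 2018 edition, crates listed in Cargo.toml are reachable through the extern prelude. A minimal 2018-edition `build.rs` sketch; the `[build-dependencies] cc` entry and the C source path are assumptions, not part of this diff:

```rust
// build.rs -- no `extern crate cc;` needed on the 2018 edition.
fn main() {
    cc::Build::new()
        .file("src/shim.c") // hypothetical C source file
        .compile("shim");
}
```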
- #![no_std] #![feature(profiler_runtime)] #![profiler_runtime] @@ -17,3 +7,4 @@ #![allow(unused_features)] #![feature(nll)] #![feature(staged_api)] +#![deny(rust_2018_idioms)] diff --git a/src/librustc/Cargo.toml b/src/librustc/Cargo.toml index a572b6bf919e1..31e10c19c7a60 100644 --- a/src/librustc/Cargo.toml +++ b/src/librustc/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc" version = "0.0.0" +edition = "2018" [lib] name = "rustc" @@ -15,22 +16,24 @@ fmt_macros = { path = "../libfmt_macros" } graphviz = { path = "../libgraphviz" } jobserver = "0.1" lazy_static = "1.0.0" -scoped-tls = { version = "0.1.1", features = ["nightly"] } +num_cpus = "1.0" +scoped-tls = "1.0" log = { version = "0.4", features = ["release_max_level_info", "std"] } -polonius-engine = "0.5.0" -rustc-rayon = "0.1.1" -rustc-rayon-core = "0.1.1" +polonius-engine = "0.6.2" +rustc-rayon = "0.1.2" +rustc-rayon-core = "0.1.2" rustc_apfloat = { path = "../librustc_apfloat" } rustc_target = { path = "../librustc_target" } +rustc_macros = { path = "../librustc_macros" } rustc_data_structures = { path = "../librustc_data_structures" } -rustc_errors = { path = "../librustc_errors" } +errors = { path = "../librustc_errors", package = "rustc_errors" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } backtrace = "0.3.3" -parking_lot = "0.6" +parking_lot = "0.7" byteorder = { version = "1.1", features = ["i128"]} -chalk-engine = { version = "0.8.0", default-features=false } +chalk-engine = { version = "0.9.0", default-features=false } rustc_fs_util = { path = "../librustc_fs_util" } smallvec = { version = "0.6.7", features = ["union", "may_dangle"] } diff --git a/src/librustc/arena.rs b/src/librustc/arena.rs new file mode 100644 index 0000000000000..e9751a23f1218 --- /dev/null +++ b/src/librustc/arena.rs @@ -0,0 +1,206 @@ +use arena::{TypedArena, DroplessArena}; +use std::mem; +use std::ptr; +use std::slice; +use std::cell::RefCell; +use std::marker::PhantomData; +use smallvec::SmallVec; + +#[macro_export] +macro_rules! arena_types { + ($macro:path, $args:tt, $tcx:lifetime) => ( + $macro!($args, [ + [] vtable_method: Option<( + rustc::hir::def_id::DefId, + rustc::ty::subst::SubstsRef<$tcx> + )>, + [few] mir_keys: rustc::util::nodemap::DefIdSet, + [decode] specialization_graph: rustc::traits::specialization_graph::Graph, + ], $tcx); + ) +} + +macro_rules! arena_for_type { + ([][$ty:ty]) => { + TypedArena<$ty> + }; + ([few $(, $attrs:ident)*][$ty:ty]) => { + PhantomData<$ty> + }; + ([$ignore:ident $(, $attrs:ident)*]$args:tt) => { + arena_for_type!([$($attrs),*]$args) + }; +} + +macro_rules! declare_arena { + ([], [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => { + #[derive(Default)] + pub struct Arena<$tcx> { + dropless: DroplessArena, + drop: DropArena, + $($name: arena_for_type!($a[$ty]),)* + } + } +} + +macro_rules! which_arena_for_type { + ([][$arena:expr]) => { + Some($arena) + }; + ([few$(, $attrs:ident)*][$arena:expr]) => { + None + }; + ([$ignore:ident$(, $attrs:ident)*]$args:tt) => { + which_arena_for_type!([$($attrs),*]$args) + }; +} + +macro_rules! 
impl_arena_allocatable { + ([], [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => { + $( + impl ArenaAllocatable for $ty {} + unsafe impl<$tcx> ArenaField<$tcx> for $ty { + #[inline] + fn arena<'a>(_arena: &'a Arena<$tcx>) -> Option<&'a TypedArena> { + which_arena_for_type!($a[&_arena.$name]) + } + } + )* + } +} + +arena_types!(declare_arena, [], 'tcx); + +arena_types!(impl_arena_allocatable, [], 'tcx); + +pub trait ArenaAllocatable {} + +impl ArenaAllocatable for T {} + +pub unsafe trait ArenaField<'tcx>: Sized { + /// Returns a specific arena to allocate from. + /// If None is returned, the DropArena will be used. + fn arena<'a>(arena: &'a Arena<'tcx>) -> Option<&'a TypedArena>; +} + +unsafe impl<'tcx, T> ArenaField<'tcx> for T { + #[inline] + default fn arena<'a>(_: &'a Arena<'tcx>) -> Option<&'a TypedArena> { + panic!() + } +} + +impl<'tcx> Arena<'tcx> { + #[inline] + pub fn alloc(&self, value: T) -> &mut T { + if !mem::needs_drop::() { + return self.dropless.alloc(value); + } + match >::arena(self) { + Some(arena) => arena.alloc(value), + None => unsafe { self.drop.alloc(value) }, + } + } + + pub fn alloc_from_iter< + T: ArenaAllocatable, + I: IntoIterator + >( + &'a self, + iter: I + ) -> &'a mut [T] { + if !mem::needs_drop::() { + return self.dropless.alloc_from_iter(iter); + } + match >::arena(self) { + Some(arena) => arena.alloc_from_iter(iter), + None => unsafe { self.drop.alloc_from_iter(iter) }, + } + } +} + +/// Calls the destructor for an object when dropped. +struct DropType { + drop_fn: unsafe fn(*mut u8), + obj: *mut u8, +} + +unsafe fn drop_for_type(to_drop: *mut u8) { + std::ptr::drop_in_place(to_drop as *mut T) +} + +impl Drop for DropType { + fn drop(&mut self) { + unsafe { + (self.drop_fn)(self.obj) + } + } +} + +/// An arena which can be used to allocate any type. +/// Allocating in this arena is unsafe since the type system +/// doesn't know which types it contains. In order to +/// allocate safely, you must store a PhantomData +/// alongside this arena for each type T you allocate. +#[derive(Default)] +struct DropArena { + /// A list of destructors to run when the arena drops. + /// Ordered so `destructors` gets dropped before the arena + /// since its destructor can reference memory in the arena. + destructors: RefCell>, + arena: DroplessArena, +} + +impl DropArena { + #[inline] + unsafe fn alloc(&self, object: T) -> &mut T { + let mem = self.arena.alloc_raw( + mem::size_of::(), + mem::align_of::() + ) as *mut _ as *mut T; + // Write into uninitialized memory. 
+ ptr::write(mem, object); + let result = &mut *mem; + // Record the destructor after doing the allocation as that may panic + // and would cause `object`'s destuctor to run twice if it was recorded before + self.destructors.borrow_mut().push(DropType { + drop_fn: drop_for_type::, + obj: result as *mut T as *mut u8, + }); + result + } + + #[inline] + unsafe fn alloc_from_iter>(&self, iter: I) -> &mut [T] { + let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect(); + if vec.is_empty() { + return &mut []; + } + let len = vec.len(); + + let start_ptr = self.arena.alloc_raw( + len.checked_mul(mem::size_of::()).unwrap(), + mem::align_of::() + ) as *mut _ as *mut T; + + let mut destructors = self.destructors.borrow_mut(); + // Reserve space for the destructors so we can't panic while adding them + destructors.reserve(len); + + // Move the content to the arena by copying it and then forgetting + // the content of the SmallVec + vec.as_ptr().copy_to_nonoverlapping(start_ptr, len); + mem::forget(vec.drain()); + + // Record the destructors after doing the allocation as that may panic + // and would cause `object`'s destuctor to run twice if it was recorded before + for i in 0..len { + destructors.push(DropType { + drop_fn: drop_for_type::, + obj: start_ptr.offset(i as isize) as *mut u8, + }); + } + + slice::from_raw_parts_mut(start_ptr, len) + } +} diff --git a/src/librustc/benches/dispatch.rs b/src/librustc/benches/dispatch.rs index 63e74778fb92a..e3b36be5696b3 100644 --- a/src/librustc/benches/dispatch.rs +++ b/src/librustc/benches/dispatch.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use test::Bencher; // Static/dynamic method dispatch diff --git a/src/librustc/benches/lib.rs b/src/librustc/benches/lib.rs index 5496df1342ff4..0f81586d3bdd9 100644 --- a/src/librustc/benches/lib.rs +++ b/src/librustc/benches/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![feature(test)] extern crate test; diff --git a/src/librustc/benches/pattern.rs b/src/librustc/benches/pattern.rs index 638b1ce3f7530..fd8cc5b83fd5a 100644 --- a/src/librustc/benches/pattern.rs +++ b/src/librustc/benches/pattern.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use test::Bencher; // Overhead of various match forms diff --git a/src/librustc/build.rs b/src/librustc/build.rs index bde503d86de73..af7723aea34e4 100644 --- a/src/librustc/build.rs +++ b/src/librustc/build.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
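The new DropArena above records a type-erased destructor alongside every allocation so the drops can be run when the arena itself is dropped. A simplified, self-contained sketch of that pattern (it leaks the backing memory instead of chunk-allocating, purely to keep the example short; the names are illustrative, not rustc's):

use std::cell::RefCell;

struct DropEntry {
    drop_fn: unsafe fn(*mut u8),
    obj: *mut u8,
}

unsafe fn drop_for_type<T>(to_drop: *mut u8) {
    // Runs T's destructor in place; deallocation is handled (or skipped) elsewhere.
    std::ptr::drop_in_place(to_drop as *mut T)
}

#[derive(Default)]
struct MiniDropArena {
    // Destructors recorded in allocation order; run when the arena is dropped.
    destructors: RefCell<Vec<DropEntry>>,
}

impl MiniDropArena {
    fn alloc<T>(&self, value: T) -> &mut T {
        // Move the value to stable memory and leak the Box: from now on the arena,
        // not the Box, is responsible for running T's destructor.
        let ptr = Box::into_raw(Box::new(value));
        self.destructors.borrow_mut().push(DropEntry {
            drop_fn: drop_for_type::<T>,
            obj: ptr as *mut u8,
        });
        unsafe { &mut *ptr }
    }
}

impl Drop for MiniDropArena {
    fn drop(&mut self) {
        for entry in self.destructors.borrow_mut().drain(..) {
            // Type-erased call: each entry carries the monomorphized drop glue to use.
            unsafe { (entry.drop_fn)(entry.obj) }
        }
    }
}

fn main() {
    let arena = MiniDropArena::default();
    let s = arena.alloc(String::from("owned by the arena"));
    println!("{}", s);
    // Dropping `arena` here runs the String destructor via the recorded drop_fn.
}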
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::env; fn main() { diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs index c5d6ce24c5df4..e96709f6d14e5 100644 --- a/src/librustc/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -1,21 +1,11 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use cfg::*; -use middle::region; +use crate::cfg::*; +use crate::middle::region; use rustc_data_structures::graph::implementation as graph; use syntax::ptr::P; -use ty::{self, TyCtxt}; +use crate::ty::{self, TyCtxt}; -use hir::{self, PatKind}; -use hir::def_id::DefId; +use crate::hir::{self, PatKind}; +use crate::hir::def_id::DefId; struct CFGBuilder<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -109,30 +99,20 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { } fn stmt(&mut self, stmt: &hir::Stmt, pred: CFGIndex) -> CFGIndex { - let hir_id = self.tcx.hir().node_to_hir_id(stmt.node.id()); - match stmt.node { - hir::StmtKind::Decl(ref decl, _) => { - let exit = self.decl(&decl, pred); - self.add_ast_node(hir_id.local_id, &[exit]) - } - - hir::StmtKind::Expr(ref expr, _) | - hir::StmtKind::Semi(ref expr, _) => { - let exit = self.expr(&expr, pred); - self.add_ast_node(hir_id.local_id, &[exit]) - } - } - } - - fn decl(&mut self, decl: &hir::Decl, pred: CFGIndex) -> CFGIndex { - match decl.node { - hir::DeclKind::Local(ref local) => { + let exit = match stmt.node { + hir::StmtKind::Local(ref local) => { let init_exit = self.opt_expr(&local.init, pred); self.pat(&local.pat, init_exit) } - - hir::DeclKind::Item(_) => pred, - } + hir::StmtKind::Item(_) => { + pred + } + hir::StmtKind::Expr(ref expr) | + hir::StmtKind::Semi(ref expr) => { + self.expr(&expr, pred) + } + }; + self.add_ast_node(stmt.hir_id.local_id, &[exit]) } fn pat(&mut self, pat: &hir::Pat, pred: CFGIndex) -> CFGIndex { @@ -170,9 +150,11 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { } } - fn pats_all<'b, I: Iterator>>(&mut self, - pats: I, - pred: CFGIndex) -> CFGIndex { + fn pats_all<'b, I: Iterator>>( + &mut self, + pats: I, + pred: CFGIndex + ) -> CFGIndex { //! Handles case where all of the patterns must match. pats.fold(pred, |pred, pat| self.pat(&pat, pred)) } @@ -402,7 +384,8 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { hir::ExprKind::Closure(..) | hir::ExprKind::Lit(..) | - hir::ExprKind::Path(_) => { + hir::ExprKind::Path(_) | + hir::ExprKind::Err => { self.straightline(expr, pred, None::.iter()) } } @@ -415,8 +398,8 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { args: I) -> CFGIndex { let func_or_rcvr_exit = self.expr(func_or_rcvr, pred); let ret = self.straightline(call_expr, func_or_rcvr_exit, args); - // FIXME(canndrew): This is_never should probably be an is_uninhabited. 
- if self.tables.expr_ty(call_expr).is_never() { + let m = self.tcx.hir().get_module_parent_by_hir_id(call_expr.hir_id); + if self.tcx.is_ty_uninhabited_from(m, self.tables.expr_ty(call_expr)) { self.add_unreachable_node() } else { ret @@ -588,9 +571,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { match destination.target_id { Ok(loop_id) => { for b in &self.breakable_block_scopes { - if b.block_expr_id == self.tcx.hir().node_to_hir_id(loop_id).local_id { + if b.block_expr_id == loop_id.local_id { let scope = region::Scope { - id: self.tcx.hir().node_to_hir_id(loop_id).local_id, + id: loop_id.local_id, data: region::ScopeData::Node }; return (scope, match scope_cf_kind { @@ -600,9 +583,9 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { } } for l in &self.loop_scopes { - if l.loop_id == self.tcx.hir().node_to_hir_id(loop_id).local_id { + if l.loop_id == loop_id.local_id { let scope = region::Scope { - id: self.tcx.hir().node_to_hir_id(loop_id).local_id, + id: loop_id.local_id, data: region::ScopeData::Node }; return (scope, match scope_cf_kind { diff --git a/src/librustc/cfg/graphviz.rs b/src/librustc/cfg/graphviz.rs index 39810691df855..969c38bd66329 100644 --- a/src/librustc/cfg/graphviz.rs +++ b/src/librustc/cfg/graphviz.rs @@ -1,22 +1,12 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// This module provides linkage between rustc::middle::graph and /// libgraphviz traits. // For clarity, rename the graphviz crate locally to dot. use graphviz as dot; -use cfg; -use hir; -use ty::TyCtxt; +use crate::cfg; +use crate::hir; +use crate::ty::TyCtxt; pub type Node<'a> = (cfg::CFGIndex, &'a cfg::CFGNode); pub type Edge<'a> = &'a cfg::CFGEdge; diff --git a/src/librustc/cfg/mod.rs b/src/librustc/cfg/mod.rs index cf9c24cc58a62..345dff88b5f0b 100644 --- a/src/librustc/cfg/mod.rs +++ b/src/librustc/cfg/mod.rs @@ -1,20 +1,10 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Module that constructs a control-flow graph representing an item. //! Uses `Graph` as the underlying representation. use rustc_data_structures::graph::implementation as graph; -use ty::TyCtxt; -use hir; -use hir::def_id::DefId; +use crate::ty::TyCtxt; +use crate::hir; +use crate::hir::def_id::DefId; mod construct; pub mod graphviz; diff --git a/src/librustc/dep_graph/cgu_reuse_tracker.rs b/src/librustc/dep_graph/cgu_reuse_tracker.rs index 0392d32989697..13f6f95332973 100644 --- a/src/librustc/dep_graph/cgu_reuse_tracker.rs +++ b/src/librustc/dep_graph/cgu_reuse_tracker.rs @@ -1,18 +1,8 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Some facilities for tracking how codegen-units are reused during incremental //! 
compilation. This is used for incremental compilation tests and debug //! output. -use session::Session; +use crate::session::Session; use rustc_data_structures::fx::FxHashMap; use std::sync::{Arc, Mutex}; use syntax_pos::Span; diff --git a/src/librustc/dep_graph/debug.rs b/src/librustc/dep_graph/debug.rs index f0e43e78a50af..f18ee3dced72d 100644 --- a/src/librustc/dep_graph/debug.rs +++ b/src/librustc/dep_graph/debug.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code for debugging the dep-graph. use super::dep_node::DepNode; @@ -32,7 +22,7 @@ impl DepNodeFilter { } } - /// True if all nodes always pass the filter. + /// Returns `true` if all nodes always pass the filter. pub fn accepts_all(&self) -> bool { self.text.is_empty() } diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index f0c6196412adb..e5bf9a27ab050 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -1,14 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - //! This module defines the `DepNode` type which the compiler uses to represent //! nodes in the dependency graph. A `DepNode` consists of a `DepKind` (which //! specifies the kind of thing it represents, like a piece of HIR, MIR, etc) @@ -60,25 +49,24 @@ //! user of the `DepNode` API of having to know how to compute the expected //! fingerprint for a given set of node parameters. -use mir::interpret::GlobalId; -use hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; -use hir::map::DefPathHash; -use hir::HirId; +use crate::mir::interpret::GlobalId; +use crate::hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX}; +use crate::hir::map::DefPathHash; +use crate::hir::HirId; -use ich::{Fingerprint, StableHashingContext}; +use crate::ich::{Fingerprint, StableHashingContext}; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; use std::fmt; use std::hash::Hash; use syntax_pos::symbol::InternedString; -use traits; -use traits::query::{ +use crate::traits; +use crate::traits::query::{ CanonicalProjectionGoal, CanonicalTyGoal, CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, CanonicalPredicateGoal, CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpNormalizeGoal, }; -use ty::{TyCtxt, FnSig, Instance, InstanceDef, - ParamEnv, ParamEnvAnd, Predicate, PolyFnSig, PolyTraitRef, Ty}; -use ty::subst::Substs; +use crate::ty::{self, TyCtxt, ParamEnvAnd, Ty}; +use crate::ty::subst::SubstsRef; // erase!() just makes tokens go away. It's used to specify which macro argument // is repeated (i.e., which sub-expression of the macro we are in) but don't need @@ -96,11 +84,6 @@ macro_rules! is_anon_attr { ($attr:ident) => (false); } -macro_rules! is_input_attr { - (input) => (true); - ($attr:ident) => (false); -} - macro_rules! 
is_eval_always_attr { (eval_always) => (true); ($attr:ident) => (false); @@ -110,10 +93,6 @@ macro_rules! contains_anon_attr { ($($attr:ident),*) => ({$(is_anon_attr!($attr) | )* false}); } -macro_rules! contains_input_attr { - ($($attr:ident),*) => ({$(is_input_attr!($attr) | )* false}); -} - macro_rules! contains_eval_always_attr { ($($attr:ident),*) => ({$(is_eval_always_attr!($attr) | )* false}); } @@ -122,7 +101,7 @@ macro_rules! define_dep_nodes { (<$tcx:tt> $( [$($attr:ident),* ] - $variant:ident $(( $tuple_arg_ty:ty $(,)* ))* + $variant:ident $(( $tuple_arg_ty:ty $(,)? ))* $({ $($struct_arg_name:ident : $struct_arg_ty:ty),* })* ,)* ) => ( @@ -162,7 +141,9 @@ macro_rules! define_dep_nodes { } } - #[inline] + // FIXME: Make `is_anon`, `is_eval_always` and `has_params` properties + // of queries + #[inline(always)] pub fn is_anon(&self) -> bool { match *self { $( @@ -171,16 +152,7 @@ macro_rules! define_dep_nodes { } } - #[inline] - pub fn is_input(&self) -> bool { - match *self { - $( - DepKind :: $variant => { contains_input_attr!($($attr),*) } - )* - } - } - - #[inline] + #[inline(always)] pub fn is_eval_always(&self) -> bool { match *self { $( @@ -190,7 +162,7 @@ macro_rules! define_dep_nodes { } #[allow(unreachable_code)] - #[inline] + #[inline(always)] pub fn has_params(&self) -> bool { match *self { $( @@ -230,6 +202,7 @@ macro_rules! define_dep_nodes { impl DepNode { #[allow(unreachable_code, non_snake_case)] + #[inline(always)] pub fn new<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, dep: DepConstructor<'gcx>) -> DepNode @@ -299,30 +272,30 @@ macro_rules! define_dep_nodes { /// Construct a DepNode from the given DepKind and DefPathHash. This /// method will assert that the given DepKind actually requires a /// single DefId/DefPathHash parameter. - #[inline] + #[inline(always)] pub fn from_def_path_hash(kind: DepKind, def_path_hash: DefPathHash) -> DepNode { - assert!(kind.can_reconstruct_query_key() && kind.has_params()); + debug_assert!(kind.can_reconstruct_query_key() && kind.has_params()); DepNode { kind, hash: def_path_hash.0, } } - /// Create a new, parameterless DepNode. This method will assert + /// Creates a new, parameterless DepNode. This method will assert /// that the DepNode corresponding to the given DepKind actually /// does not require any parameters. - #[inline] + #[inline(always)] pub fn new_no_params(kind: DepKind) -> DepNode { - assert!(!kind.has_params()); + debug_assert!(!kind.has_params()); DepNode { kind, hash: Fingerprint::ZERO, } } - /// Extract the DefId corresponding to this DepNode. This will work + /// Extracts the DefId corresponding to this DepNode. This will work /// if two conditions are met: /// /// 1. 
The Fingerprint of the DepNode actually is a DefPathHash, and @@ -397,7 +370,7 @@ impl fmt::Debug for DepNode { write!(f, "(")?; - ::ty::tls::with_opt(|opt_tcx| { + crate::ty::tls::with_opt(|opt_tcx| { if let Some(tcx) = opt_tcx { if let Some(def_id) = self.extract_def_id(tcx) { write!(f, "{}", tcx.def_path_debug_str(def_id))?; @@ -418,31 +391,20 @@ impl fmt::Debug for DepNode { impl DefPathHash { - #[inline] + #[inline(always)] pub fn to_dep_node(self, kind: DepKind) -> DepNode { DepNode::from_def_path_hash(kind, self) } } impl DefId { - #[inline] + #[inline(always)] pub fn to_dep_node(self, tcx: TyCtxt<'_, '_, '_>, kind: DepKind) -> DepNode { DepNode::from_def_path_hash(kind, tcx.def_path_hash(self)) } } -impl DepKind { - #[inline] - pub fn fingerprint_needed_for_crate_hash(self) -> bool { - match self { - DepKind::HirBody | - DepKind::Krate => true, - _ => false, - } - } -} - -define_dep_nodes!( <'tcx> +rustc_dep_node_append!([define_dep_nodes!][ <'tcx> // We use this for most things when incr. comp. is turned off. [] Null, @@ -457,230 +419,48 @@ define_dep_nodes!( <'tcx> // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain // access to the krate, but you must remember to add suitable // edges yourself for the individual items that you read. - [input] Krate, + [eval_always] Krate, // Represents the body of a function or method. The def-id is that of the // function/method. - [input] HirBody(DefId), + [eval_always] HirBody(DefId), // Represents the HIR node with the given node-id - [input] Hir(DefId), + [eval_always] Hir(DefId), // Represents metadata from an extern crate. - [input] CrateMetadata(CrateNum), - - // Represents different phases in the compiler. - [] RegionScopeTree(DefId), - [eval_always] Coherence, - [eval_always] CoherenceInherentImplOverlapCheck, - [] CoherenceCheckTrait(DefId), - [eval_always] PrivacyAccessLevels(CrateNum), - - // Represents the MIR for a fn; also used as the task node for - // things read/modify that MIR. - [] MirConstQualif(DefId), - [] MirBuilt(DefId), - [] MirConst(DefId), - [] MirValidated(DefId), - [] MirOptimized(DefId), - [] MirShim { instance_def: InstanceDef<'tcx> }, - - [] BorrowCheckKrate, - [] BorrowCheck(DefId), - [] MirBorrowCheck(DefId), - [] UnsafetyCheckResult(DefId), - [] UnsafeDeriveOnReprPacked(DefId), - - [] Reachability, - [] MirKeys, - [eval_always] CrateVariances, - - // Nodes representing bits of computed IR in the tcx. Each shared - // table in the tcx (or elsewhere) maps to one of these - // nodes. 
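Stepping back to the helper macros kept earlier in dep_node.rs (is_eval_always_attr! and contains_eval_always_attr!, with the is_input_attr! pair removed above): they fold an attribute list into a boolean by OR-ing one literal per attribute. A standalone sketch of the same trick:

macro_rules! is_eval_always_attr {
    (eval_always) => (true);
    ($attr:ident) => (false);
}

macro_rules! contains_eval_always_attr {
    // Expands to `false` for an empty list, and to `a | b | ... | false` otherwise.
    ($($attr:ident),*) => ({ $(is_eval_always_attr!($attr) | )* false });
}

fn main() {
    assert!(contains_eval_always_attr!(eval_always));
    assert!(!contains_eval_always_attr!(anon));
    assert!(!contains_eval_always_attr!());
    assert!(contains_eval_always_attr!(anon, eval_always));
}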
- [] AssociatedItems(DefId), - [] TypeOfItem(DefId), - [] GenericsOfItem(DefId), - [] PredicatesOfItem(DefId), - [] ExplicitPredicatesOfItem(DefId), - [] PredicatesDefinedOnItem(DefId), - [] InferredOutlivesOf(DefId), - [] InferredOutlivesCrate(CrateNum), - [] SuperPredicatesOfItem(DefId), - [] TraitDefOfItem(DefId), - [] AdtDefOfItem(DefId), - [] ImplTraitRef(DefId), - [] ImplPolarity(DefId), - [] FnSignature(DefId), - [] CoerceUnsizedInfo(DefId), - - [] ItemVarianceConstraints(DefId), - [] ItemVariances(DefId), - [] IsConstFn(DefId), - [] IsPromotableConstFn(DefId), - [] IsForeignItem(DefId), - [] TypeParamPredicates { item_id: DefId, param_id: DefId }, - [] SizedConstraint(DefId), - [] DtorckConstraint(DefId), - [] AdtDestructor(DefId), - [] AssociatedItemDefIds(DefId), - [eval_always] InherentImpls(DefId), - [] TypeckBodiesKrate, - [] TypeckTables(DefId), - [] UsedTraitImports(DefId), - [] HasTypeckTables(DefId), - [] ConstEval { param_env: ParamEnvAnd<'tcx, GlobalId<'tcx>> }, - [] ConstEvalRaw { param_env: ParamEnvAnd<'tcx, GlobalId<'tcx>> }, - [] CheckMatch(DefId), - [] SymbolName(DefId), - [] InstanceSymbolName { instance: Instance<'tcx> }, - [] SpecializationGraph(DefId), - [] ObjectSafety(DefId), - [] FulfillObligation { param_env: ParamEnv<'tcx>, trait_ref: PolyTraitRef<'tcx> }, - [] VtableMethods { trait_ref: PolyTraitRef<'tcx> }, - - [] IsCopy { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, - [] IsSized { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, - [] IsFreeze { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, - [] NeedsDrop { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, - [] Layout { param_env: ParamEnvAnd<'tcx, Ty<'tcx>> }, - - // The set of impls for a given trait. - [] TraitImpls(DefId), - - [input] AllLocalTraitImpls, + [eval_always] CrateMetadata(CrateNum), + + [eval_always] AllLocalTraitImpls, [anon] TraitSelect, - [] ParamEnv(DefId), - [] Environment(DefId), - [] DescribeDef(DefId), - - // FIXME(mw): DefSpans are not really inputs since they are derived from - // HIR. But at the moment HIR hashing still contains some hacks that allow - // to make type debuginfo to be source location independent. Declaring - // DefSpan an input makes sure that changes to these are always detected - // regardless of HIR hashing. 
- [input] DefSpan(DefId), - [] LookupStability(DefId), - [] LookupDeprecationEntry(DefId), - [] ConstIsRvaluePromotableToStatic(DefId), - [] RvaluePromotableMap(DefId), - [] ImplParent(DefId), - [] TraitOfItem(DefId), - [] IsReachableNonGeneric(DefId), - [] IsUnreachableLocalDefinition(DefId), - [] IsMirAvailable(DefId), - [] ItemAttrs(DefId), - [] CodegenFnAttrs(DefId), - [] FnArgNames(DefId), - [] RenderedConst(DefId), - [] DylibDepFormats(CrateNum), - [] IsPanicRuntime(CrateNum), - [] IsCompilerBuiltins(CrateNum), - [] HasGlobalAllocator(CrateNum), - [] HasPanicHandler(CrateNum), - [input] ExternCrate(DefId), - [eval_always] LintLevels, - [] Specializes { impl1: DefId, impl2: DefId }, - [input] InScopeTraits(DefIndex), - [input] ModuleExports(DefId), - [] IsSanitizerRuntime(CrateNum), - [] IsProfilerRuntime(CrateNum), - [] GetPanicStrategy(CrateNum), - [] IsNoBuiltins(CrateNum), - [] ImplDefaultness(DefId), - [] CheckItemWellFormed(DefId), - [] CheckTraitItemWellFormed(DefId), - [] CheckImplItemWellFormed(DefId), - [] ReachableNonGenerics(CrateNum), - [] NativeLibraries(CrateNum), - [] PluginRegistrarFn(CrateNum), - [] ProcMacroDeclsStatic(CrateNum), - [input] CrateDisambiguator(CrateNum), - [input] CrateHash(CrateNum), - [input] OriginalCrateName(CrateNum), - [input] ExtraFileName(CrateNum), - - [] ImplementationsOfTrait { krate: CrateNum, trait_id: DefId }, - [] AllTraitImplementations(CrateNum), - - [] DllimportForeignItems(CrateNum), - [] IsDllimportForeignItem(DefId), - [] IsStaticallyIncludedForeignItem(DefId), - [] NativeLibraryKind(DefId), - [input] LinkArgs, - - [] ResolveLifetimes(CrateNum), - [] NamedRegion(DefIndex), - [] IsLateBound(DefIndex), - [] ObjectLifetimeDefaults(DefIndex), - - [] Visibility(DefId), - [input] DepKind(CrateNum), - [input] CrateName(CrateNum), - [] ItemChildren(DefId), - [] ExternModStmtCnum(DefId), - [eval_always] GetLibFeatures, - [] DefinedLibFeatures(CrateNum), - [eval_always] GetLangItems, - [] DefinedLangItems(CrateNum), - [] MissingLangItems(CrateNum), - [] VisibleParentMap, - [input] MissingExternCrateItem(CrateNum), - [input] UsedCrateSource(CrateNum), - [input] PostorderCnums, - - // These queries are not expected to have inputs -- as a result, they - // are not good candidates for "replay" because they are essentially - // pure functions of their input (and hence the expectation is that - // no caller would be green **apart** from just these - // queries). Making them anonymous avoids hashing the result, which - // may save a bit of time. 
- [anon] EraseRegionsTy { ty: Ty<'tcx> }, - - [input] Freevars(DefId), - [input] MaybeUnusedTraitImport(DefId), - [input] MaybeUnusedExternCrates, - [eval_always] StabilityIndex, - [eval_always] AllTraits, - [input] AllCrateNums, - [] ExportedSymbols(CrateNum), - [eval_always] CollectAndPartitionMonoItems, - [] IsCodegenedItem(DefId), - [] CodegenUnit(InternedString), [] CompileCodegenUnit(InternedString), - [input] OutputFilenames, - [] NormalizeProjectionTy(CanonicalProjectionGoal<'tcx>), - [] NormalizeTyAfterErasingRegions(ParamEnvAnd<'tcx, Ty<'tcx>>), - [] ImpliedOutlivesBounds(CanonicalTyGoal<'tcx>), - [] DropckOutlives(CanonicalTyGoal<'tcx>), - [] EvaluateObligation(CanonicalPredicateGoal<'tcx>), - [] TypeOpAscribeUserType(CanonicalTypeOpAscribeUserTypeGoal<'tcx>), - [] TypeOpEq(CanonicalTypeOpEqGoal<'tcx>), - [] TypeOpSubtype(CanonicalTypeOpSubtypeGoal<'tcx>), - [] TypeOpProvePredicate(CanonicalTypeOpProvePredicateGoal<'tcx>), - [] TypeOpNormalizeTy(CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>), - [] TypeOpNormalizePredicate(CanonicalTypeOpNormalizeGoal<'tcx, Predicate<'tcx>>), - [] TypeOpNormalizePolyFnSig(CanonicalTypeOpNormalizeGoal<'tcx, PolyFnSig<'tcx>>), - [] TypeOpNormalizeFnSig(CanonicalTypeOpNormalizeGoal<'tcx, FnSig<'tcx>>), - - [] SubstituteNormalizeAndTestPredicates { key: (DefId, &'tcx Substs<'tcx>) }, - - [input] TargetFeaturesWhitelist, - - [] InstanceDefSizeEstimate { instance_def: InstanceDef<'tcx> }, - - [input] Features, - - [] ProgramClausesFor(DefId), - [] ProgramClausesForEnv(traits::Environment<'tcx>), - [] WasmImportModuleMap(CrateNum), - [] ForeignModules(CrateNum), - - [] UpstreamMonomorphizations(CrateNum), - [] UpstreamMonomorphizationsFor(DefId), -); + + [eval_always] Analysis(CrateNum), +]); + +pub trait RecoverKey<'tcx>: Sized { + fn recover(tcx: TyCtxt<'_, 'tcx, 'tcx>, dep_node: &DepNode) -> Option; +} + +impl RecoverKey<'tcx> for CrateNum { + fn recover(tcx: TyCtxt<'_, 'tcx, 'tcx>, dep_node: &DepNode) -> Option { + dep_node.extract_def_id(tcx).map(|id| id.krate) + } +} + +impl RecoverKey<'tcx> for DefId { + fn recover(tcx: TyCtxt<'_, 'tcx, 'tcx>, dep_node: &DepNode) -> Option { + dep_node.extract_def_id(tcx) + } +} + +impl RecoverKey<'tcx> for DefIndex { + fn recover(tcx: TyCtxt<'_, 'tcx, 'tcx>, dep_node: &DepNode) -> Option { + dep_node.extract_def_id(tcx).map(|id| id.index) + } +} trait DepNodeParams<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> : fmt::Debug { const CAN_RECONSTRUCT_QUERY_KEY: bool; @@ -725,7 +505,7 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for DefId { } fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - tcx.item_path_str(*self) + tcx.def_path_str(*self) } } @@ -737,7 +517,7 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for DefIndex { } fn to_debug_str(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> String { - tcx.item_path_str(DefId::local(*self)) + tcx.def_path_str(DefId::local(*self)) } } @@ -801,7 +581,7 @@ impl<'a, 'gcx: 'tcx + 'a, 'tcx: 'a> DepNodeParams<'a, 'gcx, 'tcx> for HirId { } /// A "work product" corresponds to a `.o` (or other) file that we -/// save in between runs. These ids do not have a DefId but rather +/// save in between runs. These IDs do not have a `DefId` but rather /// some independent path or string that persists between runs without /// the need to be mapped or unmapped. (This ensures we can serialize /// them even in the absence of a tcx.) 
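The new RecoverKey impls above let a query key be reconstructed from a type-erased DepNode when a query has to be replayed. A simplified sketch of the idea with stand-in types (these are not rustc's definitions):

#[derive(Clone, Copy, Debug, PartialEq)]
struct DefId { krate: u32, index: u32 }

// Stand-in for a dep node that may carry an encoded DefId.
struct DepNode { def_id: Option<DefId> }

trait RecoverKey: Sized {
    fn recover(node: &DepNode) -> Option<Self>;
}

impl RecoverKey for DefId {
    fn recover(node: &DepNode) -> Option<Self> {
        node.def_id
    }
}

// Plays the role of CrateNum here: project just the crate part out of the node.
impl RecoverKey for u32 {
    fn recover(node: &DepNode) -> Option<Self> {
        node.def_id.map(|id| id.krate)
    }
}

fn main() {
    let node = DepNode { def_id: Some(DefId { krate: 0, index: 7 }) };
    assert_eq!(DefId::recover(&node), Some(DefId { krate: 0, index: 7 }));
    assert_eq!(<u32 as RecoverKey>::recover(&node), Some(0));
}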
@@ -828,6 +608,6 @@ impl WorkProductId { } } -impl_stable_hash_for!(struct ::dep_graph::WorkProductId { +impl_stable_hash_for!(struct crate::dep_graph::WorkProductId { hash }); diff --git a/src/librustc/dep_graph/dep_tracking_map.rs b/src/librustc/dep_graph/dep_tracking_map.rs index da7dabf6e1839..94b832bea628e 100644 --- a/src/librustc/dep_graph/dep_tracking_map.rs +++ b/src/librustc/dep_graph/dep_tracking_map.rs @@ -1,18 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::fx::FxHashMap; use std::cell::RefCell; use std::hash::Hash; use std::marker::PhantomData; -use util::common::MemoizationMap; +use crate::util::common::MemoizationMap; use super::{DepKind, DepNodeIndex, DepGraph}; @@ -53,7 +43,7 @@ impl MemoizationMap for RefCell> { /// /// Here, `[op]` represents whatever nodes `op` reads in the /// course of execution; `Map(key)` represents the node for this - /// map; and `CurrentTask` represents the current task when + /// map, and `CurrentTask` represents the current task when /// `memoize` is invoked. /// /// **Important:** when `op` is invoked, the current task will be diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index 4c94c993ab405..db81a9d826f8c 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -1,25 +1,17 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use errors::DiagnosticBuilder; +use errors::{Diagnostic, DiagnosticBuilder}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use smallvec::SmallVec; -use rustc_data_structures::sync::{Lrc, Lock}; +use rustc_data_structures::sync::{Lrc, Lock, AtomicU32, Ordering}; use std::env; use std::hash::Hash; -use ty::{self, TyCtxt}; -use util::common::{ProfileQueriesMsg, profq_msg}; +use std::collections::hash_map::Entry; +use crate::ty::{self, TyCtxt}; +use crate::util::common::{ProfileQueriesMsg, profq_msg}; +use parking_lot::{Mutex, Condvar}; -use ich::{StableHashingContext, StableHashingContextProvider, Fingerprint}; +use crate::ich::{StableHashingContext, StableHashingContextProvider, Fingerprint}; use super::debug::EdgeFilter; use super::dep_node::{DepNode, DepKind, WorkProductId}; @@ -31,12 +23,6 @@ use super::prev::PreviousDepGraph; #[derive(Clone)] pub struct DepGraph { data: Option>, - - // A vector mapping depnodes from the current graph to their associated - // result value fingerprints. Do not rely on the length of this vector - // being the same as the number of nodes in the graph. The vector can - // contain an arbitrary number of zero-entries at the end. - fingerprints: Lrc>> } newtype_index! { @@ -73,9 +59,15 @@ struct DepGraphData { /// nodes and edges as well as all fingerprints of nodes that have them. 
previous: PreviousDepGraph, - colors: Lock, + colors: DepNodeColorMap, + + /// A set of loaded diagnostics that have been emitted. + emitted_diagnostics: Mutex>, + + /// Used to wait for diagnostics to be emitted. + emitted_diagnostics_cond_var: Condvar, - /// When we load, there may be `.o` files, cached mir, or other such + /// When we load, there may be `.o` files, cached MIR, or other such /// things available to us. If we find that they are not dirty, we /// load the path to the file storing those work-products here into /// this map. We can later look for and extract that data. @@ -87,38 +79,43 @@ struct DepGraphData { loaded_from_cache: Lock>, } +pub fn hash_result(hcx: &mut StableHashingContext<'_>, result: &R) -> Option +where + R: for<'a> HashStable>, +{ + let mut stable_hasher = StableHasher::new(); + result.hash_stable(hcx, &mut stable_hasher); + + Some(stable_hasher.finish()) +} + impl DepGraph { pub fn new(prev_graph: PreviousDepGraph, prev_work_products: FxHashMap) -> DepGraph { - // Pre-allocate the fingerprints array. We over-allocate a little so - // that we hopefully don't have to re-allocate during this compilation - // session. let prev_graph_node_count = prev_graph.node_count(); - let fingerprints = IndexVec::from_elem_n(Fingerprint::ZERO, - (prev_graph_node_count * 115) / 100); DepGraph { data: Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, dep_node_debug: Default::default(), - current: Lock::new(CurrentDepGraph::new()), + current: Lock::new(CurrentDepGraph::new(prev_graph_node_count)), + emitted_diagnostics: Default::default(), + emitted_diagnostics_cond_var: Condvar::new(), previous: prev_graph, - colors: Lock::new(DepNodeColorMap::new(prev_graph_node_count)), + colors: DepNodeColorMap::new(prev_graph_node_count), loaded_from_cache: Default::default(), })), - fingerprints: Lrc::new(Lock::new(fingerprints)), } } pub fn new_disabled() -> DepGraph { DepGraph { data: None, - fingerprints: Lrc::new(Lock::new(IndexVec::new())), } } - /// True if we are actually building the full dep-graph. + /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise. #[inline] pub fn is_fully_enabled(&self) -> bool { self.data.is_some() @@ -126,12 +123,12 @@ impl DepGraph { pub fn query(&self) -> DepGraphQuery { let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); - let nodes: Vec<_> = current_dep_graph.nodes.iter().cloned().collect(); + let nodes: Vec<_> = current_dep_graph.data.iter().map(|n| n.node).collect(); let mut edges = Vec::new(); - for (index, edge_targets) in current_dep_graph.edges.iter_enumerated() { - let from = current_dep_graph.nodes[index]; + for (from, edge_targets) in current_dep_graph.data.iter() + .map(|d| (d.node, &d.edges)) { for &edge_target in edge_targets.iter() { - let to = current_dep_graph.nodes[edge_target]; + let to = current_dep_graph.data[edge_target].node; edges.push((from, to)); } } @@ -144,12 +141,7 @@ impl DepGraph { if let Some(..) = self.data { ty::tls::with_context_opt(|icx| { let icx = if let Some(icx) = icx { icx } else { return }; - match *icx.task { - OpenTask::Ignore => { - // ignored - } - _ => panic!("expected an ignore context") - } + assert!(icx.task_deps.is_none(), "expected no task dependency tracking"); }) } } @@ -159,7 +151,7 @@ impl DepGraph { { ty::tls::with_context(|icx| { let icx = ty::tls::ImplicitCtxt { - task: &OpenTask::Ignore, + task_deps: None, ..icx.clone() }; @@ -196,60 +188,70 @@ impl DepGraph { /// `arg` parameter. 
/// /// [rustc guide]: https://rust-lang.github.io/rustc-guide/incremental-compilation.html - pub fn with_task<'gcx, C, A, R>(&self, - key: DepNode, - cx: C, - arg: A, - task: fn(C, A) -> R) - -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider<'gcx>, - R: HashStable>, + pub fn with_task<'a, C, A, R>( + &self, + key: DepNode, + cx: C, + arg: A, + task: fn(C, A) -> R, + hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option, + ) -> (R, DepNodeIndex) + where + C: DepGraphSafe + StableHashingContextProvider<'a>, { self.with_task_impl(key, cx, arg, false, task, - |key| OpenTask::Regular(Lock::new(RegularOpenTask { - node: key, + |_key| Some(TaskDeps { + #[cfg(debug_assertions)] + node: Some(_key), reads: SmallVec::new(), read_set: Default::default(), - })), - |data, key, task| data.borrow_mut().complete_task(key, task)) + }), + |data, key, fingerprint, task| { + data.borrow_mut().complete_task(key, task.unwrap(), fingerprint) + }, + hash_result) } /// Creates a new dep-graph input with value `input` - pub fn input_task<'gcx, C, R>(&self, + pub fn input_task<'a, C, R>(&self, key: DepNode, cx: C, input: R) -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider<'gcx>, - R: HashStable>, + where C: DepGraphSafe + StableHashingContextProvider<'a>, + R: for<'b> HashStable>, { fn identity_fn(_: C, arg: A) -> A { arg } self.with_task_impl(key, cx, input, true, identity_fn, - |_| OpenTask::Ignore, - |data, key, _| data.borrow_mut().alloc_node(key, SmallVec::new())) + |_| None, + |data, key, fingerprint, _| { + data.borrow_mut().alloc_node(key, SmallVec::new(), fingerprint) + }, + hash_result::) } - fn with_task_impl<'gcx, C, A, R>( + fn with_task_impl<'a, C, A, R>( &self, key: DepNode, cx: C, arg: A, no_tcx: bool, task: fn(C, A) -> R, - create_task: fn(DepNode) -> OpenTask, + create_task: fn(DepNode) -> Option, finish_task_and_alloc_depnode: fn(&Lock, DepNode, - OpenTask) -> DepNodeIndex + Fingerprint, + Option) -> DepNodeIndex, + hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option, ) -> (R, DepNodeIndex) where - C: DepGraphSafe + StableHashingContextProvider<'gcx>, - R: HashStable>, + C: DepGraphSafe + StableHashingContextProvider<'a>, { if let Some(ref data) = self.data { - let open_task = create_task(key); + let task_deps = create_task(key).map(|deps| Lock::new(deps)); // In incremental mode, hash the result of the task. 
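The reworked with_task above threads a hash_result closure through the task machinery so each query decides whether and how its result is fingerprinted; a None fingerprint means the result cannot be compared and the node is treated as red. A rough std-only analogue of the hashing step (DefaultHasher and u64 stand in for rustc's StableHasher and Fingerprint):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type Fingerprint = u64; // rustc's Fingerprint is 128-bit and stable across runs

fn hash_result<R: Hash>(result: &R) -> Option<Fingerprint> {
    let mut hasher = DefaultHasher::new(); // stands in for StableHasher
    result.hash(&mut hasher);
    Some(hasher.finish())
}

fn main() {
    let previous = hash_result(&vec![1u32, 2, 3]);
    let current = hash_result(&vec![1u32, 2, 3]);
    // Matching fingerprints are what allow a task to be colored green instead of red.
    assert_eq!(previous, current);
}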
We don't // do anything with the hash yet, but we are computing it @@ -267,7 +269,7 @@ impl DepGraph { } else { ty::tls::with_context(|icx| { let icx = ty::tls::ImplicitCtxt { - task: &open_task, + task_deps: task_deps.as_ref(), ..icx.clone() }; @@ -281,84 +283,75 @@ impl DepGraph { profq_msg(hcx.sess(), ProfileQueriesMsg::TaskEnd) }; - let dep_node_index = finish_task_and_alloc_depnode(&data.current, key, open_task); - - let mut stable_hasher = StableHasher::new(); - result.hash_stable(&mut hcx, &mut stable_hasher); - - let current_fingerprint = stable_hasher.finish(); - - // Store the current fingerprint - { - let mut fingerprints = self.fingerprints.borrow_mut(); + let current_fingerprint = hash_result(&mut hcx, &result); - if dep_node_index.index() >= fingerprints.len() { - fingerprints.resize(dep_node_index.index() + 1, Fingerprint::ZERO); - } + let dep_node_index = finish_task_and_alloc_depnode( + &data.current, + key, + current_fingerprint.unwrap_or(Fingerprint::ZERO), + task_deps.map(|lock| lock.into_inner()), + ); - debug_assert!(fingerprints[dep_node_index] == Fingerprint::ZERO, - "DepGraph::with_task() - Duplicate fingerprint \ - insertion for {:?}", key); - fingerprints[dep_node_index] = current_fingerprint; - } + let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks; // Determine the color of the new DepNode. if let Some(prev_index) = data.previous.node_to_index_opt(&key) { let prev_fingerprint = data.previous.fingerprint_by_index(prev_index); - let color = if current_fingerprint == prev_fingerprint { - DepNodeColor::Green(dep_node_index) + let color = if let Some(current_fingerprint) = current_fingerprint { + if current_fingerprint == prev_fingerprint { + if print_status { + eprintln!("[task::green] {:?}", key); + } + DepNodeColor::Green(dep_node_index) + } else { + if print_status { + eprintln!("[task::red] {:?}", key); + } + DepNodeColor::Red + } } else { + if print_status { + eprintln!("[task::unknown] {:?}", key); + } + // Mark the node as Red if we can't hash the result DepNodeColor::Red }; - let mut colors = data.colors.borrow_mut(); - debug_assert!(colors.get(prev_index).is_none(), - "DepGraph::with_task() - Duplicate DepNodeColor \ - insertion for {:?}", key); + debug_assert!(data.colors.get(prev_index).is_none(), + "DepGraph::with_task() - Duplicate DepNodeColor \ + insertion for {:?}", key); - colors.insert(prev_index, color); + data.colors.insert(prev_index, color); + } else { + if print_status { + eprintln!("[task::new] {:?}", key); + } } (result, dep_node_index) } else { - if key.kind.fingerprint_needed_for_crate_hash() { - let mut hcx = cx.get_stable_hashing_context(); - let result = task(cx, arg); - let mut stable_hasher = StableHasher::new(); - result.hash_stable(&mut hcx, &mut stable_hasher); - let fingerprint = stable_hasher.finish(); - - let mut fingerprints = self.fingerprints.borrow_mut(); - let dep_node_index = DepNodeIndex::new(fingerprints.len()); - fingerprints.push(fingerprint); - - debug_assert!(fingerprints[dep_node_index] == fingerprint, - "DepGraph::with_task() - Assigned fingerprint to \ - unexpected index for {:?}", key); - - (result, dep_node_index) - } else { - (task(cx, arg), DepNodeIndex::INVALID) - } + (task(cx, arg), DepNodeIndex::INVALID) } } - /// Execute something within an "anonymous" task, that is, a task the - /// DepNode of which is determined by the list of inputs it read from. 
+ /// Executes something within an "anonymous" task, that is, a task the + /// `DepNode` of which is determined by the list of inputs it read from. pub fn with_anon_task(&self, dep_kind: DepKind, op: OP) -> (R, DepNodeIndex) where OP: FnOnce() -> R { if let Some(ref data) = self.data { - let (result, open_task) = ty::tls::with_context(|icx| { - let task = OpenTask::Anon(Lock::new(AnonOpenTask { + let (result, task_deps) = ty::tls::with_context(|icx| { + let task_deps = Lock::new(TaskDeps { + #[cfg(debug_assertions)] + node: None, reads: SmallVec::new(), read_set: Default::default(), - })); + }); let r = { let icx = ty::tls::ImplicitCtxt { - task: &task, + task_deps: Some(&task_deps), ..icx.clone() }; @@ -367,39 +360,46 @@ impl DepGraph { }) }; - (r, task) + (r, task_deps.into_inner()) }); let dep_node_index = data.current .borrow_mut() - .pop_anon_task(dep_kind, open_task); + .complete_anon_task(dep_kind, task_deps); (result, dep_node_index) } else { (op(), DepNodeIndex::INVALID) } } - /// Execute something within an "eval-always" task which is a task - // that runs whenever anything changes. - pub fn with_eval_always_task<'gcx, C, A, R>(&self, - key: DepNode, - cx: C, - arg: A, - task: fn(C, A) -> R) - -> (R, DepNodeIndex) - where C: DepGraphSafe + StableHashingContextProvider<'gcx>, - R: HashStable>, + /// Executes something within an "eval-always" task which is a task + /// that runs whenever anything changes. + pub fn with_eval_always_task<'a, C, A, R>( + &self, + key: DepNode, + cx: C, + arg: A, + task: fn(C, A) -> R, + hash_result: impl FnOnce(&mut StableHashingContext<'_>, &R) -> Option, + ) -> (R, DepNodeIndex) + where + C: DepGraphSafe + StableHashingContextProvider<'a>, { self.with_task_impl(key, cx, arg, false, task, - |key| OpenTask::EvalAlways { node: key }, - |data, key, task| data.borrow_mut().complete_eval_always_task(key, task)) + |_| None, + |data, key, fingerprint, _| { + let mut current = data.borrow_mut(); + current.alloc_node(key, smallvec![], fingerprint) + }, + hash_result) } #[inline] pub fn read(&self, v: DepNode) { if let Some(ref data) = self.data { - let mut current = data.current.borrow_mut(); + let current = data.current.borrow_mut(); if let Some(&dep_node_index) = current.node_to_node_index.get(&v) { - current.read_index(dep_node_index); + std::mem::drop(current); + data.read_index(dep_node_index); } else { bug!("DepKind {:?} should be pre-allocated but isn't.", v.kind) } @@ -409,7 +409,7 @@ impl DepGraph { #[inline] pub fn read_index(&self, dep_node_index: DepNodeIndex) { if let Some(ref data) = self.data { - data.current.borrow_mut().read_index(dep_node_index); + data.read_index(dep_node_index); } } @@ -437,17 +437,8 @@ impl DepGraph { #[inline] pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { - match self.fingerprints.borrow().get(dep_node_index) { - Some(&fingerprint) => fingerprint, - None => { - if let Some(ref data) = self.data { - let dep_node = data.current.borrow().nodes[dep_node_index]; - bug!("Could not find current fingerprint for {:?}", dep_node) - } else { - bug!("Could not find current fingerprint for {:?}", dep_node_index) - } - } - } + let current = self.data.as_ref().expect("dep graph enabled").current.borrow_mut(); + current.data[dep_node_index].fingerprint } pub fn prev_fingerprint_of(&self, dep_node: &DepNode) -> Option { @@ -459,7 +450,7 @@ impl DepGraph { self.data.as_ref().unwrap().previous.node_to_index(dep_node) } - /// Check whether a previous work product exists for `v` and, if + /// Checks whether a 
previous work product exists for `v` and, if /// so, return the path that leads to it. Used to skip doing work. pub fn previous_work_product(&self, v: &WorkProductId) -> Option { self.data @@ -499,26 +490,34 @@ impl DepGraph { .cloned() } - pub fn edge_deduplication_data(&self) -> (u64, u64) { - let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); + pub fn edge_deduplication_data(&self) -> Option<(u64, u64)> { + if cfg!(debug_assertions) { + let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); - (current_dep_graph.total_read_count, current_dep_graph.total_duplicate_read_count) + Some((current_dep_graph.total_read_count, + current_dep_graph.total_duplicate_read_count)) + } else { + None + } } pub fn serialize(&self) -> SerializedDepGraph { let current_dep_graph = self.data.as_ref().unwrap().current.borrow(); - let fingerprints = self.fingerprints.borrow().clone().convert_index_type(); - let nodes = current_dep_graph.nodes.clone().convert_index_type(); + let fingerprints: IndexVec = + current_dep_graph.data.iter().map(|d| d.fingerprint).collect(); + let nodes: IndexVec = + current_dep_graph.data.iter().map(|d| d.node).collect(); - let total_edge_count: usize = current_dep_graph.edges.iter() - .map(|v| v.len()) - .sum(); + let total_edge_count: usize = current_dep_graph.data.iter() + .map(|d| d.edges.len()) + .sum(); let mut edge_list_indices = IndexVec::with_capacity(nodes.len()); let mut edge_list_data = Vec::with_capacity(total_edge_count); - for (current_dep_node_index, edges) in current_dep_graph.edges.iter_enumerated() { + for (current_dep_node_index, edges) in current_dep_graph.data.iter_enumerated() + .map(|(i, d)| (i, &d.edges)) { let start = edge_list_data.len() as u32; // This should really just be a memcpy :/ edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index()))); @@ -542,7 +541,7 @@ impl DepGraph { pub fn node_color(&self, dep_node: &DepNode) -> Option { if let Some(ref data) = self.data { if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { - return data.colors.borrow().get(prev_index) + return data.colors.get(prev_index) } else { // This is a node that did not exist in the previous compilation // session, so we consider it to be red. @@ -553,56 +552,89 @@ impl DepGraph { None } - pub fn try_mark_green<'tcx>(&self, - tcx: TyCtxt<'_, 'tcx, 'tcx>, - dep_node: &DepNode) - -> Option { - debug!("try_mark_green({:?}) - BEGIN", dep_node); - let data = self.data.as_ref().unwrap(); + /// Try to read a node index for the node dep_node. + /// A node will have an index, when it's already been marked green, or when we can mark it + /// green. This function will mark the current task as a reader of the specified node, when + /// a node index can be found for that node. + pub fn try_mark_green_and_read( + &self, + tcx: TyCtxt<'_, '_, '_>, + dep_node: &DepNode + ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { + self.try_mark_green(tcx, dep_node).map(|(prev_index, dep_node_index)| { + debug_assert!(self.is_green(&dep_node)); + self.read_index(dep_node_index); + (prev_index, dep_node_index) + }) + } - #[cfg(not(parallel_queries))] - debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node)); - - if dep_node.kind.is_input() { - // We should only hit try_mark_green() for inputs that do not exist - // anymore in the current compilation session. Existing inputs are - // eagerly marked as either red/green before any queries are - // executed. 
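try_mark_green is split above into a thin entry point plus the recursive try_mark_previous_green, which walks the dependency edges recorded by the previous session. A toy model of that color propagation (the graph shape and the `unchanged` flag are illustrative; forcing a query and comparing fingerprints are collapsed into one boolean per leaf):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Color { Red, Green }

struct PrevGraph {
    deps: Vec<Vec<usize>>,  // edges recorded by the previous compilation session
    unchanged: Vec<bool>,   // for leaves: did re-running them produce the same fingerprint?
}

fn try_mark_green(g: &PrevGraph, colors: &mut HashMap<usize, Color>, node: usize) -> bool {
    if let Some(c) = colors.get(&node) {
        return *c == Color::Green; // already colored in this session
    }
    let green = if g.deps[node].is_empty() {
        // A leaf stands in for a node we must force: green only if its result is unchanged.
        g.unchanged[node]
    } else {
        // An interior node is green exactly when every dependency can be proven green;
        // `all` short-circuits, mirroring the early return on a red dependency.
        g.deps[node].iter().all(|&dep| try_mark_green(g, colors, dep))
    };
    colors.insert(node, if green { Color::Green } else { Color::Red });
    green
}

fn main() {
    // Node 2 depends on 0 and 1; node 1 changed, so 2 cannot be marked green.
    let g = PrevGraph {
        deps: vec![vec![], vec![], vec![0, 1]],
        unchanged: vec![true, false, true],
    };
    let mut colors = HashMap::new();
    assert!(!try_mark_green(&g, &mut colors, 2));
    assert_eq!(colors[&2], Color::Red);
}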
- debug_assert!(dep_node.extract_def_id(tcx).is_none()); - debug!("try_mark_green({:?}) - END - DepNode is deleted input", dep_node); - return None; - } + pub fn try_mark_green( + &self, + tcx: TyCtxt<'_, '_, '_>, + dep_node: &DepNode + ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { + debug_assert!(!dep_node.kind.is_eval_always()); - let (prev_deps, prev_dep_node_index) = match data.previous.edges_from(dep_node) { - Some(prev) => { + // Return None if the dep graph is disabled + let data = self.data.as_ref()?; + + // Return None if the dep node didn't exist in the previous session + let prev_index = data.previous.node_to_index_opt(dep_node)?; + + match data.colors.get(prev_index) { + Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)), + Some(DepNodeColor::Red) => None, + None => { // This DepNode and the corresponding query invocation existed // in the previous compilation session too, so we can try to // mark it as green by recursively marking all of its // dependencies green. - prev - } - None => { - // This DepNode did not exist in the previous compilation session, - // so we cannot mark it as green. - debug!("try_mark_green({:?}) - END - DepNode does not exist in \ - current compilation session anymore", dep_node); - return None + self.try_mark_previous_green( + tcx.global_tcx(), + data, + prev_index, + &dep_node + ).map(|dep_node_index| { + (prev_index, dep_node_index) + }) } - }; + } + } + + /// Try to mark a dep-node which existed in the previous compilation session as green. + fn try_mark_previous_green<'tcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'tcx>, + data: &DepGraphData, + prev_dep_node_index: SerializedDepNodeIndex, + dep_node: &DepNode + ) -> Option { + debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); + + #[cfg(not(parallel_compiler))] + { + debug_assert!(!data.current.borrow().node_to_node_index.contains_key(dep_node)); + debug_assert!(data.colors.get(prev_dep_node_index).is_none()); + } + + // We never try to mark eval_always nodes as green + debug_assert!(!dep_node.kind.is_eval_always()); - debug_assert!(data.colors.borrow().get(prev_dep_node_index).is_none()); + debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); + + let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); let mut current_deps = SmallVec::new(); for &dep_dep_node_index in prev_deps { - let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index); + let dep_dep_node_color = data.colors.get(dep_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green(node_index)) => { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. - debug!("try_mark_green({:?}) --- found dependency {:?} to \ + debug!("try_mark_previous_green({:?}) --- found dependency {:?} to \ be immediately green", dep_node, data.previous.index_to_node(dep_dep_node_index)); @@ -613,7 +645,7 @@ impl DepGraph { // compared to the previous compilation session. We cannot // mark the DepNode as green and also don't need to bother // with checking any of the other dependencies. - debug!("try_mark_green({:?}) - END - dependency {:?} was \ + debug!("try_mark_previous_green({:?}) - END - dependency {:?} was \ immediately red", dep_node, data.previous.index_to_node(dep_dep_node_index)); @@ -623,14 +655,20 @@ impl DepGraph { let dep_dep_node = &data.previous.index_to_node(dep_dep_node_index); // We don't know the state of this dependency. 
If it isn't - // an input node, let's try to mark it green recursively. - if !dep_dep_node.kind.is_input() { - debug!("try_mark_green({:?}) --- state of dependency {:?} \ + // an eval_always node, let's try to mark it green recursively. + if !dep_dep_node.kind.is_eval_always() { + debug!("try_mark_previous_green({:?}) --- state of dependency {:?} \ is unknown, trying to mark it green", dep_node, dep_dep_node); - if let Some(node_index) = self.try_mark_green(tcx, dep_dep_node) { - debug!("try_mark_green({:?}) --- managed to MARK \ + let node_index = self.try_mark_previous_green( + tcx, + data, + dep_dep_node_index, + dep_dep_node + ); + if let Some(node_index) = node_index { + debug!("try_mark_previous_green({:?}) --- managed to MARK \ dependency {:?} as green", dep_node, dep_dep_node); current_deps.push(node_index); continue; @@ -640,7 +678,7 @@ impl DepGraph { DepKind::Hir | DepKind::HirBody | DepKind::CrateMetadata => { - if dep_node.extract_def_id(tcx).is_none() { + if dep_dep_node.extract_def_id(tcx).is_none() { // If the node does not exist anymore, we // just fail to mark green. return None @@ -653,27 +691,27 @@ impl DepGraph { } } _ => { - // For other kinds of inputs it's OK to be + // For other kinds of nodes it's OK to be // forced. } } } // We failed to mark it green, so we try to force the query. - debug!("try_mark_green({:?}) --- trying to force \ + debug!("try_mark_previous_green({:?}) --- trying to force \ dependency {:?}", dep_node, dep_dep_node); - if ::ty::query::force_from_dep_node(tcx, dep_dep_node) { - let dep_dep_node_color = data.colors.borrow().get(dep_dep_node_index); + if crate::ty::query::force_from_dep_node(tcx, dep_dep_node) { + let dep_dep_node_color = data.colors.get(dep_dep_node_index); match dep_dep_node_color { Some(DepNodeColor::Green(node_index)) => { - debug!("try_mark_green({:?}) --- managed to \ + debug!("try_mark_previous_green({:?}) --- managed to \ FORCE dependency {:?} to green", dep_node, dep_dep_node); current_deps.push(node_index); } Some(DepNodeColor::Red) => { - debug!("try_mark_green({:?}) - END - \ + debug!("try_mark_previous_green({:?}) - END - \ dependency {:?} was red after forcing", dep_node, dep_dep_node); @@ -681,7 +719,7 @@ impl DepGraph { } None => { if !tcx.sess.has_errors() { - bug!("try_mark_green() - Forcing the DepNode \ + bug!("try_mark_previous_green() - Forcing the DepNode \ should have set its color") } else { // If the query we just forced has resulted @@ -693,7 +731,7 @@ impl DepGraph { } } else { // The DepNode could not be forced. - debug!("try_mark_green({:?}) - END - dependency {:?} \ + debug!("try_mark_previous_green({:?}) - END - dependency {:?} \ could not be forced", dep_node, dep_dep_node); return None } @@ -710,72 +748,84 @@ impl DepGraph { let (dep_node_index, did_allocation) = { let mut current = data.current.borrow_mut(); - if let Some(&dep_node_index) = current.node_to_node_index.get(&dep_node) { - // Someone else allocated it before us - (dep_node_index, false) - } else { - // We allocating an entry for the node in the current dependency graph and - // adding all the appropriate edges imported from the previous graph - (current.alloc_node(*dep_node, current_deps), true) - } - }; - - // ... copying the fingerprint from the previous graph too, so we don't - // have to recompute it ... 
- { + // Copy the fingerprint from the previous graph, + // so we don't have to recompute it let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index); - let mut fingerprints = self.fingerprints.borrow_mut(); - if dep_node_index.index() >= fingerprints.len() { - fingerprints.resize(dep_node_index.index() + 1, Fingerprint::ZERO); - } + // We allocate an entry for the node in the current dependency graph and + // add all the appropriate edges imported from the previous graph + current.intern_node(*dep_node, current_deps, fingerprint) + }; - // Multiple threads can all write the same fingerprint here - #[cfg(not(parallel_queries))] - debug_assert!(fingerprints[dep_node_index] == Fingerprint::ZERO, - "DepGraph::try_mark_green() - Duplicate fingerprint \ - insertion for {:?}", dep_node); + // ... emitting any stored diagnostic ... - fingerprints[dep_node_index] = fingerprint; + let diagnostics = tcx.queries.on_disk_cache + .load_diagnostics(tcx, prev_dep_node_index); + + if unlikely!(diagnostics.len() > 0) { + self.emit_diagnostics( + tcx, + data, + dep_node_index, + did_allocation, + diagnostics + ); } - // ... emitting any stored diagnostic ... - if did_allocation { + // ... and finally storing a "Green" entry in the color map. + // Multiple threads can all write the same color here + #[cfg(not(parallel_compiler))] + debug_assert!(data.colors.get(prev_dep_node_index).is_none(), + "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ + insertion for {:?}", dep_node); + + data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); + + debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node); + Some(dep_node_index) + } + + /// Atomically emits some loaded diagnostics, assuming that this only gets called with + /// `did_allocation` set to `true` on a single thread. + #[cold] + #[inline(never)] + fn emit_diagnostics<'tcx>( + &self, + tcx: TyCtxt<'_, 'tcx, 'tcx>, + data: &DepGraphData, + dep_node_index: DepNodeIndex, + did_allocation: bool, + diagnostics: Vec<Diagnostic>, + ) { + if did_allocation || !cfg!(parallel_compiler) { // Only the thread which did the allocation emits the error messages + let handle = tcx.sess.diagnostic(); - // FIXME: Ensure that these are printed before returning for all threads. - // Currently threads where did_allocation = false can continue on - // and emit other diagnostics before these diagnostics are emitted. - // Such diagnostics should be emitted after these. - // See https://github.com/rust-lang/rust/issues/48685 - let diagnostics = tcx.queries.on_disk_cache - .load_diagnostics(tcx, prev_dep_node_index); + // Promote the previous diagnostics to the current session. + tcx.queries.on_disk_cache + .store_diagnostics(dep_node_index, diagnostics.clone().into()); - if diagnostics.len() > 0 { - let handle = tcx.sess.diagnostic(); + for diagnostic in diagnostics { + DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit(); + } - // Promote the previous diagnostics to the current session.
- tcx.queries.on_disk_cache - .store_diagnostics(dep_node_index, diagnostics.clone()); + #[cfg(parallel_compiler)] + { + // Mark the diagnostics and emitted and wake up waiters + data.emitted_diagnostics.lock().insert(dep_node_index); + data.emitted_diagnostics_cond_var.notify_all(); + } + } else { + // The other threads will wait for the diagnostics to be emitted - for diagnostic in diagnostics { - DiagnosticBuilder::new_diagnostic(handle, diagnostic).emit(); + let mut emitted_diagnostics = data.emitted_diagnostics.lock(); + loop { + if emitted_diagnostics.contains(&dep_node_index) { + break; } + data.emitted_diagnostics_cond_var.wait(&mut emitted_diagnostics); } } - - // ... and finally storing a "Green" entry in the color map. - let mut colors = data.colors.borrow_mut(); - // Multiple threads can all write the same color here - #[cfg(not(parallel_queries))] - debug_assert!(colors.get(prev_dep_node_index).is_none(), - "DepGraph::try_mark_green() - Duplicate DepNodeColor \ - insertion for {:?}", dep_node); - - colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); - - debug!("try_mark_green({:?}) - END - successfully marked as green", dep_node); - Some(dep_node_index) } // Returns true if the given node has been marked as green during the @@ -795,9 +845,8 @@ impl DepGraph { pub fn exec_cache_promotions<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { let green_nodes: Vec = { let data = self.data.as_ref().unwrap(); - let colors = data.colors.borrow(); - colors.values.indices().filter_map(|prev_index| { - match colors.get(prev_index) { + data.colors.values.indices().filter_map(|prev_index| { + match data.colors.get(prev_index) { Some(DepNodeColor::Green(_)) => { let dep_node = data.previous.index_to_node(prev_index); if dep_node.cache_on_disk(tcx) { @@ -824,7 +873,7 @@ impl DepGraph { pub fn mark_loaded_from_cache(&self, dep_node_index: DepNodeIndex, state: bool) { debug!("mark_loaded_from_cache({:?}, {})", - self.data.as_ref().unwrap().current.borrow().nodes[dep_node_index], + self.data.as_ref().unwrap().current.borrow().data[dep_node_index].node, state); self.data @@ -876,7 +925,7 @@ impl DepGraph { #[derive(Clone, Debug, RustcEncodable, RustcDecodable)] pub struct WorkProduct { pub cgu_name: String, - /// Saved files associated with this CGU + /// Saved files associated with this CGU. pub saved_files: Vec<(WorkProductFileKind, String)>, } @@ -887,23 +936,30 @@ pub enum WorkProductFileKind { BytecodeCompressed, } +#[derive(Clone)] +struct DepNodeData { + node: DepNode, + edges: SmallVec<[DepNodeIndex; 8]>, + fingerprint: Fingerprint, +} + pub(super) struct CurrentDepGraph { - nodes: IndexVec, - edges: IndexVec>, + data: IndexVec, node_to_node_index: FxHashMap, + #[allow(dead_code)] forbidden_edge: Option, - // Anonymous DepNodes are nodes the ID of which we compute from the list of - // their edges. This has the beneficial side-effect that multiple anonymous - // nodes can be coalesced into one without changing the semantics of the - // dependency graph. However, the merging of nodes can lead to a subtle - // problem during red-green marking: The color of an anonymous node from - // the current session might "shadow" the color of the node with the same - // ID from the previous session. In order to side-step this problem, we make - // sure that anon-node IDs allocated in different sessions don't overlap. - // This is implemented by mixing a session-key into the ID fingerprint of - // each anon node. 
The session-key is just a random number generated when - // the DepGraph is created. + /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of + /// their edges. This has the beneficial side-effect that multiple anonymous + /// nodes can be coalesced into one without changing the semantics of the + /// dependency graph. However, the merging of nodes can lead to a subtle + /// problem during red-green marking: The color of an anonymous node from + /// the current session might "shadow" the color of the node with the same + /// ID from the previous session. In order to side-step this problem, we make + /// sure that anonymous `NodeId`s allocated in different sessions don't overlap. + /// This is implemented by mixing a session-key into the ID fingerprint of + /// each anon node. The session-key is just a random number generated when + /// the `DepGraph` is created. anon_id_seed: Fingerprint, total_read_count: u64, @@ -911,7 +967,7 @@ pub(super) struct CurrentDepGraph { } impl CurrentDepGraph { - fn new() -> CurrentDepGraph { + fn new(prev_graph_node_count: usize) -> CurrentDepGraph { use std::time::{SystemTime, UNIX_EPOCH}; let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -934,10 +990,17 @@ impl CurrentDepGraph { None }; + // Pre-allocate the dep node structures. We over-allocate a little so + // that we hopefully don't have to re-allocate during this compilation + // session. + let new_node_count_estimate = (prev_graph_node_count * 115) / 100; + CurrentDepGraph { - nodes: IndexVec::new(), - edges: IndexVec::new(), - node_to_node_index: Default::default(), + data: IndexVec::with_capacity(new_node_count_estimate), + node_to_node_index: FxHashMap::with_capacity_and_hasher( + new_node_count_estimate, + Default::default(), + ), anon_id_seed: stable_hasher.finish(), forbidden_edge, total_read_count: 0, @@ -945,108 +1008,94 @@ impl CurrentDepGraph { } } - fn complete_task(&mut self, key: DepNode, task: OpenTask) -> DepNodeIndex { - if let OpenTask::Regular(task) = task { - let RegularOpenTask { - node, - read_set: _, - reads - } = task.into_inner(); - assert_eq!(node, key); - - // If this is an input node, we expect that it either has no - // dependencies, or that it just depends on DepKind::CrateMetadata - // or DepKind::Krate. This happens for some "thin wrapper queries" - // like `crate_disambiguator` which sometimes have zero deps (for - // when called for LOCAL_CRATE) or they depend on a CrateMetadata - // node. - if cfg!(debug_assertions) { - if node.kind.is_input() && reads.len() > 0 && - // FIXME(mw): Special case for DefSpan until Spans are handled - // better in general. 
- node.kind != DepKind::DefSpan && - reads.iter().any(|&i| { - !(self.nodes[i].kind == DepKind::CrateMetadata || - self.nodes[i].kind == DepKind::Krate) - }) - { - bug!("Input node {:?} with unexpected reads: {:?}", - node, - reads.iter().map(|&i| self.nodes[i]).collect::>()) - } - } - - self.alloc_node(node, reads) - } else { - bug!("complete_task() - Expected regular task to be popped") - } + fn complete_task( + &mut self, + node: DepNode, + task_deps: TaskDeps, + fingerprint: Fingerprint + ) -> DepNodeIndex { + self.alloc_node(node, task_deps.reads, fingerprint) } - fn pop_anon_task(&mut self, kind: DepKind, task: OpenTask) -> DepNodeIndex { - if let OpenTask::Anon(task) = task { - let AnonOpenTask { - read_set: _, - reads - } = task.into_inner(); - debug_assert!(!kind.is_input()); + fn complete_anon_task(&mut self, kind: DepKind, task_deps: TaskDeps) -> DepNodeIndex { + debug_assert!(!kind.is_eval_always()); - let mut fingerprint = self.anon_id_seed; - let mut hasher = StableHasher::new(); + let mut fingerprint = self.anon_id_seed; + let mut hasher = StableHasher::new(); - for &read in reads.iter() { - let read_dep_node = self.nodes[read]; + for &read in task_deps.reads.iter() { + let read_dep_node = self.data[read].node; - ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher); + ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher); - // Fingerprint::combine() is faster than sending Fingerprint - // through the StableHasher (at least as long as StableHasher - // is so slow). - fingerprint = fingerprint.combine(read_dep_node.hash); - } + // Fingerprint::combine() is faster than sending Fingerprint + // through the StableHasher (at least as long as StableHasher + // is so slow). + fingerprint = fingerprint.combine(read_dep_node.hash); + } - fingerprint = fingerprint.combine(hasher.finish()); + fingerprint = fingerprint.combine(hasher.finish()); - let target_dep_node = DepNode { - kind, - hash: fingerprint, - }; + let target_dep_node = DepNode { + kind, + hash: fingerprint, + }; - if let Some(&index) = self.node_to_node_index.get(&target_dep_node) { - index - } else { - self.alloc_node(target_dep_node, reads) - } - } else { - bug!("pop_anon_task() - Expected anonymous task to be popped") - } + self.intern_node(target_dep_node, task_deps.reads, Fingerprint::ZERO).0 } - fn complete_eval_always_task(&mut self, key: DepNode, task: OpenTask) -> DepNodeIndex { - if let OpenTask::EvalAlways { - node, - } = task { - debug_assert_eq!(node, key); - let krate_idx = self.node_to_node_index[&DepNode::new_no_params(DepKind::Krate)]; - self.alloc_node(node, smallvec![krate_idx]) - } else { - bug!("complete_eval_always_task() - Expected eval always task to be popped"); + fn alloc_node( + &mut self, + dep_node: DepNode, + edges: SmallVec<[DepNodeIndex; 8]>, + fingerprint: Fingerprint + ) -> DepNodeIndex { + debug_assert!(!self.node_to_node_index.contains_key(&dep_node)); + self.intern_node(dep_node, edges, fingerprint).0 + } + + fn intern_node( + &mut self, + dep_node: DepNode, + edges: SmallVec<[DepNodeIndex; 8]>, + fingerprint: Fingerprint + ) -> (DepNodeIndex, bool) { + debug_assert_eq!(self.node_to_node_index.len(), self.data.len()); + + match self.node_to_node_index.entry(dep_node) { + Entry::Occupied(entry) => (*entry.get(), false), + Entry::Vacant(entry) => { + let dep_node_index = DepNodeIndex::new(self.data.len()); + self.data.push(DepNodeData { + node: dep_node, + edges, + fingerprint + }); + entry.insert(dep_node_index); + (dep_node_index, true) + } } } +} - fn 
read_index(&mut self, source: DepNodeIndex) { +impl DepGraphData { + fn read_index(&self, source: DepNodeIndex) { ty::tls::with_context_opt(|icx| { - let icx = if let Some(icx) = icx { icx } else { return }; - match *icx.task { - OpenTask::Regular(ref task) => { - let mut task = task.lock(); - self.total_read_count += 1; - if task.read_set.insert(source) { - task.reads.push(source); - - if cfg!(debug_assertions) { - if let Some(ref forbidden_edge) = self.forbidden_edge { - let target = &task.node; - let source = self.nodes[source]; + let icx = if let Some(icx) = icx { icx } else { return }; + if let Some(task_deps) = icx.task_deps { + let mut task_deps = task_deps.lock(); + if cfg!(debug_assertions) { + self.current.lock().total_read_count += 1; + } + if task_deps.read_set.insert(source) { + task_deps.reads.push(source); + + #[cfg(debug_assertions)] + { + if let Some(target) = task_deps.node { + let graph = self.current.lock(); + if let Some(ref forbidden_edge) = graph.forbidden_edge { + let source = graph.data[source].node; if forbidden_edge.test(&source, &target) { bug!("forbidden edge {:?} -> {:?} created", source, @@ -1054,62 +1103,26 @@ impl CurrentDepGraph { } } } - } else { - self.total_duplicate_read_count += 1; - } - } - OpenTask::Anon(ref task) => { - let mut task = task.lock(); - if task.read_set.insert(source) { - task.reads.push(source); } - } - OpenTask::Ignore | OpenTask::EvalAlways { .. } => { - // ignore + } else if cfg!(debug_assertions) { + self.current.lock().total_duplicate_read_count += 1; } } }) } - - fn alloc_node(&mut self, - dep_node: DepNode, - edges: SmallVec<[DepNodeIndex; 8]>) - -> DepNodeIndex { - debug_assert_eq!(self.edges.len(), self.nodes.len()); - debug_assert_eq!(self.node_to_node_index.len(), self.nodes.len()); - debug_assert!(!self.node_to_node_index.contains_key(&dep_node)); - let dep_node_index = DepNodeIndex::new(self.nodes.len()); - self.nodes.push(dep_node); - self.node_to_node_index.insert(dep_node, dep_node_index); - self.edges.push(edges); - dep_node_index - } } -pub struct RegularOpenTask { - node: DepNode, - reads: SmallVec<[DepNodeIndex; 8]>, - read_set: FxHashSet, -} - -pub struct AnonOpenTask { +pub struct TaskDeps { + #[cfg(debug_assertions)] + node: Option, reads: SmallVec<[DepNodeIndex; 8]>, read_set: FxHashSet, } -pub enum OpenTask { - Regular(Lock), - Anon(Lock), - Ignore, - EvalAlways { - node: DepNode, - }, -} - // A data structure that stores Option values as a contiguous // array, using one u32 per entry. 
struct DepNodeColorMap { - values: IndexVec, + values: IndexVec, } const COMPRESSED_NONE: u32 = 0; @@ -1119,12 +1132,12 @@ const COMPRESSED_FIRST_GREEN: u32 = 2; impl DepNodeColorMap { fn new(size: usize) -> DepNodeColorMap { DepNodeColorMap { - values: IndexVec::from_elem_n(COMPRESSED_NONE, size) + values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect(), } } fn get(&self, index: SerializedDepNodeIndex) -> Option { - match self.values[index] { + match self.values[index].load(Ordering::Acquire) { COMPRESSED_NONE => None, COMPRESSED_RED => Some(DepNodeColor::Red), value => Some(DepNodeColor::Green(DepNodeIndex::from_u32( @@ -1133,10 +1146,10 @@ impl DepNodeColorMap { } } - fn insert(&mut self, index: SerializedDepNodeIndex, color: DepNodeColor) { - self.values[index] = match color { + fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) { + self.values[index].store(match color { DepNodeColor::Red => COMPRESSED_RED, DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN, - } + }, Ordering::Release) } } diff --git a/src/librustc/dep_graph/mod.rs b/src/librustc/dep_graph/mod.rs index 158edc6c59e0e..1535e6d349cf1 100644 --- a/src/librustc/dep_graph/mod.rs +++ b/src/librustc/dep_graph/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - pub mod debug; mod dep_node; mod dep_tracking_map; @@ -19,8 +9,8 @@ mod serialized; pub mod cgu_reuse_tracker; pub use self::dep_tracking_map::{DepTrackingMap, DepTrackingMapConfig}; -pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId, label_strs}; -pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor, OpenTask}; +pub use self::dep_node::{DepNode, DepKind, DepConstructor, WorkProductId, RecoverKey, label_strs}; +pub use self::graph::{DepGraph, WorkProduct, DepNodeIndex, DepNodeColor, TaskDeps, hash_result}; pub use self::graph::WorkProductFileKind; pub use self::prev::PreviousDepGraph; pub use self::query::DepGraphQuery; diff --git a/src/librustc/dep_graph/prev.rs b/src/librustc/dep_graph/prev.rs index ebc50f4afb8c8..d971690bbe317 100644 --- a/src/librustc/dep_graph/prev.rs +++ b/src/librustc/dep_graph/prev.rs @@ -1,14 +1,4 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
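The `DepNodeColorMap` change above replaces a plain `IndexVec` of `u32`s, which needed `&mut self` (and so a lock) to update, with one `AtomicU32` per entry: `get` is an `Acquire` load and `insert` a `Release` store, so colors can be read and written concurrently during red/green marking. A minimal, self-contained sketch of the same compressed encoding follows; `Color`, `ColorMap`, and the plain `usize` index are illustrative stand-ins, not the rustc types. Values 0 and 1 mean "not yet colored" and red, and a green node's index is stored shifted up by two so all three states fit in one `u32`.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

#[derive(Debug, PartialEq)]
enum Color {
    Red,
    Green(u32), // index of the node in the current graph
}

struct ColorMap {
    values: Vec<AtomicU32>,
}

impl ColorMap {
    fn new(size: usize) -> ColorMap {
        // Every entry starts out as "no color recorded yet".
        ColorMap {
            values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect(),
        }
    }

    // Reads take `&self`: a single atomic load per lookup.
    fn get(&self, index: usize) -> Option<Color> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(Color::Red),
            value => Some(Color::Green(value - COMPRESSED_FIRST_GREEN)),
        }
    }

    // Writes also take `&self`, which is what removes the lock around
    // the whole map during marking.
    fn insert(&self, index: usize, color: Color) {
        let encoded = match color {
            Color::Red => COMPRESSED_RED,
            Color::Green(node) => node + COMPRESSED_FIRST_GREEN,
        };
        self.values[index].store(encoded, Ordering::Release);
    }
}

fn main() {
    let map = ColorMap::new(4);
    assert_eq!(map.get(0), None);
    map.insert(0, Color::Green(7));
    assert_eq!(map.get(0), Some(Color::Green(7)));
}
```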
- -use ich::Fingerprint; +use crate::ich::Fingerprint; use rustc_data_structures::fx::FxHashMap; use super::dep_node::DepNode; use super::serialized::{SerializedDepGraph, SerializedDepNodeIndex}; @@ -29,14 +19,11 @@ impl PreviousDepGraph { } #[inline] - pub fn edges_from(&self, - dep_node: &DepNode) - -> Option<(&[SerializedDepNodeIndex], SerializedDepNodeIndex)> { - self.index - .get(dep_node) - .map(|&node_index| { - (self.data.edge_targets_from(node_index), node_index) - }) + pub fn edge_targets_from( + &self, + dep_node_index: SerializedDepNodeIndex + ) -> &[SerializedDepNodeIndex] { + self.data.edge_targets_from(dep_node_index) } #[inline] diff --git a/src/librustc/dep_graph/query.rs b/src/librustc/dep_graph/query.rs index 4aec2af887c88..cd4ced238d360 100644 --- a/src/librustc/dep_graph/query.rs +++ b/src/librustc/dep_graph/query.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph::implementation::{ Direction, INCOMING, Graph, NodeIndex, OUTGOING diff --git a/src/librustc/dep_graph/safe.rs b/src/librustc/dep_graph/safe.rs index f82bf9be03390..fc767defe9c71 100644 --- a/src/librustc/dep_graph/safe.rs +++ b/src/librustc/dep_graph/safe.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The `DepGraphSafe` trait -use hir::BodyId; -use hir::def_id::DefId; +use crate::hir::BodyId; +use crate::hir::def_id::DefId; use syntax::ast::NodeId; -use ty::TyCtxt; +use crate::ty::TyCtxt; /// The `DepGraphSafe` trait is used to specify what kinds of values /// are safe to "leak" into a task. The idea is that this should be diff --git a/src/librustc/dep_graph/serialized.rs b/src/librustc/dep_graph/serialized.rs index 0c6c224fa914c..b64f71ed908d8 100644 --- a/src/librustc/dep_graph/serialized.rs +++ b/src/librustc/dep_graph/serialized.rs @@ -1,17 +1,7 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The data that we will serialize and deserialize. -use dep_graph::DepNode; -use ich::Fingerprint; +use crate::dep_graph::DepNode; +use crate::ich::Fingerprint; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; newtype_index! { diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index 3dc6f761ec961..00f9fa3a938d6 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -1,13 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +// ignore-tidy-linelength #![allow(non_snake_case)] // Error messages for EXXXX errors. @@ -372,6 +363,10 @@ struct Foo1 { x: &bool } // ^ expected lifetime parameter struct Foo2<'a> { x: &'a bool } // correct +impl Foo2 {} + // ^^^^ expected lifetime parameter +impl<'a> Foo2<'a> {} // correct + struct Bar1 { x: Foo2 } // ^^^^ expected lifetime parameter struct Bar2<'a> { x: Foo2<'a> } // correct @@ -413,11 +408,7 @@ fn bar(x: &str, y: &str) -> &str { } fn baz<'a>(x: &'a str, y: &str) -> &str { } ``` -Lifetime elision in implementation headers was part of the lifetime elision -RFC. It is, however, [currently unimplemented][iss15872]. - -[book-le]: https://doc.rust-lang.org/nightly/book/first-edition/lifetimes.html#lifetime-elision -[iss15872]: https://github.com/rust-lang/rust/issues/15872 +[book-le]: https://doc.rust-lang.org/book/ch10-03-lifetime-syntax.html#lifetime-elision "##, E0119: r##" @@ -652,7 +643,9 @@ attributes: #![no_std] ``` -See also https://doc.rust-lang.org/book/first-edition/no-stdlib.html +See also the [unstable book][1]. + +[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib "##, E0214: r##" @@ -776,11 +769,40 @@ struct Foo { These can be fixed by declaring lifetime parameters: ``` +struct Foo<'a> { + x: &'a str, +} + fn foo<'a>(x: &'a str) {} +``` +Impl blocks declare lifetime parameters separately. You need to add lifetime +parameters to an impl block if you're implementing a type that has a lifetime +parameter of its own. +For example: + +```compile_fail,E0261 struct Foo<'a> { x: &'a str, } + +// error, use of undeclared lifetime name `'a` +impl Foo<'a> { + fn foo<'a>(x: &'a str) {} +} +``` + +This is fixed by declaring the impl block like this: + +``` +struct Foo<'a> { + x: &'a str, +} + +// correct +impl<'a> Foo<'a> { + fn foo(x: &'a str) {} +} ``` "##, @@ -1164,7 +1186,7 @@ impl Generator for AnotherImpl { fn main() { let cont: u32 = Generator::create(); // error, impossible to choose one of Generator trait implementation - // Impl or AnotherImpl? Maybe anything else? + // Should it be Impl or AnotherImpl, maybe something else? } ``` @@ -1190,27 +1212,6 @@ fn main() { ``` "##, -E0296: r##" -This error indicates that the given recursion limit could not be parsed. Ensure -that the value provided is a positive integer between quotes. - -Erroneous code example: - -```compile_fail,E0296 -#![recursion_limit] - -fn main() {} -``` - -And a working example: - -``` -#![recursion_limit="1000"] - -fn main() {} -``` -"##, - E0308: r##" This error occurs when the compiler was unable to infer the concrete type of a variable. It can occur for several cases, the most common of which is a @@ -1562,7 +1563,8 @@ fn takes_u8(_: u8) {} fn main() { unsafe { takes_u8(::std::mem::transmute(0u16)); } - // error: transmute called with types of different sizes + // error: cannot transmute between types of different sizes, + // or dependently-sized types } ``` @@ -1710,7 +1712,7 @@ fn main() { ``` To understand better how closures work in Rust, read: -https://doc.rust-lang.org/book/first-edition/closures.html +https://doc.rust-lang.org/book/ch13-01-closures.html "##, E0580: r##" @@ -2102,20 +2104,6 @@ trait Foo { } ``` "##, -E0702: r##" -This error indicates that a `#[non_exhaustive]` attribute had a value. 
The -`#[non_exhaustive]` should be empty. - -Examples of erroneous code: - -```compile_fail,E0702 -# #![feature(non_exhaustive)] - -#[non_exhaustive(anything)] -struct Foo; -``` -"##, - E0718: r##" This error indicates that a `#[lang = ".."]` attribute was placed on the wrong type of item. @@ -2147,6 +2135,7 @@ register_diagnostics! { E0280, // requirement is not satisfied E0284, // cannot resolve type // E0285, // overflow evaluation builtin bounds +// E0296, // replaced with a generic attribute input check // E0300, // unexpanded macro // E0304, // expected signed integer constant // E0305, // expected constant @@ -2189,4 +2178,5 @@ register_diagnostics! { E0709, // multiple different lifetimes used in arguments of `async fn` E0710, // an unknown tool name found in scoped lint E0711, // a feature has been declared with conflicting stability attributes +// E0702, // replaced with a generic attribute input check } diff --git a/src/librustc/hir/check_attr.rs b/src/librustc/hir/check_attr.rs index 5ff533fe34b2d..4c527f80d0f5d 100644 --- a/src/librustc/hir/check_attr.rs +++ b/src/librustc/hir/check_attr.rs @@ -1,22 +1,16 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module implements some validity checks for attributes. //! In particular it verifies that `#[inline]` and `#[repr]` attributes are //! attached to items that actually support them and if there are //! conflicts between multiple such attributes attached to the same //! item. -use hir; -use hir::intravisit::{self, Visitor, NestedVisitorMap}; -use ty::TyCtxt; + +use crate::ty::TyCtxt; +use crate::ty::query::Providers; + +use crate::hir; +use crate::hir::def_id::DefId; +use crate::hir::intravisit::{self, Visitor, NestedVisitorMap}; use std::fmt::{self, Display}; use syntax_pos::Span; @@ -97,10 +91,10 @@ struct CheckAttrVisitor<'a, 'tcx: 'a> { } impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { - /// Check any attribute. + /// Checks any attribute. fn check_attributes(&self, item: &hir::Item, target: Target) { if target == Target::Fn || target == Target::Const { - self.tcx.codegen_fn_attrs(self.tcx.hir().local_def_id(item.id)); + self.tcx.codegen_fn_attrs(self.tcx.hir().local_def_id_from_hir_id(item.hir_id)); } else if let Some(a) = item.attrs.iter().find(|a| a.check_name("target_feature")) { self.tcx.sess.struct_span_err(a.span, "attribute should be applied to a function") .span_label(item.span, "not a function") @@ -121,7 +115,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { self.check_used(item, target); } - /// Check if an `#[inline]` is applied to a function or a closure. + /// Checks if an `#[inline]` is applied to a function or a closure. fn check_inline(&self, attr: &hir::Attribute, span: &Span, target: Target) { if target != Target::Fn && target != Target::Closure { struct_span_err!(self.tcx.sess, @@ -133,7 +127,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { } } - /// Check if the `#[non_exhaustive]` attribute on an `item` is valid. + /// Checks if the `#[non_exhaustive]` attribute on an `item` is valid. 
fn check_non_exhaustive(&self, attr: &hir::Attribute, item: &hir::Item, target: Target) { match target { Target::Struct | Target::Enum => { /* Valid */ }, @@ -147,18 +141,9 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { return; } } - - if attr.meta_item_list().is_some() || attr.value_str().is_some() { - struct_span_err!(self.tcx.sess, - attr.span, - E0702, - "attribute should be empty") - .span_label(item.span, "not empty") - .emit(); - } } - /// Check if the `#[marker]` attribute on an `item` is valid. + /// Checks if the `#[marker]` attribute on an `item` is valid. fn check_marker(&self, attr: &hir::Attribute, item: &hir::Item, target: Target) { match target { Target::Trait => { /* Valid */ }, @@ -170,15 +155,9 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { return; } } - - if !attr.is_word() { - self.tcx.sess - .struct_span_err(attr.span, "attribute should be empty") - .emit(); - } } - /// Check if the `#[repr]` attributes on `item` are valid. + /// Checks if the `#[repr]` attributes on `item` are valid. fn check_repr(&self, item: &hir::Item, target: Target) { // Extract the names of all repr hints, e.g., [foo, bar, align] for: // ``` @@ -187,7 +166,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { // ``` let hints: Vec<_> = item.attrs .iter() - .filter(|attr| attr.name() == "repr") + .filter(|attr| attr.check_name("repr")) .filter_map(|attr| attr.meta_item_list()) .flatten() .collect(); @@ -198,17 +177,9 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { let mut is_transparent = false; for hint in &hints { - let name = if let Some(name) = hint.name() { - name - } else { - // Invalid repr hint like repr(42). We don't check for unrecognized hints here - // (libsyntax does that), so just ignore it. - continue; - }; - - let (article, allowed_targets) = match &*name.as_str() { - "C" => { - is_c = true; + let (article, allowed_targets) = match hint.name_or_empty().get() { + name @ "C" | name @ "align" => { + is_c |= name == "C"; if target != Target::Struct && target != Target::Union && target != Target::Enum { @@ -233,14 +204,6 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { continue } } - "align" => { - if target != Target::Struct && - target != Target::Union { - ("a", "struct or union") - } else { - continue - } - } "transparent" => { is_transparent = true; if target != Target::Struct { @@ -262,7 +225,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { _ => continue, }; self.emit_repr_error( - hint.span, + hint.span(), item.span, &format!("attribute should be applied to {}", allowed_targets), &format!("not {} {}", article, allowed_targets), @@ -271,7 +234,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { // Just point at all repr hints if there are any incompatibilities. // This is not ideal, but tracking precisely which ones are at fault is a huge hassle. - let hint_spans = hints.iter().map(|hint| hint.span); + let hint_spans = hints.iter().map(|hint| hint.span()); // Error on repr(transparent, ). 
if is_transparent && hints.len() > 1 { @@ -303,8 +266,8 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { fn check_stmt_attributes(&self, stmt: &hir::Stmt) { // When checking statements ignore expressions, they will be checked later - if let hir::StmtKind::Decl(_, _) = stmt.node { - for attr in stmt.node.attrs() { + if let hir::StmtKind::Local(ref l) = stmt.node { + for attr in l.attrs.iter() { if attr.check_name("inline") { self.check_inline(attr, &stmt.span, Target::Statement); } @@ -342,7 +305,7 @@ impl<'a, 'tcx> CheckAttrVisitor<'a, 'tcx> { fn check_used(&self, item: &hir::Item, target: Target) { for attr in &item.attrs { - if attr.name() == "used" && target != Target::Static { + if attr.check_name("used") && target != Target::Static { self.tcx.sess .span_err(attr.span, "attribute must be applied to a `static` variable"); } @@ -373,16 +336,11 @@ impl<'a, 'tcx> Visitor<'tcx> for CheckAttrVisitor<'a, 'tcx> { } } -pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut checker = CheckAttrVisitor { tcx }; - tcx.hir().krate().visit_all_item_likes(&mut checker.as_deep_visitor()); -} - fn is_c_like_enum(item: &hir::Item) -> bool { if let hir::ItemKind::Enum(ref def, _) = item.node { for variant in &def.variants { match variant.node.data { - hir::VariantData::Unit(_) => { /* continue */ } + hir::VariantData::Unit(..) => { /* continue */ } _ => { return false; } } } @@ -391,3 +349,17 @@ fn is_c_like_enum(item: &hir::Item) -> bool { false } } + +fn check_mod_attrs<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + tcx.hir().visit_item_likes_in_module( + module_def_id, + &mut CheckAttrVisitor { tcx }.as_deep_visitor() + ); +} + +pub(crate) fn provide(providers: &mut Providers<'_>) { + *providers = Providers { + check_mod_attrs, + ..*providers + }; +} diff --git a/src/librustc/hir/def.rs b/src/librustc/hir/def.rs index fb3c3dec7c2be..3cc23ccdb642c 100644 --- a/src/librustc/hir/def.rs +++ b/src/librustc/hir/def.rs @@ -1,24 +1,24 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::DefId; -use util::nodemap::{NodeMap, DefIdMap}; +use crate::hir::def_id::DefId; +use crate::util::nodemap::{NodeMap, DefIdMap}; use syntax::ast; use syntax::ext::base::MacroKind; use syntax_pos::Span; -use hir; -use ty; +use rustc_macros::HashStable; +use crate::hir; +use crate::ty; use self::Namespace::*; -#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +/// Encodes if a `Def::Ctor` is the constructor of an enum variant or a struct. +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] +pub enum CtorOf { + /// This `Def::Ctor` is a synthesized constructor of a tuple or unit struct. + Struct, + /// This `Def::Ctor` is a synthesized constructor of a tuple or unit variant. + Variant, +} + +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] pub enum CtorKind { /// Constructor function automatically created by a tuple struct/variant. 
Fn, @@ -28,7 +28,7 @@ pub enum CtorKind { Fictive, } -#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] pub enum NonMacroAttrKind { /// Single-segment attribute defined by the language (`#[inline]`) Builtin, @@ -42,13 +42,15 @@ pub enum NonMacroAttrKind { Custom, } -#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] pub enum Def { // Type namespace Mod(DefId), - Struct(DefId), // `DefId` refers to `NodeId` of the struct itself + /// `DefId` refers to the struct itself, `Def::Ctor` refers to its constructor if it exists. + Struct(DefId), Union(DefId), Enum(DefId), + /// `DefId` refers to the variant itself, `Def::Ctor` refers to its constructor if it exists. Variant(DefId), Trait(DefId), /// `existential type Foo: Bar;` @@ -68,9 +70,10 @@ pub enum Def { // Value namespace Fn(DefId), Const(DefId), + ConstParam(DefId), Static(DefId, bool /* is_mutbl */), - StructCtor(DefId, CtorKind), // `DefId` refers to `NodeId` of the struct's constructor - VariantCtor(DefId, CtorKind), // `DefId` refers to the enum variant + /// `DefId` refers to the struct or enum variant's constructor. + Ctor(DefId, CtorOf, CtorKind), SelfCtor(DefId /* impl */), // `DefId` refers to the impl Method(DefId), AssociatedConst(DefId), @@ -191,7 +194,7 @@ impl ::std::ops::IndexMut for PerNS { } impl PerNS> { - /// Returns whether all the items in this collection are `None`. + /// Returns `true` if all the items in this collection are `None`. pub fn is_empty(&self) -> bool { self.type_ns.is_none() && self.value_ns.is_none() && self.macro_ns.is_none() } @@ -218,7 +221,7 @@ pub type ExportMap = DefIdMap>; /// namespace. pub type ImportMap = NodeMap>>; -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct Export { /// The name of the target. pub ident: ast::Ident, @@ -250,7 +253,7 @@ impl CtorKind { } impl NonMacroAttrKind { - fn descr(self) -> &'static str { + pub fn descr(self) -> &'static str { match self { NonMacroAttrKind::Builtin => "built-in attribute", NonMacroAttrKind::Tool => "tool attribute", @@ -262,18 +265,20 @@ impl NonMacroAttrKind { } impl Def { + /// Return the `DefId` of this `Def` if it has an id, else panic. pub fn def_id(&self) -> DefId { self.opt_def_id().unwrap_or_else(|| { bug!("attempted .def_id() on invalid def: {:?}", self) }) } + /// Return `Some(..)` with the `DefId` of this `Def` if it has a id, else `None`. pub fn opt_def_id(&self) -> Option { match *self { Def::Fn(id) | Def::Mod(id) | Def::Static(id, _) | - Def::Variant(id) | Def::VariantCtor(id, ..) | Def::Enum(id) | + Def::Variant(id) | Def::Ctor(id, ..) | Def::Enum(id) | Def::TyAlias(id) | Def::TraitAlias(id) | - Def::AssociatedTy(id) | Def::TyParam(id) | Def::Struct(id) | Def::StructCtor(id, ..) | + Def::AssociatedTy(id) | Def::TyParam(id) | Def::ConstParam(id) | Def::Struct(id) | Def::Union(id) | Def::Trait(id) | Def::Method(id) | Def::Const(id) | Def::AssociatedConst(id) | Def::Macro(id, ..) | Def::Existential(id) | Def::AssociatedExistential(id) | Def::ForeignTy(id) => { @@ -294,26 +299,35 @@ impl Def { } } - /// A human readable kind name + /// Return the `DefId` of this `Def` if it represents a module. 
+ pub fn mod_def_id(&self) -> Option { + match *self { + Def::Mod(id) => Some(id), + _ => None, + } + } + + /// A human readable name for the def kind ("function", "module", etc.). pub fn kind_name(&self) -> &'static str { match *self { Def::Fn(..) => "function", Def::Mod(..) => "module", Def::Static(..) => "static", - Def::Variant(..) => "variant", - Def::VariantCtor(.., CtorKind::Fn) => "tuple variant", - Def::VariantCtor(.., CtorKind::Const) => "unit variant", - Def::VariantCtor(.., CtorKind::Fictive) => "struct variant", Def::Enum(..) => "enum", + Def::Variant(..) => "variant", + Def::Ctor(_, CtorOf::Variant, CtorKind::Fn) => "tuple variant", + Def::Ctor(_, CtorOf::Variant, CtorKind::Const) => "unit variant", + Def::Ctor(_, CtorOf::Variant, CtorKind::Fictive) => "struct variant", + Def::Struct(..) => "struct", + Def::Ctor(_, CtorOf::Struct, CtorKind::Fn) => "tuple struct", + Def::Ctor(_, CtorOf::Struct, CtorKind::Const) => "unit struct", + Def::Ctor(_, CtorOf::Struct, CtorKind::Fictive) => + bug!("impossible struct constructor"), Def::Existential(..) => "existential type", Def::TyAlias(..) => "type alias", Def::TraitAlias(..) => "trait alias", Def::AssociatedTy(..) => "associated type", Def::AssociatedExistential(..) => "associated existential type", - Def::Struct(..) => "struct", - Def::StructCtor(.., CtorKind::Fn) => "tuple struct", - Def::StructCtor(.., CtorKind::Const) => "unit struct", - Def::StructCtor(.., CtorKind::Fictive) => bug!("impossible struct constructor"), Def::SelfCtor(..) => "self constructor", Def::Union(..) => "union", Def::Trait(..) => "trait", @@ -322,6 +336,7 @@ impl Def { Def::Const(..) => "constant", Def::AssociatedConst(..) => "associated constant", Def::TyParam(..) => "type parameter", + Def::ConstParam(..) => "const parameter", Def::PrimTy(..) => "builtin type", Def::Local(..) => "local variable", Def::Upvar(..) => "closure capture", @@ -334,6 +349,7 @@ impl Def { } } + /// An English article for the def. pub fn article(&self) -> &'static str { match *self { Def::AssociatedTy(..) | Def::AssociatedConst(..) | Def::AssociatedExistential(..) | diff --git a/src/librustc/hir/def_id.rs b/src/librustc/hir/def_id.rs index 319d63f66c465..8536f38e48c6d 100644 --- a/src/librustc/hir/def_id.rs +++ b/src/librustc/hir/def_id.rs @@ -1,15 +1,5 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use ty; -use hir::map::definitions::FIRST_FREE_HIGH_DEF_INDEX; +use crate::ty::{self, TyCtxt}; +use crate::hir::map::definitions::FIRST_FREE_HIGH_DEF_INDEX; use rustc_data_structures::indexed_vec::Idx; use serialize; use std::fmt; @@ -27,8 +17,6 @@ pub enum CrateNum { // FIXME(jseyfried): this is also used for custom derives until proc-macro crates get // `CrateNum`s. BuiltinMacros, - /// A CrateNum value that indicates that something is wrong. - Invalid, /// A special CrateNum that we use for the tcx.rcache when decoding from /// the incr. comp. cache. 
ReservedForIncrCompCache, @@ -39,7 +27,6 @@ impl ::std::fmt::Debug for CrateNum { fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { match self { CrateNum::Index(id) => write!(fmt, "crate{}", id.private), - CrateNum::Invalid => write!(fmt, "invalid crate"), CrateNum::BuiltinMacros => write!(fmt, "builtin macros crate"), CrateNum::ReservedForIncrCompCache => write!(fmt, "crate for decoding incr comp cache"), } @@ -100,7 +87,6 @@ impl fmt::Display for CrateNum { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { CrateNum::Index(id) => fmt::Display::fmt(&id.private, f), - CrateNum::Invalid => write!(f, "invalid crate"), CrateNum::BuiltinMacros => write!(f, "builtin macros crate"), CrateNum::ReservedForIncrCompCache => write!(f, "crate for decoding incr comp cache"), } @@ -242,7 +228,7 @@ impl fmt::Debug for DefId { } impl DefId { - /// Make a local `DefId` with the given index. + /// Makes a local `DefId` from the given `DefIndex`. #[inline] pub fn local(index: DefIndex) -> DefId { DefId { krate: LOCAL_CRATE, index: index } @@ -257,6 +243,14 @@ impl DefId { pub fn to_local(self) -> LocalDefId { LocalDefId::from_def_id(self) } + + pub fn describe_as_module(&self, tcx: TyCtxt<'_, '_, '_>) -> String { + if self.is_local() && self.index == CRATE_DEF_INDEX { + format!("top-level module") + } else { + format!("module `{}`", tcx.def_path_str(*self)) + } + } } impl serialize::UseSpecializedEncodable for DefId {} diff --git a/src/librustc/hir/intravisit.rs b/src/librustc/hir/intravisit.rs index f7e2c7036f6f8..c2265eeb30d74 100644 --- a/src/librustc/hir/intravisit.rs +++ b/src/librustc/hir/intravisit.rs @@ -1,20 +1,10 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! HIR walker for walking the contents of nodes. //! //! **For an overview of the visitor strategy, see the docs on the //! `super::itemlikevisit::ItemLikeVisitor` trait.** //! //! If you have decided to use this visitor, here are some general -//! notes on how to do it: +//! notes on how to do so: //! //! Each overridden visit method has full control over what //! happens with its node, it can do its own traversal of the node's children, @@ -41,24 +31,21 @@ //! This order consistency is required in a few places in rustc, for //! example generator inference, and possibly also HIR borrowck. -use syntax::ast::{NodeId, CRATE_NODE_ID, Ident, Name, Attribute}; +use syntax::ast::{Ident, Name, Attribute}; use syntax_pos::Span; -use hir::*; -use hir::def::Def; -use hir::map::{self, Map}; +use crate::hir::*; +use crate::hir::map::Map; use super::itemlikevisit::DeepVisitor; -use std::cmp; - #[derive(Copy, Clone)] pub enum FnKind<'a> { - /// #[xxx] pub async/const/extern "Abi" fn foo() - ItemFn(Name, &'a Generics, FnHeader, &'a Visibility, &'a [Attribute]), + /// `#[xxx] pub async/const/extern "Abi" fn foo()` + ItemFn(Ident, &'a Generics, FnHeader, &'a Visibility, &'a [Attribute]), - /// fn foo(&self) + /// `fn foo(&self)` Method(Ident, &'a MethodSig, Option<&'a Visibility>, &'a [Attribute]), - /// |x, y| {} + /// `|x, y| {}` Closure(&'a [Attribute]), } @@ -98,7 +85,7 @@ pub enum NestedVisitorMap<'this, 'tcx: 'this> { /// using this setting. 
OnlyBodies(&'this Map<'tcx>), - /// Visit all nested things, including item-likes. + /// Visits all nested things, including item-likes. /// /// **This is an unusual choice.** It is used when you want to /// process everything within their lexical context. Typically you @@ -108,7 +95,7 @@ pub enum NestedVisitorMap<'this, 'tcx: 'this> { impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> { /// Returns the map to use for an "intra item-like" thing (if any). - /// e.g., function body. + /// E.g., function body. pub fn intra(self) -> Option<&'this Map<'tcx>> { match self { NestedVisitorMap::None => None, @@ -118,7 +105,7 @@ impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> { } /// Returns the map to use for an "item-like" thing (if any). - /// e.g., item, impl-item. + /// E.g., item, impl-item. pub fn inter(self) -> Option<&'this Map<'tcx>> { match self { NestedVisitorMap::None => None, @@ -129,7 +116,7 @@ impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> { } /// Each method of the Visitor trait is a hook to be potentially -/// overridden. Each method's default implementation recursively visits +/// overridden. Each method's default implementation recursively visits /// the substructure of the input via the corresponding `walk` method; /// e.g., the `visit_mod` method by default calls `intravisit::walk_mod`. /// @@ -141,7 +128,7 @@ impl<'this, 'tcx> NestedVisitorMap<'this, 'tcx> { /// on `visit_nested_item` for details on how to visit nested items. /// /// If you want to ensure that your code handles every variant -/// explicitly, you need to override each method. (And you also need +/// explicitly, you need to override each method. (And you also need /// to monitor future changes to `Visitor` in case a new method with a /// new default implementation gets introduced.) pub trait Visitor<'v> : Sized { @@ -176,7 +163,7 @@ pub trait Visitor<'v> : Sized { /// but cannot supply a `Map`; see `nested_visit_map` for advice. #[allow(unused_variables)] fn visit_nested_item(&mut self, id: ItemId) { - let opt_item = self.nested_visit_map().inter().map(|map| map.expect_item(id.id)); + let opt_item = self.nested_visit_map().inter().map(|map| map.expect_item_by_hir_id(id.id)); if let Some(item) = opt_item { self.visit_item(item); } @@ -215,7 +202,7 @@ pub trait Visitor<'v> : Sized { } } - /// Visit the top-level item and (optionally) nested items / impl items. See + /// Visits the top-level item and (optionally) nested items / impl items. See /// `visit_nested_item` for details. fn visit_item(&mut self, i: &'v Item) { walk_item(self, i) @@ -226,7 +213,7 @@ pub trait Visitor<'v> : Sized { } /// When invoking `visit_all_item_likes()`, you need to supply an - /// item-like visitor. This method converts a "intra-visit" + /// item-like visitor. This method converts a "intra-visit" /// visitor into an item-like visitor that walks the entire tree. /// If you use this, you probably don't want to process the /// contents of nested item-like things, since the outer loop will @@ -237,10 +224,7 @@ pub trait Visitor<'v> : Sized { /////////////////////////////////////////////////////////////////////////// - fn visit_id(&mut self, _node_id: NodeId) { - // Nothing to do. - } - fn visit_def_mention(&mut self, _def: Def) { + fn visit_id(&mut self, _hir_id: HirId) { // Nothing to do. 
} fn visit_name(&mut self, _span: Span, _name: Name) { @@ -249,7 +233,7 @@ pub trait Visitor<'v> : Sized { fn visit_ident(&mut self, ident: Ident) { walk_ident(self, ident) } - fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: NodeId) { + fn visit_mod(&mut self, m: &'v Mod, _s: Span, n: HirId) { walk_mod(self, m, n) } fn visit_foreign_item(&mut self, i: &'v ForeignItem) { @@ -270,9 +254,6 @@ pub trait Visitor<'v> : Sized { fn visit_pat(&mut self, p: &'v Pat) { walk_pat(self, p) } - fn visit_decl(&mut self, d: &'v Decl) { - walk_decl(self, d) - } fn visit_anon_const(&mut self, c: &'v AnonConst) { walk_anon_const(self, c) } @@ -294,11 +275,11 @@ pub trait Visitor<'v> : Sized { fn visit_fn_decl(&mut self, fd: &'v FnDecl) { walk_fn_decl(self, fd) } - fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: BodyId, s: Span, id: NodeId) { + fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl, b: BodyId, s: Span, id: HirId) { walk_fn(self, fk, fd, b, s, id) } - fn visit_use(&mut self, path: &'v Path, id: NodeId, hir_id: HirId) { - walk_use(self, path, id, hir_id) + fn visit_use(&mut self, path: &'v Path, hir_id: HirId) { + walk_use(self, path, hir_id) } fn visit_trait_item(&mut self, ti: &'v TraitItem) { walk_trait_item(self, ti) @@ -325,7 +306,7 @@ pub trait Visitor<'v> : Sized { s: &'v VariantData, _: Name, _: &'v Generics, - _parent_id: NodeId, + _parent_id: HirId, _: Span) { walk_struct_def(self, s) } @@ -335,11 +316,11 @@ pub trait Visitor<'v> : Sized { fn visit_enum_def(&mut self, enum_definition: &'v EnumDef, generics: &'v Generics, - item_id: NodeId, + item_id: HirId, _: Span) { walk_enum_def(self, enum_definition, generics, item_id) } - fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: NodeId) { + fn visit_variant(&mut self, v: &'v Variant, g: &'v Generics, item_id: HirId) { walk_variant(self, v, g, item_id) } fn visit_label(&mut self, label: &'v Label) { @@ -349,6 +330,7 @@ pub trait Visitor<'v> : Sized { match generic_arg { GenericArg::Lifetime(lt) => self.visit_lifetime(lt), GenericArg::Type(ty) => self.visit_ty(ty), + GenericArg::Const(ct) => self.visit_anon_const(&ct.value), } } fn visit_lifetime(&mut self, lifetime: &'v Lifetime) { @@ -387,19 +369,19 @@ pub trait Visitor<'v> : Sized { /// Walks the contents of a crate. See also `Crate::visit_all_items`. 
pub fn walk_crate<'v, V: Visitor<'v>>(visitor: &mut V, krate: &'v Crate) { - visitor.visit_mod(&krate.module, krate.span, CRATE_NODE_ID); + visitor.visit_mod(&krate.module, krate.span, CRATE_HIR_ID); walk_list!(visitor, visit_attribute, &krate.attrs); walk_list!(visitor, visit_macro_def, &krate.exported_macros); } pub fn walk_macro_def<'v, V: Visitor<'v>>(visitor: &mut V, macro_def: &'v MacroDef) { - visitor.visit_id(macro_def.id); + visitor.visit_id(macro_def.hir_id); visitor.visit_name(macro_def.span, macro_def.name); walk_list!(visitor, visit_attribute, ¯o_def.attrs); } -pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_node_id: NodeId) { - visitor.visit_id(mod_node_id); +pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_hir_id: HirId) { + visitor.visit_id(mod_hir_id); for &item_id in &module.item_ids { visitor.visit_nested_item(item_id); } @@ -407,7 +389,7 @@ pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod, mod_node_i pub fn walk_body<'v, V: Visitor<'v>>(visitor: &mut V, body: &'v Body) { for argument in &body.arguments { - visitor.visit_id(argument.id); + visitor.visit_id(argument.hir_id); visitor.visit_pat(&argument.pat); } visitor.visit_expr(&body.value); @@ -418,7 +400,7 @@ pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local) { // dominates the local's definition. walk_list!(visitor, visit_expr, &local.init); walk_list!(visitor, visit_attribute, local.attrs.iter()); - visitor.visit_id(local.id); + visitor.visit_id(local.hir_id); visitor.visit_pat(&local.pat); walk_list!(visitor, visit_ty, &local.ty); } @@ -432,7 +414,7 @@ pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) { } pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) { - visitor.visit_id(lifetime.id); + visitor.visit_id(lifetime.hir_id); match lifetime.name { LifetimeName::Param(ParamName::Plain(ident)) => { visitor.visit_ident(ident); @@ -458,31 +440,31 @@ pub fn walk_poly_trait_ref<'v, V>(visitor: &mut V, pub fn walk_trait_ref<'v, V>(visitor: &mut V, trait_ref: &'v TraitRef) where V: Visitor<'v> { - visitor.visit_id(trait_ref.ref_id); + visitor.visit_id(trait_ref.hir_ref_id); visitor.visit_path(&trait_ref.path, trait_ref.hir_ref_id) } pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { visitor.visit_vis(&item.vis); - visitor.visit_name(item.span, item.name); + visitor.visit_ident(item.ident); match item.node { ItemKind::ExternCrate(orig_name) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); if let Some(orig_name) = orig_name { visitor.visit_name(item.span, orig_name); } } ItemKind::Use(ref path, _) => { - visitor.visit_use(path, item.id, item.hir_id); + visitor.visit_use(path, item.hir_id); } ItemKind::Static(ref typ, _, body) | ItemKind::Const(ref typ, body) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); visitor.visit_ty(typ); visitor.visit_nested_body(body); } ItemKind::Fn(ref declaration, header, ref generics, body_id) => { - visitor.visit_fn(FnKind::ItemFn(item.name, + visitor.visit_fn(FnKind::ItemFn(item.ident, generics, header, &item.vis, @@ -490,46 +472,47 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { declaration, body_id, item.span, - item.id) + item.hir_id) } ItemKind::Mod(ref module) => { - // `visit_mod()` takes care of visiting the `Item`'s `NodeId`. - visitor.visit_mod(module, item.span, item.id) + // `visit_mod()` takes care of visiting the `Item`'s `HirId`. 
+ visitor.visit_mod(module, item.span, item.hir_id) } ItemKind::ForeignMod(ref foreign_module) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); walk_list!(visitor, visit_foreign_item, &foreign_module.items); } ItemKind::GlobalAsm(_) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); } - ItemKind::Ty(ref typ, ref type_parameters) => { - visitor.visit_id(item.id); - visitor.visit_ty(typ); - visitor.visit_generics(type_parameters) - } - ItemKind::Existential(ExistTy {ref generics, ref bounds, impl_trait_fn}) => { - visitor.visit_id(item.id); + ItemKind::Ty(ref ty, ref generics) => { + visitor.visit_id(item.hir_id); + visitor.visit_ty(ty); + visitor.visit_generics(generics) + } + ItemKind::Existential(ExistTy { + ref generics, + ref bounds, + .. + }) => { + visitor.visit_id(item.hir_id); walk_generics(visitor, generics); walk_list!(visitor, visit_param_bound, bounds); - if let Some(impl_trait_fn) = impl_trait_fn { - visitor.visit_def_mention(Def::Fn(impl_trait_fn)) - } } - ItemKind::Enum(ref enum_definition, ref type_parameters) => { - visitor.visit_generics(type_parameters); - // `visit_enum_def()` takes care of visiting the `Item`'s `NodeId`. - visitor.visit_enum_def(enum_definition, type_parameters, item.id, item.span) + ItemKind::Enum(ref enum_definition, ref generics) => { + visitor.visit_generics(generics); + // `visit_enum_def()` takes care of visiting the `Item`'s `HirId`. + visitor.visit_enum_def(enum_definition, generics, item.hir_id, item.span) } ItemKind::Impl( .., - ref type_parameters, + ref generics, ref opt_trait_reference, ref typ, ref impl_item_refs ) => { - visitor.visit_id(item.id); - visitor.visit_generics(type_parameters); + visitor.visit_id(item.hir_id); + visitor.visit_generics(generics); walk_list!(visitor, visit_trait_ref, opt_trait_reference); visitor.visit_ty(typ); walk_list!(visitor, visit_impl_item_ref, impl_item_refs); @@ -537,17 +520,18 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { ItemKind::Struct(ref struct_definition, ref generics) | ItemKind::Union(ref struct_definition, ref generics) => { visitor.visit_generics(generics); - visitor.visit_id(item.id); - visitor.visit_variant_data(struct_definition, item.name, generics, item.id, item.span); + visitor.visit_id(item.hir_id); + visitor.visit_variant_data(struct_definition, item.ident.name, generics, item.hir_id, + item.span); } ItemKind::Trait(.., ref generics, ref bounds, ref trait_item_refs) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_trait_item_ref, trait_item_refs); } ItemKind::TraitAlias(ref generics, ref bounds) => { - visitor.visit_id(item.id); + visitor.visit_id(item.hir_id); visitor.visit_generics(generics); walk_list!(visitor, visit_param_bound, bounds); } @@ -557,16 +541,15 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item) { pub fn walk_use<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path, - item_id: NodeId, hir_id: HirId) { - visitor.visit_id(item_id); + visitor.visit_id(hir_id); visitor.visit_path(path, hir_id); } pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V, enum_definition: &'v EnumDef, generics: &'v Generics, - item_id: NodeId) { + item_id: HirId) { visitor.visit_id(item_id); walk_list!(visitor, visit_variant, @@ -578,10 +561,11 @@ pub fn walk_enum_def<'v, V: Visitor<'v>>(visitor: &mut V, pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, variant: &'v Variant, 
generics: &'v Generics, - parent_item_id: NodeId) { - visitor.visit_name(variant.span, variant.node.name); + parent_item_id: HirId) { + visitor.visit_ident(variant.node.ident); + visitor.visit_id(variant.node.id); visitor.visit_variant_data(&variant.node.data, - variant.node.name, + variant.node.ident.name, generics, parent_item_id, variant.span); @@ -590,7 +574,7 @@ pub fn walk_variant<'v, V: Visitor<'v>>(visitor: &mut V, } pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) { - visitor.visit_id(typ.id); + visitor.visit_id(typ.hir_id); match typ.node { TyKind::Slice(ref ty) => { @@ -631,6 +615,9 @@ pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty) { TyKind::Typeof(ref expression) => { visitor.visit_anon_const(expression) } + TyKind::CVarArgs(ref lt) => { + visitor.visit_lifetime(lt) + } TyKind::Infer | TyKind::Err => {} } } @@ -651,7 +638,6 @@ pub fn walk_qpath<'v, V: Visitor<'v>>(visitor: &mut V, qpath: &'v QPath, id: Hir } pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path) { - visitor.visit_def_mention(path.def); for segment in &path.segments { visitor.visit_path_segment(path.span, segment); } @@ -661,7 +647,7 @@ pub fn walk_path_segment<'v, V: Visitor<'v>>(visitor: &mut V, path_span: Span, segment: &'v PathSegment) { visitor.visit_ident(segment.ident); - if let Some(id) = segment.id { + if let Some(id) = segment.hir_id { visitor.visit_id(id); } if let Some(ref args) = segment.args { @@ -678,13 +664,13 @@ pub fn walk_generic_args<'v, V: Visitor<'v>>(visitor: &mut V, pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(visitor: &mut V, type_binding: &'v TypeBinding) { - visitor.visit_id(type_binding.id); + visitor.visit_id(type_binding.hir_id); visitor.visit_ident(type_binding.ident); visitor.visit_ty(&type_binding.ty); } pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { - visitor.visit_id(pattern.id); + visitor.visit_id(pattern.hir_id); match pattern.node { PatKind::TupleStruct(ref qpath, ref children, _) => { visitor.visit_qpath(qpath, pattern.hir_id, pattern.span); @@ -696,7 +682,7 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { PatKind::Struct(ref qpath, ref fields, _) => { visitor.visit_qpath(qpath, pattern.hir_id, pattern.span); for field in fields { - visitor.visit_id(field.node.id); + visitor.visit_id(field.node.hir_id); visitor.visit_ident(field.node.ident); visitor.visit_pat(&field.node.pat) } @@ -708,8 +694,7 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { PatKind::Ref(ref subpattern, _) => { visitor.visit_pat(subpattern) } - PatKind::Binding(_, canonical_id, ident, ref optional_subpattern) => { - visitor.visit_def_mention(Def::Local(canonical_id)); + PatKind::Binding(_, _hir_id, ident, ref optional_subpattern) => { visitor.visit_ident(ident); walk_list!(visitor, visit_pat, optional_subpattern); } @@ -728,9 +713,9 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat) { } pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem) { - visitor.visit_id(foreign_item.id); + visitor.visit_id(foreign_item.hir_id); visitor.visit_vis(&foreign_item.vis); - visitor.visit_name(foreign_item.span, foreign_item.name); + visitor.visit_ident(foreign_item.ident); match foreign_item.node { ForeignItemKind::Fn(ref function_declaration, ref param_names, ref generics) => { @@ -757,7 +742,7 @@ pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericB } pub fn walk_generic_param<'v, V: 
Visitor<'v>>(visitor: &mut V, param: &'v GenericParam) { - visitor.visit_id(param.id); + visitor.visit_id(param.hir_id); walk_list!(visitor, visit_attribute, &param.attrs); match param.name { ParamName::Plain(ident) => visitor.visit_ident(ident), @@ -766,13 +751,14 @@ pub fn walk_generic_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v Generi match param.kind { GenericParamKind::Lifetime { .. } => {} GenericParamKind::Type { ref default, .. } => walk_list!(visitor, visit_ty, default), + GenericParamKind::Const { ref ty } => visitor.visit_ty(ty), } walk_list!(visitor, visit_param_bound, &param.bounds); } pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics) { walk_list!(visitor, visit_generic_param, &generics.params); - visitor.visit_id(generics.where_clause.id); + visitor.visit_id(generics.where_clause.hir_id); walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates); } @@ -795,11 +781,11 @@ pub fn walk_where_predicate<'v, V: Visitor<'v>>( visitor.visit_lifetime(lifetime); walk_list!(visitor, visit_param_bound, bounds); } - &WherePredicate::EqPredicate(WhereEqPredicate{id, + &WherePredicate::EqPredicate(WhereEqPredicate{hir_id, ref lhs_ty, ref rhs_ty, ..}) => { - visitor.visit_id(id); + visitor.visit_id(hir_id); visitor.visit_ty(lhs_ty); visitor.visit_ty(rhs_ty); } @@ -834,7 +820,7 @@ pub fn walk_fn<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl, body_id: BodyId, _span: Span, - id: NodeId) { + id: HirId) { visitor.visit_id(id); visitor.visit_fn_decl(function_declaration); walk_fn_kind(visitor, function_kind); @@ -847,12 +833,12 @@ pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v Trai visitor.visit_generics(&trait_item.generics); match trait_item.node { TraitItemKind::Const(ref ty, default) => { - visitor.visit_id(trait_item.id); + visitor.visit_id(trait_item.hir_id); visitor.visit_ty(ty); walk_list!(visitor, visit_nested_body, default); } TraitItemKind::Method(ref sig, TraitMethod::Required(ref param_names)) => { - visitor.visit_id(trait_item.id); + visitor.visit_id(trait_item.hir_id); visitor.visit_fn_decl(&sig.decl); for &param_name in param_names { visitor.visit_ident(param_name); @@ -866,10 +852,10 @@ pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v Trai &sig.decl, body_id, trait_item.span, - trait_item.id); + trait_item.hir_id); } TraitItemKind::Type(ref bounds, ref default) => { - visitor.visit_id(trait_item.id); + visitor.visit_id(trait_item.hir_id); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_ty, default); } @@ -888,7 +874,6 @@ pub fn walk_trait_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_item_ref: pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem) { // N.B., deliberately force a compilation error if/when new fields are added.
let ImplItem { - id: _, hir_id: _, ident, ref vis, @@ -906,7 +891,7 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt visitor.visit_generics(generics); match *node { ImplItemKind::Const(ref ty, body) => { - visitor.visit_id(impl_item.id); + visitor.visit_id(impl_item.hir_id); visitor.visit_ty(ty); visitor.visit_nested_body(body); } @@ -918,14 +903,14 @@ pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplIt &sig.decl, body_id, impl_item.span, - impl_item.id); + impl_item.hir_id); } ImplItemKind::Type(ref ty) => { - visitor.visit_id(impl_item.id); + visitor.visit_id(impl_item.hir_id); visitor.visit_ty(ty); } ImplItemKind::Existential(ref bounds) => { - visitor.visit_id(impl_item.id); + visitor.visit_id(impl_item.hir_id); walk_list!(visitor, visit_param_bound, bounds); } } @@ -943,12 +928,14 @@ pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &' pub fn walk_struct_def<'v, V: Visitor<'v>>(visitor: &mut V, struct_definition: &'v VariantData) { - visitor.visit_id(struct_definition.id()); + if let Some(ctor_hir_id) = struct_definition.ctor_hir_id() { + visitor.visit_id(ctor_hir_id); + } walk_list!(visitor, visit_struct_field, struct_definition.fields()); } pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v StructField) { - visitor.visit_id(struct_field.id); + visitor.visit_id(struct_field.hir_id); visitor.visit_vis(&struct_field.vis); visitor.visit_ident(struct_field.ident); visitor.visit_ty(&struct_field.ty); @@ -956,39 +943,30 @@ pub fn walk_struct_field<'v, V: Visitor<'v>>(visitor: &mut V, struct_field: &'v } pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block) { - visitor.visit_id(block.id); + visitor.visit_id(block.hir_id); walk_list!(visitor, visit_stmt, &block.stmts); walk_list!(visitor, visit_expr, &block.expr); } pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt) { + visitor.visit_id(statement.hir_id); match statement.node { - StmtKind::Decl(ref declaration, id) => { - visitor.visit_id(id); - visitor.visit_decl(declaration) - } - StmtKind::Expr(ref expression, id) | - StmtKind::Semi(ref expression, id) => { - visitor.visit_id(id); + StmtKind::Local(ref local) => visitor.visit_local(local), + StmtKind::Item(item) => visitor.visit_nested_item(item), + StmtKind::Expr(ref expression) | + StmtKind::Semi(ref expression) => { visitor.visit_expr(expression) } } } -pub fn walk_decl<'v, V: Visitor<'v>>(visitor: &mut V, declaration: &'v Decl) { - match declaration.node { - DeclKind::Local(ref local) => visitor.visit_local(local), - DeclKind::Item(item) => visitor.visit_nested_item(item), - } -} - pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) { - visitor.visit_id(constant.id); + visitor.visit_id(constant.hir_id); visitor.visit_nested_body(constant.body); } pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { - visitor.visit_id(expression.id); + visitor.visit_id(expression.hir_id); walk_list!(visitor, visit_attribute, expression.attrs.iter()); match expression.node { ExprKind::Box(ref subexpression) => { @@ -1004,7 +982,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { ExprKind::Struct(ref qpath, ref fields, ref optional_base) => { visitor.visit_qpath(qpath, expression.hir_id, expression.span); for field in fields { - visitor.visit_id(field.id); + visitor.visit_id(field.hir_id); visitor.visit_ident(field.ident); visitor.visit_expr(&field.expr) } 
@@ -1056,7 +1034,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { function_declaration, body, expression.span, - expression.id) + expression.hir_id) } ExprKind::Block(ref block, ref opt_label) => { walk_list!(visitor, visit_label, opt_label); @@ -1084,18 +1062,12 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { ExprKind::Break(ref destination, ref opt_expr) => { if let Some(ref label) = destination.label { visitor.visit_label(label); - if let Ok(node_id) = destination.target_id { - visitor.visit_def_mention(Def::Label(node_id)) - } } walk_list!(visitor, visit_expr, opt_expr); } ExprKind::Continue(ref destination) => { if let Some(ref label) = destination.label { visitor.visit_label(label); - if let Ok(node_id) = destination.target_id { - visitor.visit_def_mention(Def::Label(node_id)) - } } } ExprKind::Ret(ref optional_expression) => { @@ -1109,6 +1081,7 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) { ExprKind::Yield(ref subexpression) => { visitor.visit_expr(subexpression); } + ExprKind::Err => {} } } @@ -1124,8 +1097,8 @@ pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm) { } pub fn walk_vis<'v, V: Visitor<'v>>(visitor: &mut V, vis: &'v Visibility) { - if let VisibilityKind::Restricted { ref path, id, hir_id } = vis.node { - visitor.visit_id(id); + if let VisibilityKind::Restricted { ref path, hir_id } = vis.node { + visitor.visit_id(hir_id); visitor.visit_path(path, hir_id) } } @@ -1141,57 +1114,3 @@ pub fn walk_defaultness<'v, V: Visitor<'v>>(_: &mut V, _: &'v Defaultness) { // the right thing to do, should content be added in the future, // would be to walk it. } - -#[derive(Copy, Clone, RustcEncodable, RustcDecodable, Debug)] -pub struct IdRange { - pub min: NodeId, - pub max: NodeId, -} - -impl IdRange { - pub fn max() -> IdRange { - IdRange { - min: NodeId::MAX, - max: NodeId::from_u32(0), - } - } - - pub fn empty(&self) -> bool { - self.min >= self.max - } - - pub fn contains(&self, id: NodeId) -> bool { - id >= self.min && id < self.max - } - - pub fn add(&mut self, id: NodeId) { - self.min = cmp::min(self.min, id); - self.max = cmp::max(self.max, NodeId::from_u32(id.as_u32() + 1)); - } -} - - -pub struct IdRangeComputingVisitor<'a, 'hir: 'a> { - result: IdRange, - map: &'a map::Map<'hir>, -} - -impl<'a, 'hir> IdRangeComputingVisitor<'a, 'hir> { - pub fn new(map: &'a map::Map<'hir>) -> IdRangeComputingVisitor<'a, 'hir> { - IdRangeComputingVisitor { result: IdRange::max(), map: map } - } - - pub fn result(&self) -> IdRange { - self.result - } -} - -impl<'a, 'hir> Visitor<'hir> for IdRangeComputingVisitor<'a, 'hir> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'hir> { - NestedVisitorMap::OnlyBodies(&self.map) - } - - fn visit_id(&mut self, id: NodeId) { - self.result.add(id); - } -} diff --git a/src/librustc/hir/itemlikevisit.rs b/src/librustc/hir/itemlikevisit.rs index f8bc764572f02..bfc9e8f06e235 100644 --- a/src/librustc/hir/itemlikevisit.rs +++ b/src/librustc/hir/itemlikevisit.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::{Item, ImplItem, TraitItem}; use super::intravisit::Visitor; diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index 6958801d865bf..2a25552367678 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -1,36 +1,26 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Lowers the AST to the HIR. //! //! Since the AST and HIR are fairly similar, this is mostly a simple procedure, //! much like a fold. Where lowering involves a bit more work things get more //! interesting and there are some invariants you should know about. These mostly -//! concern spans and ids. +//! concern spans and IDs. //! //! Spans are assigned to AST nodes during parsing and then are modified during //! expansion to indicate the origin of a node and the process it went through -//! being expanded. Ids are assigned to AST nodes just before lowering. +//! being expanded. IDs are assigned to AST nodes just before lowering. //! -//! For the simpler lowering steps, ids and spans should be preserved. Unlike +//! For the simpler lowering steps, IDs and spans should be preserved. Unlike //! expansion we do not preserve the process of lowering in the spans, so spans //! should not be modified here. When creating a new node (as opposed to -//! 'folding' an existing one), then you create a new id using `next_id()`. +//! 'folding' an existing one), then you create a new ID using `next_id()`. //! -//! You must ensure that ids are unique. That means that you should only use the -//! id from an AST node in a single HIR node (you can assume that AST node ids -//! are unique). Every new node must have a unique id. Avoid cloning HIR nodes. -//! If you do, you must then set the new node's id to a fresh one. +//! You must ensure that IDs are unique. That means that you should only use the +//! ID from an AST node in a single HIR node (you can assume that AST node IDs +//! are unique). Every new node must have a unique ID. Avoid cloning HIR nodes. +//! If you do, you must then set the new node's ID to a fresh one. //! //! Spans are used for error messages and for tools to map semantics back to -//! source code. It is therefore not as important with spans as ids to be strict +//! source code. It is therefore not as important with spans as IDs to be strict //! about use (you can't break the compiler by screwing up a span). Obviously, a //! HIR node can only have a single span. But multiple nodes can have the same //! span and spans don't need to be kept in order, etc. Where code is preserved @@ -40,26 +30,27 @@ //! get confused if the spans from leaf AST nodes occur in multiple places //! in the HIR, especially for multiple identifiers. 
-use dep_graph::DepGraph; -use hir::{self, ParamName}; -use hir::HirVec; -use hir::map::{DefKey, DefPathData, Definitions}; -use hir::def_id::{DefId, DefIndex, DefIndexAddressSpace, CRATE_DEF_INDEX}; -use hir::def::{Def, PathResolution, PerNS}; -use hir::GenericArg; -use lint::builtin::{self, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, +use crate::dep_graph::DepGraph; +use crate::hir::{self, ParamName}; +use crate::hir::HirVec; +use crate::hir::map::{DefKey, DefPathData, Definitions}; +use crate::hir::def_id::{DefId, DefIndex, DefIndexAddressSpace, CRATE_DEF_INDEX}; +use crate::hir::def::{Def, PathResolution, PerNS}; +use crate::hir::{GenericArg, ConstArg}; +use crate::lint::builtin::{self, PARENTHESIZED_PARAMS_IN_TYPES_AND_MODULES, ELIDED_LIFETIMES_IN_PATHS}; -use middle::cstore::CrateStore; +use crate::middle::cstore::CrateStore; +use crate::session::Session; +use crate::session::config::nightly_options; +use crate::util::common::FN_OUTPUT_NAME; +use crate::util::nodemap::{DefIdMap, NodeMap}; +use errors::Applicability; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::thin_vec::ThinVec; -use session::Session; -use session::config::nightly_options; -use util::common::FN_OUTPUT_NAME; -use util::nodemap::{DefIdMap, NodeMap}; +use rustc_data_structures::sync::Lrc; -use std::collections::BTreeMap; -use std::fmt::Debug; +use std::collections::{BTreeSet, BTreeMap}; use std::mem; use smallvec::SmallVec; use syntax::attr; @@ -75,14 +66,14 @@ use syntax::symbol::{keywords, Symbol}; use syntax::tokenstream::{TokenStream, TokenTree}; use syntax::parse::token::Token; use syntax::visit::{self, Visitor}; -use syntax_pos::{Span, MultiSpan}; +use syntax_pos::Span; const HIR_ID_COUNTER_LOCKED: u32 = 0xFFFFFFFF; pub struct LoweringContext<'a> { crate_root: Option<&'static str>, - // Used to assign ids to HIR nodes that do not directly correspond to an AST node. + /// Used to assign ids to HIR nodes that do not directly correspond to an AST node. sess: &'a Session, cstore: &'a dyn CrateStore, @@ -90,15 +81,16 @@ pub struct LoweringContext<'a> { resolver: &'a mut dyn Resolver, /// The items being lowered are collected here. - items: BTreeMap, + items: BTreeMap, trait_items: BTreeMap, impl_items: BTreeMap, bodies: BTreeMap, exported_macros: Vec, - trait_impls: BTreeMap>, - trait_auto_impl: BTreeMap, + trait_impls: BTreeMap>, + + modules: BTreeMap, is_generator: bool, @@ -113,27 +105,29 @@ pub struct LoweringContext<'a> { /// written at all (e.g., `&T` or `std::cell::Ref`). anonymous_lifetime_mode: AnonymousLifetimeMode, - // Used to create lifetime definitions from in-band lifetime usages. - // e.g., `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8` - // When a named lifetime is encountered in a function or impl header and - // has not been defined - // (i.e., it doesn't appear in the in_scope_lifetimes list), it is added - // to this list. The results of this list are then added to the list of - // lifetime definitions in the corresponding impl or function generics. + /// Used to create lifetime definitions from in-band lifetime usages. + /// e.g., `fn foo(x: &'x u8) -> &'x u8` to `fn foo<'x>(x: &'x u8) -> &'x u8` + /// When a named lifetime is encountered in a function or impl header and + /// has not been defined + /// (i.e., it doesn't appear in the in_scope_lifetimes list), it is added + /// to this list. 
The results of this list are then added to the list of + /// lifetime definitions in the corresponding impl or function generics. lifetimes_to_define: Vec<(Span, ParamName)>, - // Whether or not in-band lifetimes are being collected. This is used to - // indicate whether or not we're in a place where new lifetimes will result - // in in-band lifetime definitions, such a function or an impl header, - // including implicit lifetimes from `impl_header_lifetime_elision`. + /// Whether or not in-band lifetimes are being collected. This is used to + /// indicate whether or not we're in a place where new lifetimes will result + /// in in-band lifetime definitions, such a function or an impl header, + /// including implicit lifetimes from `impl_header_lifetime_elision`. is_collecting_in_band_lifetimes: bool, - // Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB. - // When `is_collectin_in_band_lifetimes` is true, each lifetime is checked - // against this list to see if it is already in-scope, or if a definition - // needs to be created for it. + /// Currently in-scope lifetimes defined in impl headers, fn headers, or HRTB. + /// When `is_collectin_in_band_lifetimes` is true, each lifetime is checked + /// against this list to see if it is already in-scope, or if a definition + /// needs to be created for it. in_scope_lifetimes: Vec, + current_module: NodeId, + type_def_lifetime_params: DefIdMap, current_hir_id_owner: Vec<(DefIndex, u32)>, @@ -149,7 +143,7 @@ pub trait Resolver { is_value: bool, ) -> hir::Path; - /// Obtain the resolution for a node-id. + /// Obtain the resolution for a `NodeId`. fn get_resolution(&mut self, id: NodeId) -> Option; /// Obtain the possible resolutions for the given `use` statement. @@ -237,13 +231,14 @@ pub fn lower_crate( impl_items: BTreeMap::new(), bodies: BTreeMap::new(), trait_impls: BTreeMap::new(), - trait_auto_impl: BTreeMap::new(), + modules: BTreeMap::new(), exported_macros: Vec::new(), catch_scopes: Vec::new(), loop_scopes: Vec::new(), is_in_loop_condition: false, anonymous_lifetime_mode: AnonymousLifetimeMode::PassThrough, type_def_lifetime_params: Default::default(), + current_module: CRATE_NODE_ID, current_hir_id_owner: vec![(CRATE_DEF_INDEX, 0)], item_local_id_counters: Default::default(), node_id_to_hir_id: IndexVec::new(), @@ -276,10 +271,10 @@ enum ParenthesizedGenericArgs { } /// What to do when we encounter an **anonymous** lifetime -/// reference. Anonymous lifetime references come in two flavors. You +/// reference. Anonymous lifetime references come in two flavors. You /// have implicit, or fully elided, references to lifetimes, like the /// one in `&T` or `Ref`, and you have `'_` lifetimes, like `&'_ T` -/// or `Ref<'_, T>`. These often behave the same, but not always: +/// or `Ref<'_, T>`. These often behave the same, but not always: /// /// - certain usages of implicit references are deprecated, like /// `Ref`, and we sometimes just give hard errors in those cases @@ -323,9 +318,52 @@ enum AnonymousLifetimeMode { /// Pass responsibility to `resolve_lifetime` code for all cases. PassThrough, + + /// Used in the return types of `async fn` where there exists + /// exactly one argument-position elided lifetime. + /// + /// In `async fn`, we lower the arguments types using the `CreateParameter` + /// mode, meaning that non-`dyn` elided lifetimes are assigned a fresh name. 
+ /// If any corresponding elided lifetimes appear in the output, we need to + /// replace them with references to the fresh name assigned to the corresponding + /// elided lifetime in the arguments. + /// + /// For **Modern cases**, replace the anonymous parameter with a + /// reference to a specific freshly-named lifetime that was + /// introduced in argument + /// + /// For **Dyn Bound** cases, pass responsibility to + /// `resole_lifetime` code. + Replace(LtReplacement), +} + +/// The type of elided lifetime replacement to perform on `async fn` return types. +#[derive(Copy, Clone)] +enum LtReplacement { + /// Fresh name introduced by the single non-dyn elided lifetime + /// in the arguments of the async fn. + Some(ParamName), + + /// There is no single non-dyn elided lifetime because no lifetimes + /// appeared in the arguments. + NoLifetimes, + + /// There is no single non-dyn elided lifetime because multiple + /// lifetimes appeared in the arguments. + MultipleLifetimes, +} + +/// Calculates the `LtReplacement` to use for elided lifetimes in the return +/// type based on the fresh elided lifetimes introduced in argument position. +fn get_elided_lt_replacement(arg_position_lifetimes: &[(Span, ParamName)]) -> LtReplacement { + match arg_position_lifetimes { + [] => LtReplacement::NoLifetimes, + [(_span, param)] => LtReplacement::Some(*param), + _ => LtReplacement::MultipleLifetimes, + } } -struct ImplTraitTypeIdVisitor<'a> { ids: &'a mut SmallVec<[hir::ItemId; 1]> } +struct ImplTraitTypeIdVisitor<'a> { ids: &'a mut SmallVec<[NodeId; 1]> } impl<'a, 'b> Visitor<'a> for ImplTraitTypeIdVisitor<'b> { fn visit_ty(&mut self, ty: &'a Ty) { @@ -334,7 +372,7 @@ impl<'a, 'b> Visitor<'a> for ImplTraitTypeIdVisitor<'b> { | TyKind::BareFn(_) => return, - TyKind::ImplTrait(id, _) => self.ids.push(hir::ItemId { id }), + TyKind::ImplTrait(id, _) => self.ids.push(id), _ => {}, } visit::walk_ty(self, ty); @@ -365,9 +403,40 @@ impl<'a> LoweringContext<'a> { lctx: &'lcx mut LoweringContext<'interner>, } + impl MiscCollector<'_, '_> { + fn allocate_use_tree_hir_id_counters( + &mut self, + tree: &UseTree, + owner: DefIndex, + ) { + match tree.kind { + UseTreeKind::Simple(_, id1, id2) => { + for &id in &[id1, id2] { + self.lctx.resolver.definitions().create_def_with_parent( + owner, + id, + DefPathData::Misc, + DefIndexAddressSpace::High, + Mark::root(), + tree.prefix.span, + ); + self.lctx.allocate_hir_id_counter(id); + } + } + UseTreeKind::Glob => (), + UseTreeKind::Nested(ref trees) => { + for &(ref use_tree, id) in trees { + let hir_id = self.lctx.allocate_hir_id_counter(id).hir_id; + self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner); + } + } + } + } + } + impl<'lcx, 'interner> Visitor<'lcx> for MiscCollector<'lcx, 'interner> { fn visit_item(&mut self, item: &'lcx Item) { - self.lctx.allocate_hir_id_counter(item.id, item); + let hir_id = self.lctx.allocate_hir_id_counter(item.id).hir_id; match item.node { ItemKind::Struct(_, ref generics) @@ -387,18 +456,21 @@ impl<'a> LoweringContext<'a> { .count(); self.lctx.type_def_lifetime_params.insert(def_id, count); } + ItemKind::Use(ref use_tree) => { + self.allocate_use_tree_hir_id_counters(use_tree, hir_id.owner); + } _ => {} } visit::walk_item(self, item); } fn visit_trait_item(&mut self, item: &'lcx TraitItem) { - self.lctx.allocate_hir_id_counter(item.id, item); + self.lctx.allocate_hir_id_counter(item.id); visit::walk_trait_item(self, item); } fn visit_impl_item(&mut self, item: &'lcx ImplItem) { - self.lctx.allocate_hir_id_counter(item.id, 
item); + self.lctx.allocate_hir_id_counter(item.id); visit::walk_impl_item(self, item); } } @@ -424,18 +496,30 @@ impl<'a> LoweringContext<'a> { } impl<'lcx, 'interner> Visitor<'lcx> for ItemLowerer<'lcx, 'interner> { + fn visit_mod(&mut self, m: &'lcx Mod, _s: Span, _attrs: &[Attribute], n: NodeId) { + self.lctx.modules.insert(n, hir::ModuleItems { + items: BTreeSet::new(), + trait_items: BTreeSet::new(), + impl_items: BTreeSet::new(), + }); + + let old = self.lctx.current_module; + self.lctx.current_module = n; + visit::walk_mod(self, m); + self.lctx.current_module = old; + } + fn visit_item(&mut self, item: &'lcx Item) { - let mut item_lowered = true; + let mut item_hir_id = None; self.lctx.with_hir_id_owner(item.id, |lctx| { if let Some(hir_item) = lctx.lower_item(item) { - lctx.items.insert(item.id, hir_item); - } else { - item_lowered = false; + item_hir_id = Some(hir_item.hir_id); + lctx.insert_item(hir_item); } }); - if item_lowered { - let item_generics = match self.lctx.items.get(&item.id).unwrap().node { + if let Some(hir_id) = item_hir_id { + let item_generics = match self.lctx.items.get(&hir_id).unwrap().node { hir::ItemKind::Impl(_, _, _, ref generics, ..) | hir::ItemKind::Trait(_, _, ref generics, ..) => { generics.params.clone() @@ -458,9 +542,10 @@ impl<'a> LoweringContext<'a> { fn visit_trait_item(&mut self, item: &'lcx TraitItem) { self.lctx.with_hir_id_owner(item.id, |lctx| { - let id = hir::TraitItemId { node_id: item.id }; let hir_item = lctx.lower_trait_item(item); + let id = hir::TraitItemId { hir_id: hir_item.hir_id }; lctx.trait_items.insert(id, hir_item); + lctx.modules.get_mut(&lctx.current_module).unwrap().trait_items.insert(id); }); visit::walk_trait_item(self, item); @@ -468,9 +553,10 @@ impl<'a> LoweringContext<'a> { fn visit_impl_item(&mut self, item: &'lcx ImplItem) { self.lctx.with_hir_id_owner(item.id, |lctx| { - let id = hir::ImplItemId { node_id: item.id }; let hir_item = lctx.lower_impl_item(item); + let id = hir::ImplItemId { hir_id: hir_item.hir_id }; lctx.impl_items.insert(id, hir_item); + lctx.modules.get_mut(&lctx.current_module).unwrap().impl_items.insert(id); }); visit::walk_impl_item(self, item); } @@ -501,19 +587,25 @@ impl<'a> LoweringContext<'a> { bodies: self.bodies, body_ids, trait_impls: self.trait_impls, - trait_auto_impl: self.trait_auto_impl, + modules: self.modules, } } - fn allocate_hir_id_counter(&mut self, owner: NodeId, debug: &T) -> LoweredNodeId { - if self.item_local_id_counters.insert(owner, 0).is_some() { - bug!( - "Tried to allocate item_local_id_counter for {:?} twice", - debug - ); - } + fn insert_item(&mut self, item: hir::Item) { + let id = item.hir_id; + // FIXME: Use debug_asset-rt + assert_eq!(id.local_id, hir::ItemLocalId::from_u32(0)); + self.items.insert(id, item); + self.modules.get_mut(&self.current_module).unwrap().items.insert(id); + } + + fn allocate_hir_id_counter(&mut self, owner: NodeId) -> LoweredNodeId { + // Setup the counter if needed + self.item_local_id_counters.entry(owner).or_insert(0); // Always allocate the first `HirId` for the owner itself. 
- self.lower_node_id_with_owner(owner, owner) + let lowered = self.lower_node_id_with_owner(owner, owner); + debug_assert_eq!(lowered.hir_id.local_id.as_u32(), 0); + lowered } fn lower_node_id_generic(&mut self, ast_node_id: NodeId, alloc_hir_id: F) -> LoweredNodeId @@ -663,13 +755,20 @@ impl<'a> LoweringContext<'a> { Ident::with_empty_ctxt(Symbol::gensym(s)) } - fn allow_internal_unstable(&self, reason: CompilerDesugaringKind, span: Span) -> Span { + /// Reuses the span but adds information like the kind of the desugaring and features that are + /// allowed inside this span. + fn mark_span_with_reason( + &self, + reason: CompilerDesugaringKind, + span: Span, + allow_internal_unstable: Option>, + ) -> Span { let mark = Mark::fresh(Mark::root()); mark.set_expn_info(source_map::ExpnInfo { call_site: span, def_site: Some(span), format: source_map::CompilerDesugaring(reason), - allow_internal_unstable: true, + allow_internal_unstable, allow_internal_unsafe: false, local_inner_macros: false, edition: source_map::hygiene::default_edition(), @@ -722,53 +821,63 @@ impl<'a> LoweringContext<'a> { let params = lifetimes_to_define .into_iter() - .map(|(span, hir_name)| { - let def_node_id = self.next_id().node_id; - - // Get the name we'll use to make the def-path. Note - // that collisions are ok here and this shouldn't - // really show up for end-user. - let (str_name, kind) = match hir_name { - ParamName::Plain(ident) => ( - ident.as_interned_str(), - hir::LifetimeParamKind::InBand, - ), - ParamName::Fresh(_) => ( - keywords::UnderscoreLifetime.name().as_interned_str(), - hir::LifetimeParamKind::Elided, - ), - ParamName::Error => ( - keywords::UnderscoreLifetime.name().as_interned_str(), - hir::LifetimeParamKind::Error, - ), - }; - - // Add a definition for the in-band lifetime def. - self.resolver.definitions().create_def_with_parent( - parent_id.index, - def_node_id, - DefPathData::LifetimeParam(str_name), - DefIndexAddressSpace::High, - Mark::root(), - span, - ); - - hir::GenericParam { - id: def_node_id, - name: hir_name, - attrs: hir_vec![], - bounds: hir_vec![], - span, - pure_wrt_drop: false, - kind: hir::GenericParamKind::Lifetime { kind } - } - }) + .map(|(span, hir_name)| self.lifetime_to_generic_param( + span, hir_name, parent_id.index, + )) .chain(in_band_ty_params.into_iter()) .collect(); (params, res) } + /// Converts a lifetime into a new generic parameter. + fn lifetime_to_generic_param( + &mut self, + span: Span, + hir_name: ParamName, + parent_index: DefIndex, + ) -> hir::GenericParam { + let LoweredNodeId { node_id, hir_id } = self.next_id(); + + // Get the name we'll use to make the def-path. Note + // that collisions are ok here and this shouldn't + // really show up for end-user. + let (str_name, kind) = match hir_name { + ParamName::Plain(ident) => ( + ident.as_interned_str(), + hir::LifetimeParamKind::InBand, + ), + ParamName::Fresh(_) => ( + keywords::UnderscoreLifetime.name().as_interned_str(), + hir::LifetimeParamKind::Elided, + ), + ParamName::Error => ( + keywords::UnderscoreLifetime.name().as_interned_str(), + hir::LifetimeParamKind::Error, + ), + }; + + // Add a definition for the in-band lifetime def. 
+ self.resolver.definitions().create_def_with_parent( + parent_index, + node_id, + DefPathData::LifetimeParam(str_name), + DefIndexAddressSpace::High, + Mark::root(), + span, + ); + + hir::GenericParam { + hir_id, + name: hir_name, + attrs: hir_vec![], + bounds: hir_vec![], + span, + pure_wrt_drop: false, + kind: hir::GenericParamKind::Lifetime { kind } + } + } + /// When there is a reference to some lifetime `'a`, and in-band /// lifetimes are enabled, then we want to push that lifetime into /// the vector of names to define later. In that case, it will get @@ -872,6 +981,13 @@ impl<'a> LoweringContext<'a> { |this| { this.collect_in_band_defs(parent_id, anonymous_lifetime_mode, |this| { let mut params = Vec::new(); + // Note: it is necessary to lower generics *before* calling `f`. + // When lowering `async fn`, there's a final step when lowering + // the return type that assumes that all in-scope lifetimes have + // already been added to either `in_scope_lifetimes` or + // `lifetimes_to_define`. If we swapped the order of these two, + // in-band-lifetimes introduced by generics or where-clauses + // wouldn't have been added yet. let generics = this.lower_generics( generics, ImplTraitContext::Universal(&mut params), @@ -928,7 +1044,7 @@ impl<'a> LoweringContext<'a> { let decl = FnDecl { inputs: vec![], output, - variadic: false + c_variadic: false }; let body_id = self.record_body(body_expr, Some(&decl)); self.is_generator = prev_is_generator; @@ -937,7 +1053,6 @@ impl<'a> LoweringContext<'a> { let closure_hir_id = self.lower_node_id(closure_node_id).hir_id; let decl = self.lower_fn_decl(&decl, None, /* impl trait allowed */ false, None); let generator = hir::Expr { - id: closure_node_id, hir_id: closure_hir_id, node: hir::ExprKind::Closure(capture_clause, decl, body_id, span, Some(hir::GeneratorMovability::Static)), @@ -945,7 +1060,13 @@ impl<'a> LoweringContext<'a> { attrs: ThinVec::new(), }; - let unstable_span = self.allow_internal_unstable(CompilerDesugaringKind::Async, span); + let unstable_span = self.mark_span_with_reason( + CompilerDesugaringKind::Async, + span, + Some(vec![ + Symbol::intern("gen_future"), + ].into()), + ); let gen_future = self.expr_std_path( unstable_span, &["future", "from_generator"], None, ThinVec::new()); hir::ExprKind::Call(P(gen_future), hir_vec![generator]) @@ -1037,7 +1158,7 @@ impl<'a> LoweringContext<'a> { let target_id = match destination { Some((id, _)) => { if let Def::Label(loop_id) = self.expect_full_def(id) { - Ok(self.lower_node_id(loop_id).node_id) + Ok(self.lower_node_id(loop_id).hir_id) } else { Err(hir::LoopIdError::UnresolvedLabel) } @@ -1046,7 +1167,7 @@ impl<'a> LoweringContext<'a> { self.loop_scopes .last() .cloned() - .map(|id| Ok(self.lower_node_id(id).node_id)) + .map(|id| Ok(self.lower_node_id(id).hir_id)) .unwrap_or(Err(hir::LoopIdError::OutsideLoopScope)) .into() } @@ -1091,19 +1212,19 @@ impl<'a> LoweringContext<'a> { TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited( span, delim, - self.lower_token_stream(tts.into()).into(), + self.lower_token_stream(tts), ).into(), } } fn lower_token(&mut self, token: Token, span: Span) -> TokenStream { match token { - Token::Interpolated(_) => {} - other => return TokenTree::Token(span, other).into(), + Token::Interpolated(nt) => { + let tts = nt.to_tokenstream(&self.sess.parse_sess, span); + self.lower_token_stream(tts) + } + other => TokenTree::Token(span, other).into(), } - - let tts = token.interpolated_to_tokenstream(&self.sess.parse_sess, span); - self.lower_token_stream(tts) 
} fn lower_arm(&mut self, arm: &Arm) -> hir::Arm { @@ -1120,8 +1241,10 @@ impl<'a> LoweringContext<'a> { fn lower_ty_binding(&mut self, b: &TypeBinding, itctx: ImplTraitContext<'_>) -> hir::TypeBinding { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(b.id); + hir::TypeBinding { - id: self.lower_node_id(b.id).node_id, + hir_id, ident: b.ident, ty: self.lower_ty(&b.ty, itctx), span: b.span, @@ -1135,6 +1258,12 @@ impl<'a> LoweringContext<'a> { match arg { ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(<)), ast::GenericArg::Type(ty) => GenericArg::Type(self.lower_ty_direct(&ty, itctx)), + ast::GenericArg::Const(ct) => { + GenericArg::Const(ConstArg { + value: self.lower_anon_const(&ct), + span: ct.value.span, + }) + } } } @@ -1243,7 +1372,7 @@ impl<'a> LoweringContext<'a> { ) } ImplTraitContext::Universal(in_band_ty_params) => { - self.lower_node_id(def_node_id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(def_node_id); // Add a definition for the in-band `Param`. let def_index = self .resolver @@ -1258,7 +1387,7 @@ impl<'a> LoweringContext<'a> { // Set the name to `impl Bound1 + Bound2`. let ident = Ident::from_str(&pprust::ty_to_string(t)).with_span_pos(span); in_band_ty_params.push(hir::GenericParam { - id: def_node_id, + hir_id, name: ParamName::Plain(ident), pure_wrt_drop: false, attrs: hir_vec![], @@ -1305,11 +1434,16 @@ impl<'a> LoweringContext<'a> { } } TyKind::Mac(_) => panic!("TyMac should have been expanded by now."), + TyKind::CVarArgs => { + // Create the implicit lifetime of the "spoofed" `VaList`. + let span = self.sess.source_map().next_point(t.span.shrink_to_lo()); + let lt = self.new_implicit_lifetime(span); + hir::TyKind::CVarArgs(lt) + }, }; - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(t.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(t.id); hir::Ty { - id: node_id, node: kind, span: t.span, hir_id, @@ -1328,9 +1462,10 @@ impl<'a> LoweringContext<'a> { // desugaring that explicitly states that we don't want to track that. // Not tracking it makes lints in rustc and clippy very fragile as // frequently opened issues show. - let exist_ty_span = self.allow_internal_unstable( + let exist_ty_span = self.mark_span_with_reason( CompilerDesugaringKind::ExistentialReturnType, span, + None, ); let exist_ty_def_index = self @@ -1339,7 +1474,7 @@ impl<'a> LoweringContext<'a> { .opt_def_index(exist_ty_node_id) .unwrap(); - self.allocate_hir_id_counter(exist_ty_node_id, &"existential impl trait"); + self.allocate_hir_id_counter(exist_ty_node_id); let hir_bounds = self.with_hir_id_owner(exist_ty_node_id, lower_bounds); @@ -1350,43 +1485,63 @@ impl<'a> LoweringContext<'a> { ); self.with_hir_id_owner(exist_ty_node_id, |lctx| { - let exist_ty_item_kind = hir::ItemKind::Existential(hir::ExistTy { + let LoweredNodeId { node_id: _, hir_id } = lctx.next_id(); + let exist_ty_item = hir::ExistTy { generics: hir::Generics { params: lifetime_defs, where_clause: hir::WhereClause { - id: lctx.next_id().node_id, - predicates: Vec::new().into(), + hir_id, + predicates: hir_vec![], }, span, }, bounds: hir_bounds, impl_trait_fn: fn_def_id, - }); - let exist_ty_id = lctx.lower_node_id(exist_ty_node_id); - // Generate an `existential type Foo: Trait;` declaration. 
- trace!("creating existential type with id {:#?}", exist_ty_id); - - trace!("exist ty def index: {:#?}", exist_ty_def_index); - let exist_ty_item = hir::Item { - id: exist_ty_id.node_id, - hir_id: exist_ty_id.hir_id, - name: keywords::Invalid.name(), - attrs: Default::default(), - node: exist_ty_item_kind, - vis: respan(span.shrink_to_lo(), hir::VisibilityKind::Inherited), - span: exist_ty_span, + origin: hir::ExistTyOrigin::ReturnImplTrait, }; - // Insert the item into the global list. This usually happens - // automatically for all AST items. But this existential type item - // does not actually exist in the AST. - lctx.items.insert(exist_ty_id.node_id, exist_ty_item); + trace!("exist ty from impl trait def index: {:#?}", exist_ty_def_index); + let exist_ty_id = lctx.generate_existential_type( + exist_ty_node_id, + exist_ty_item, + span, + exist_ty_span, + ); // `impl Trait` now just becomes `Foo<'a, 'b, ..>`. - hir::TyKind::Def(hir::ItemId { id: exist_ty_id.node_id }, lifetimes) + hir::TyKind::Def(hir::ItemId { id: exist_ty_id.hir_id }, lifetimes) }) } + /// Registers a new existential type with the proper NodeIds and + /// returns the lowered node ID for the existential type. + fn generate_existential_type( + &mut self, + exist_ty_node_id: NodeId, + exist_ty_item: hir::ExistTy, + span: Span, + exist_ty_span: Span, + ) -> LoweredNodeId { + let exist_ty_item_kind = hir::ItemKind::Existential(exist_ty_item); + let exist_ty_id = self.lower_node_id(exist_ty_node_id); + // Generate an `existential type Foo: Trait;` declaration. + trace!("registering existential type with id {:#?}", exist_ty_id); + let exist_ty_item = hir::Item { + hir_id: exist_ty_id.hir_id, + ident: keywords::Invalid.ident(), + attrs: Default::default(), + node: exist_ty_item_kind, + vis: respan(span.shrink_to_lo(), hir::VisibilityKind::Inherited), + span: exist_ty_span, + }; + + // Insert the item into the global item list. This usually happens + // automatically for all AST items. But this existential type item + // does not actually exist in the AST. 
+ self.insert_item(exist_ty_item); + exist_ty_id + } + fn lifetimes_from_impl_trait_bounds( &mut self, exist_ty_id: NodeId, @@ -1487,17 +1642,16 @@ impl<'a> LoweringContext<'a> { && !self.already_defined_lifetimes.contains(&name) { self.already_defined_lifetimes.insert(name); + let LoweredNodeId { node_id: _, hir_id } = self.context.next_id(); self.output_lifetimes.push(hir::GenericArg::Lifetime(hir::Lifetime { - id: self.context.next_id().node_id, + hir_id, span: lifetime.span, name, })); - // We need to manually create the ids here, because the - // definitions will go into the explicit `existential type` - // declaration and thus need to have their owner set to that item let def_node_id = self.context.sess.next_node_id(); - let _ = self.context.lower_node_id_with_owner(def_node_id, self.exist_ty_id); + let LoweredNodeId { node_id: _, hir_id } = + self.context.lower_node_id_with_owner(def_node_id, self.exist_ty_id); self.context.resolver.definitions().create_def_with_parent( self.parent, def_node_id, @@ -1520,7 +1674,7 @@ impl<'a> LoweringContext<'a> { }; self.output_lifetime_params.push(hir::GenericParam { - id: def_node_id, + hir_id, name, span: lifetime.span, pure_wrt_drop: false, @@ -1571,9 +1725,11 @@ impl<'a> LoweringContext<'a> { } fn lower_variant(&mut self, v: &Variant) -> hir::Variant { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(v.node.id); Spanned { node: hir::VariantKind { - name: v.node.ident.name, + ident: v.node.ident, + id: hir_id, attrs: self.lower_attrs(&v.node.attrs), data: self.lower_variant_data(&v.node.data), disr_expr: v.node.disr_expr.as_ref().map(|e| self.lower_anon_const(e)), @@ -1789,7 +1945,7 @@ impl<'a> LoweringContext<'a> { explicit_owner: Option, ) -> hir::PathSegment { let (mut generic_args, infer_types) = if let Some(ref generic_args) = segment.args { - let msg = "parenthesized parameters may only be used with a trait"; + let msg = "parenthesized type parameters may only be used with a `Fn` trait"; match **generic_args { GenericArgs::AngleBracketed(ref data) => { self.lower_angle_bracketed_parameter_data(data, param_mode, itctx) @@ -1806,10 +1962,25 @@ impl<'a> LoweringContext<'a> { (hir::GenericArgs::none(), true) } ParenthesizedGenericArgs::Err => { - struct_span_err!(self.sess, data.span, E0214, "{}", msg) - .span_label(data.span, "only traits may use parentheses") - .emit(); - (hir::GenericArgs::none(), true) + let mut err = struct_span_err!(self.sess, data.span, E0214, "{}", msg); + err.span_label(data.span, "only `Fn` traits may use parentheses"); + if let Ok(snippet) = self.sess.source_map().span_to_snippet(data.span) { + // Do not suggest going from `Trait()` to `Trait<>` + if data.inputs.len() > 0 { + err.span_suggestion( + data.span, + "use angle brackets instead", + format!("<{}>", &snippet[1..snippet.len() - 1]), + Applicability::MaybeIncorrect, + ); + } + }; + err.emit(); + (self.lower_angle_bracketed_parameter_data( + &data.as_angle_bracketed_args(), + param_mode, + itctx).0, + false) } }, } @@ -1870,7 +2041,7 @@ impl<'a> LoweringContext<'a> { hir::PathSegment::new( segment.ident, - Some(id.node_id), + Some(id.hir_id), Some(def), generic_args, infer_types, @@ -1898,7 +2069,7 @@ impl<'a> LoweringContext<'a> { fn lower_parenthesized_parameter_data( &mut self, - data: &ParenthesisedArgs, + data: &ParenthesizedArgs, ) -> (hir::GenericArgs, bool) { // Switch to `PassThrough` mode for anonymous lifetimes: this // means that we permit things like `&Ref`, where `Ref` has @@ -1908,22 +2079,23 @@ impl<'a> LoweringContext<'a> { 
self.with_anonymous_lifetime_mode( AnonymousLifetimeMode::PassThrough, |this| { - let &ParenthesisedArgs { ref inputs, ref output, span } = data; + let &ParenthesizedArgs { ref inputs, ref output, span } = data; let inputs = inputs .iter() .map(|ty| this.lower_ty_direct(ty, ImplTraitContext::disallowed())) .collect(); let mk_tup = |this: &mut Self, tys, span| { - let LoweredNodeId { node_id, hir_id } = this.next_id(); - hir::Ty { node: hir::TyKind::Tup(tys), id: node_id, hir_id, span } + let LoweredNodeId { node_id: _, hir_id } = this.next_id(); + hir::Ty { node: hir::TyKind::Tup(tys), hir_id, span } }; + let LoweredNodeId { node_id: _, hir_id } = this.next_id(); ( hir::GenericArgs { args: hir_vec![GenericArg::Type(mk_tup(this, inputs, span))], bindings: hir_vec![ hir::TypeBinding { - id: this.next_id().node_id, + hir_id, ident: Ident::from_str(FN_OUTPUT_NAME), ty: output .as_ref() @@ -1940,9 +2112,9 @@ impl<'a> LoweringContext<'a> { ) } - fn lower_local(&mut self, l: &Local) -> (P, SmallVec<[hir::ItemId; 1]>) { - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(l.id); - let mut ids = SmallVec::<[hir::ItemId; 1]>::new(); + fn lower_local(&mut self, l: &Local) -> (hir::Local, SmallVec<[NodeId; 1]>) { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(l.id); + let mut ids = SmallVec::<[NodeId; 1]>::new(); if self.sess.features_untracked().impl_trait_in_bindings { if let Some(ref ty) = l.ty { let mut visitor = ImplTraitTypeIdVisitor { ids: &mut ids }; @@ -1950,8 +2122,7 @@ impl<'a> LoweringContext<'a> { } } let parent_def_id = DefId::local(self.current_hir_id_owner.last().unwrap().0); - (P(hir::Local { - id: node_id, + (hir::Local { hir_id, ty: l.ty .as_ref() @@ -1967,7 +2138,7 @@ impl<'a> LoweringContext<'a> { span: l.span, attrs: l.attrs.clone(), source: hir::LocalSource::Normal, - }), ids) + }, ids) } fn lower_mutability(&mut self, m: Mutability) -> hir::Mutability { @@ -1978,9 +2149,8 @@ impl<'a> LoweringContext<'a> { } fn lower_arg(&mut self, arg: &Arg) -> hir::Arg { - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(arg.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(arg.id); hir::Arg { - id: node_id, hir_id, pat: self.lower_pat(&arg.pat), } @@ -2015,23 +2185,42 @@ impl<'a> LoweringContext<'a> { impl_trait_return_allow: bool, make_ret_async: Option, ) -> P { - let inputs = decl.inputs - .iter() - .map(|arg| { - if let Some((_, ref mut ibty)) = in_band_ty_params { - self.lower_ty_direct(&arg.ty, ImplTraitContext::Universal(ibty)) - } else { - self.lower_ty_direct(&arg.ty, ImplTraitContext::disallowed()) - } - }) - .collect::>(); + let lt_mode = if make_ret_async.is_some() { + // In `async fn`, argument-position elided lifetimes + // must be transformed into fresh generic parameters so that + // they can be applied to the existential return type. + AnonymousLifetimeMode::CreateParameter + } else { + self.anonymous_lifetime_mode + }; + + // Remember how many lifetimes were already around so that we can + // only look at the lifetime parameters introduced by the arguments. 
+ let lifetime_count_before_args = self.lifetimes_to_define.len(); + let inputs = self.with_anonymous_lifetime_mode(lt_mode, |this| { + decl.inputs + .iter() + .map(|arg| { + if let Some((_, ibty)) = &mut in_band_ty_params { + this.lower_ty_direct(&arg.ty, ImplTraitContext::Universal(ibty)) + } else { + this.lower_ty_direct(&arg.ty, ImplTraitContext::disallowed()) + } + }) + .collect::>() + }); let output = if let Some(ret_id) = make_ret_async { + // Calculate the `LtReplacement` to use for any return-position elided + // lifetimes based on the elided lifetime parameters introduced in the args. + let lt_replacement = get_elided_lt_replacement( + &self.lifetimes_to_define[lifetime_count_before_args..] + ); self.lower_async_fn_ret_ty( - &inputs, &decl.output, in_band_ty_params.expect("make_ret_async but no fn_def_id").0, ret_id, + lt_replacement, ) } else { match decl.output { @@ -2051,7 +2240,7 @@ impl<'a> LoweringContext<'a> { P(hir::FnDecl { inputs, output, - variadic: decl.variadic, + c_variadic: decl.c_variadic, implicit_self: decl.inputs.get(0).map_or( hir::ImplicitSelfKind::None, |arg| { @@ -2080,234 +2269,171 @@ impl<'a> LoweringContext<'a> { }) } - // Transform `-> T` into `-> impl Future` for `async fn` + // Transform `-> T` for `async fn` into -> ExistTy { .. } + // combined with the following definition of `ExistTy`: + // + // existential type ExistTy: Future; // - // fn_span: the span of the async function declaration. Used for error reporting. // inputs: lowered types of arguments to the function. Used to collect lifetimes. // output: unlowered output type (`T` in `-> T`) // fn_def_id: DefId of the parent function. Used to create child impl trait definition. + // exist_ty_node_id: NodeId of the existential type that should be created. + // elided_lt_replacement: replacement for elided lifetimes in the return type fn lower_async_fn_ret_ty( &mut self, - inputs: &[hir::Ty], output: &FunctionRetTy, fn_def_id: DefId, - return_impl_trait_id: NodeId, + exist_ty_node_id: NodeId, + elided_lt_replacement: LtReplacement, ) -> hir::FunctionRetTy { - // Get lifetimes used in the input arguments to the function. Our output type must also - // have the same lifetime. - // FIXME(cramertj): multiple different lifetimes are not allowed because - // `impl Trait + 'a + 'b` doesn't allow for capture `'a` and `'b` where neither is a subset - // of the other. We really want some new lifetime that is a subset of all input lifetimes, - // but that doesn't exist at the moment. - - struct AsyncFnLifetimeCollector<'r, 'a: 'r> { - context: &'r mut LoweringContext<'a>, - // Lifetimes bound by HRTB. - currently_bound_lifetimes: Vec, - // Whether to count elided lifetimes. - // Disabled inside of `Fn` or `fn` syntax. - collect_elided_lifetimes: bool, - // The lifetime found. - // Multiple different or elided lifetimes cannot appear in async fn for now. - output_lifetime: Option<(hir::LifetimeName, Span)>, - } - - impl<'r, 'a: 'r, 'v> hir::intravisit::Visitor<'v> for AsyncFnLifetimeCollector<'r, 'a> { - fn nested_visit_map<'this>( - &'this mut self, - ) -> hir::intravisit::NestedVisitorMap<'this, 'v> { - hir::intravisit::NestedVisitorMap::None - } + let span = output.span(); - fn visit_generic_args(&mut self, span: Span, parameters: &'v hir::GenericArgs) { - // Don't collect elided lifetimes used inside of `Fn()` syntax. 
- if parameters.parenthesized { - let old_collect_elided_lifetimes = self.collect_elided_lifetimes; - self.collect_elided_lifetimes = false; - hir::intravisit::walk_generic_args(self, span, parameters); - self.collect_elided_lifetimes = old_collect_elided_lifetimes; - } else { - hir::intravisit::walk_generic_args(self, span, parameters); - } - } - - fn visit_ty(&mut self, t: &'v hir::Ty) { - // Don't collect elided lifetimes used inside of `fn()` syntax. - if let &hir::TyKind::BareFn(_) = &t.node { - let old_collect_elided_lifetimes = self.collect_elided_lifetimes; - self.collect_elided_lifetimes = false; - - // Record the "stack height" of `for<'a>` lifetime bindings - // to be able to later fully undo their introduction. - let old_len = self.currently_bound_lifetimes.len(); - hir::intravisit::walk_ty(self, t); - self.currently_bound_lifetimes.truncate(old_len); - - self.collect_elided_lifetimes = old_collect_elided_lifetimes; - } else { - hir::intravisit::walk_ty(self, t); - } - } + let exist_ty_span = self.mark_span_with_reason( + CompilerDesugaringKind::Async, + span, + None, + ); - fn visit_poly_trait_ref( - &mut self, - trait_ref: &'v hir::PolyTraitRef, - modifier: hir::TraitBoundModifier, - ) { - // Record the "stack height" of `for<'a>` lifetime bindings - // to be able to later fully undo their introduction. - let old_len = self.currently_bound_lifetimes.len(); - hir::intravisit::walk_poly_trait_ref(self, trait_ref, modifier); - self.currently_bound_lifetimes.truncate(old_len); - } + let exist_ty_def_index = self + .resolver + .definitions() + .opt_def_index(exist_ty_node_id) + .unwrap(); - fn visit_generic_param(&mut self, param: &'v hir::GenericParam) { - // Record the introduction of 'a in `for<'a> ...` - if let hir::GenericParamKind::Lifetime { .. } = param.kind { - // Introduce lifetimes one at a time so that we can handle - // cases like `fn foo<'d>() -> impl for<'a, 'b: 'a, 'c: 'b + 'd>` - let lt_name = hir::LifetimeName::Param(param.name); - self.currently_bound_lifetimes.push(lt_name); - } + self.allocate_hir_id_counter(exist_ty_node_id); - hir::intravisit::walk_generic_param(self, param); - } + let (exist_ty_node_id, lifetime_params) = self.with_hir_id_owner(exist_ty_node_id, |this| { + let future_bound = this.with_anonymous_lifetime_mode( + AnonymousLifetimeMode::Replace(elided_lt_replacement), + |this| this.lower_async_fn_output_type_to_future_bound( + output, + fn_def_id, + span, + ), + ); - fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) { - let name = match lifetime.name { - hir::LifetimeName::Implicit | hir::LifetimeName::Underscore => { - if self.collect_elided_lifetimes { - // Use `'_` for both implicit and underscore lifetimes in - // `abstract type Foo<'_>: SomeTrait<'_>;` - hir::LifetimeName::Underscore - } else { - return; - } - } - hir::LifetimeName::Param(_) => lifetime.name, - hir::LifetimeName::Error | hir::LifetimeName::Static => return, - }; + // Calculate all the lifetimes that should be captured + // by the existential type. This should include all in-scope + // lifetime parameters, including those defined in-band. + // + // Note: this must be done after lowering the output type, + // as the output type may introduce new in-band lifetimes. 
+ let lifetime_params: Vec<(Span, ParamName)> = + this.in_scope_lifetimes + .iter().cloned() + .map(|ident| (ident.span, ParamName::Plain(ident))) + .chain(this.lifetimes_to_define.iter().cloned()) + .collect(); - if !self.currently_bound_lifetimes.contains(&name) { - if let Some((current_lt_name, current_lt_span)) = self.output_lifetime { - // We don't currently have a reliable way to desugar `async fn` with - // multiple potentially unrelated input lifetimes into - // `-> impl Trait + 'lt`, so we report an error in this case. - if current_lt_name != name { - struct_span_err!( - self.context.sess, - MultiSpan::from_spans(vec![current_lt_span, lifetime.span]), - E0709, - "multiple different lifetimes used in arguments of `async fn`", - ) - .span_label(current_lt_span, "first lifetime here") - .span_label(lifetime.span, "different lifetime here") - .help("`async fn` can only accept borrowed values \ - with identical lifetimes") - .emit() - } else if current_lt_name.is_elided() && name.is_elided() { - struct_span_err!( - self.context.sess, - MultiSpan::from_spans(vec![current_lt_span, lifetime.span]), - E0707, - "multiple elided lifetimes used in arguments of `async fn`", - ) - .span_label(current_lt_span, "first lifetime here") - .span_label(lifetime.span, "different lifetime here") - .help("consider giving these arguments named lifetimes") - .emit() - } - } else { - self.output_lifetime = Some((name, lifetime.span)); - } - } - } - } + let generic_params = + lifetime_params + .iter().cloned() + .map(|(span, hir_name)| { + this.lifetime_to_generic_param(span, hir_name, exist_ty_def_index) + }) + .collect(); - let bound_lifetime = { - let mut lifetime_collector = AsyncFnLifetimeCollector { - context: self, - currently_bound_lifetimes: Vec::new(), - collect_elided_lifetimes: true, - output_lifetime: None, + let LoweredNodeId { node_id: _, hir_id } = this.next_id(); + let exist_ty_item = hir::ExistTy { + generics: hir::Generics { + params: generic_params, + where_clause: hir::WhereClause { + hir_id, + predicates: hir_vec![], + }, + span, + }, + bounds: hir_vec![future_bound], + impl_trait_fn: Some(fn_def_id), + origin: hir::ExistTyOrigin::AsyncFn, }; - for arg in inputs { - hir::intravisit::walk_ty(&mut lifetime_collector, arg); - } - lifetime_collector.output_lifetime - }; + trace!("exist ty from async fn def index: {:#?}", exist_ty_def_index); + let exist_ty_id = this.generate_existential_type( + exist_ty_node_id, + exist_ty_item, + span, + exist_ty_span, + ); - let span = match output { - FunctionRetTy::Ty(ty) => ty.span, - FunctionRetTy::Default(span) => *span, - }; + (exist_ty_id.node_id, lifetime_params) + }); - let impl_trait_ty = self.lower_existential_impl_trait( - span, Some(fn_def_id), return_impl_trait_id, |this| { - let output_ty = match output { - FunctionRetTy::Ty(ty) => { - this.lower_ty(ty, ImplTraitContext::Existential(Some(fn_def_id))) - } - FunctionRetTy::Default(span) => { - let LoweredNodeId { node_id, hir_id } = this.next_id(); - P(hir::Ty { - id: node_id, - hir_id: hir_id, - node: hir::TyKind::Tup(hir_vec![]), - span: *span, + let generic_args = + lifetime_params + .iter().cloned() + .map(|(span, hir_name)| { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + GenericArg::Lifetime(hir::Lifetime { + hir_id, + span, + name: hir::LifetimeName::Param(hir_name), }) - } - }; - - // "" - let future_params = P(hir::GenericArgs { - args: hir_vec![], - bindings: hir_vec![hir::TypeBinding { - ident: Ident::from_str(FN_OUTPUT_NAME), - ty: output_ty, - id: 
this.next_id().node_id, - span, - }], - parenthesized: false, - }); + }) + .collect(); - let future_path = - this.std_path(span, &["future", "Future"], Some(future_params), false); + let exist_ty_hir_id = self.lower_node_id(exist_ty_node_id).hir_id; + let exist_ty_ref = hir::TyKind::Def(hir::ItemId { id: exist_ty_hir_id }, generic_args); - let LoweredNodeId { node_id, hir_id } = this.next_id(); - let mut bounds = vec![ - hir::GenericBound::Trait( - hir::PolyTraitRef { - trait_ref: hir::TraitRef { - path: future_path, - ref_id: node_id, - hir_ref_id: hir_id, - }, - bound_generic_params: hir_vec![], - span, - }, - hir::TraitBoundModifier::None - ), - ]; + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::FunctionRetTy::Return(P(hir::Ty { + node: exist_ty_ref, + span, + hir_id, + })) + } - if let Some((name, span)) = bound_lifetime { - bounds.push(hir::GenericBound::Outlives( - hir::Lifetime { id: this.next_id().node_id, name, span })); + /// Turns `-> T` into `Future` + fn lower_async_fn_output_type_to_future_bound( + &mut self, + output: &FunctionRetTy, + fn_def_id: DefId, + span: Span, + ) -> hir::GenericBound { + // Compute the `T` in `Future` from the return type. + let output_ty = match output { + FunctionRetTy::Ty(ty) => { + self.lower_ty(ty, ImplTraitContext::Existential(Some(fn_def_id))) + } + FunctionRetTy::Default(ret_ty_span) => { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + P(hir::Ty { + hir_id, + node: hir::TyKind::Tup(hir_vec![]), + span: *ret_ty_span, + }) } + }; - hir::HirVec::from(bounds) + // "" + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + let future_params = P(hir::GenericArgs { + args: hir_vec![], + bindings: hir_vec![hir::TypeBinding { + ident: Ident::from_str(FN_OUTPUT_NAME), + ty: output_ty, + hir_id, + span, + }], + parenthesized: false, }); - let LoweredNodeId { node_id, hir_id } = self.next_id(); - let impl_trait_ty = P(hir::Ty { - id: node_id, - node: impl_trait_ty, - span, - hir_id, - }); + // ::std::future::Future + let future_path = + self.std_path(span, &["future", "Future"], Some(future_params), false); - hir::FunctionRetTy::Return(impl_trait_ty) + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::GenericBound::Trait( + hir::PolyTraitRef { + trait_ref: hir::TraitRef { + path: future_path, + hir_ref_id: hir_id, + }, + bound_generic_params: hir_vec![], + span, + }, + hir::TraitBoundModifier::None, + ) } fn lower_param_bound( @@ -2345,6 +2471,11 @@ impl<'a> LoweringContext<'a> { } AnonymousLifetimeMode::ReportError => self.new_error_lifetime(Some(l.id), span), + + AnonymousLifetimeMode::Replace(replacement) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(l.id); + self.replace_elided_lifetime(hir_id, span, replacement) + } }, ident => { self.maybe_collect_in_band_lifetime(ident); @@ -2360,13 +2491,48 @@ impl<'a> LoweringContext<'a> { span: Span, name: hir::LifetimeName, ) -> hir::Lifetime { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(id); + hir::Lifetime { - id: self.lower_node_id(id).node_id, + hir_id, span, name: name, } } + /// Replace a return-position elided lifetime with the elided lifetime + /// from the arguments. 
+ fn replace_elided_lifetime( + &mut self, + hir_id: hir::HirId, + span: Span, + replacement: LtReplacement, + ) -> hir::Lifetime { + let multiple_or_none = match replacement { + LtReplacement::Some(name) => { + return hir::Lifetime { + hir_id, + span, + name: hir::LifetimeName::Param(name), + }; + } + LtReplacement::MultipleLifetimes => "multiple", + LtReplacement::NoLifetimes => "none", + }; + + let mut err = crate::middle::resolve_lifetime::report_missing_lifetime_specifiers( + self.sess, + span, + 1, + ); + err.note(&format!( + "return-position elided lifetimes require exactly one \ + input-position elided lifetime, found {}.", multiple_or_none)); + err.emit(); + + hir::Lifetime { hir_id, span, name: hir::LifetimeName::Error } + } + fn lower_generic_params( &mut self, params: &[GenericParam], @@ -2388,7 +2554,7 @@ impl<'a> LoweringContext<'a> { |this| this.lower_param_bounds(¶m.bounds, itctx.reborrow()), ); - match param.kind { + let (name, kind) = match param.kind { GenericParamKind::Lifetime => { let was_collecting_in_band = self.is_collecting_in_band_lifetimes; self.is_collecting_in_band_lifetimes = false; @@ -2404,21 +2570,14 @@ impl<'a> LoweringContext<'a> { | hir::LifetimeName::Static => hir::ParamName::Plain(lt.name.ident()), hir::LifetimeName::Error => ParamName::Error, }; - let param = hir::GenericParam { - id: lt.id, - name: param_name, - span: lt.span, - pure_wrt_drop: attr::contains_name(¶m.attrs, "may_dangle"), - attrs: self.lower_attrs(¶m.attrs), - bounds, - kind: hir::GenericParamKind::Lifetime { - kind: hir::LifetimeParamKind::Explicit, - } + + let kind = hir::GenericParamKind::Lifetime { + kind: hir::LifetimeParamKind::Explicit }; self.is_collecting_in_band_lifetimes = was_collecting_in_band; - param + (param_name, kind) } GenericParamKind::Type { ref default, .. } => { // Don't expose `Self` (recovered "keyword used as ident" parse error). 
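// A minimal sketch of the equivalence the lowering above implements (the
// function names below are invented for illustration; the compiler itself
// emits HIR `ExistTy` nodes bounded by `Future<Output = T>` rather than
// surface Rust): an `async fn` returning `T` behaves like a plain `fn`
// returning an opaque `impl Future<Output = T>` that captures the single
// elided input lifetime; with zero or several elided input lifetimes,
// `replace_elided_lifetime` reports the missing-lifetime error instead.
use std::future::Future;

async fn read_byte(buf: &u8) -> u8 {
    *buf
}

// Hand-written approximation of the generated signature: the returned
// future is an opaque type constrained to outlive the borrowed input.
fn read_byte_desugared<'a>(buf: &'a u8) -> impl Future<Output = u8> + 'a {
    async move { *buf }
}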
@@ -2438,24 +2597,35 @@ impl<'a> LoweringContext<'a> { .collect(); } - hir::GenericParam { - id: self.lower_node_id(param.id).node_id, - name: hir::ParamName::Plain(ident), - pure_wrt_drop: attr::contains_name(¶m.attrs, "may_dangle"), - attrs: self.lower_attrs(¶m.attrs), - bounds, - span: ident.span, - kind: hir::GenericParamKind::Type { - default: default.as_ref().map(|x| { - self.lower_ty(x, ImplTraitContext::disallowed()) - }), - synthetic: param.attrs.iter() - .filter(|attr| attr.check_name("rustc_synthetic")) - .map(|_| hir::SyntheticTyParamKind::ImplTrait) - .next(), - } - } + let kind = hir::GenericParamKind::Type { + default: default.as_ref().map(|x| { + self.lower_ty(x, ImplTraitContext::disallowed()) + }), + synthetic: param.attrs.iter() + .filter(|attr| attr.check_name("rustc_synthetic")) + .map(|_| hir::SyntheticTyParamKind::ImplTrait) + .next(), + }; + + (hir::ParamName::Plain(ident), kind) + } + GenericParamKind::Const { ref ty } => { + (hir::ParamName::Plain(param.ident), hir::GenericParamKind::Const { + ty: self.lower_ty(&ty, ImplTraitContext::disallowed()), + }) } + }; + + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(param.id); + + hir::GenericParam { + hir_id, + name, + span: param.ident.span, + pure_wrt_drop: attr::contains_name(¶m.attrs, "may_dangle"), + attrs: self.lower_attrs(¶m.attrs), + bounds, + kind, } } @@ -2529,8 +2699,10 @@ impl<'a> LoweringContext<'a> { self.with_anonymous_lifetime_mode( AnonymousLifetimeMode::ReportError, |this| { + let LoweredNodeId { node_id: _, hir_id } = this.lower_node_id(wc.id); + hir::WhereClause { - id: this.lower_node_id(wc.id).node_id, + hir_id, predicates: wc.predicates .iter() .map(|predicate| this.lower_where_predicate(predicate)) @@ -2589,34 +2761,41 @@ impl<'a> LoweringContext<'a> { ref lhs_ty, ref rhs_ty, span, - }) => hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { - id: self.lower_node_id(id).node_id, - lhs_ty: self.lower_ty(lhs_ty, ImplTraitContext::disallowed()), - rhs_ty: self.lower_ty(rhs_ty, ImplTraitContext::disallowed()), - span, - }), + }) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(id); + + hir::WherePredicate::EqPredicate(hir::WhereEqPredicate { + hir_id, + lhs_ty: self.lower_ty(lhs_ty, ImplTraitContext::disallowed()), + rhs_ty: self.lower_ty(rhs_ty, ImplTraitContext::disallowed()), + span, + }) + }, } } fn lower_variant_data(&mut self, vdata: &VariantData) -> hir::VariantData { match *vdata { - VariantData::Struct(ref fields, id) => hir::VariantData::Struct( - fields - .iter() - .enumerate() - .map(|f| self.lower_struct_field(f)) - .collect(), - self.lower_node_id(id).node_id, - ), - VariantData::Tuple(ref fields, id) => hir::VariantData::Tuple( - fields - .iter() - .enumerate() - .map(|f| self.lower_struct_field(f)) - .collect(), - self.lower_node_id(id).node_id, + VariantData::Struct(ref fields, recovered) => hir::VariantData::Struct( + fields.iter().enumerate().map(|f| self.lower_struct_field(f)).collect(), + recovered, ), - VariantData::Unit(id) => hir::VariantData::Unit(self.lower_node_id(id).node_id), + VariantData::Tuple(ref fields, id) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(id); + + hir::VariantData::Tuple( + fields + .iter() + .enumerate() + .map(|f| self.lower_struct_field(f)) + .collect(), + hir_id, + ) + }, + VariantData::Unit(id) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(id); + hir::VariantData::Unit(hir_id) + }, } } @@ -2625,10 +2804,9 @@ impl<'a> LoweringContext<'a> { 
hir::QPath::Resolved(None, path) => path.and_then(|path| path), qpath => bug!("lower_trait_ref: unexpected QPath `{:?}`", qpath), }; - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(p.ref_id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(p.ref_id); hir::TraitRef { path, - ref_id: node_id, hir_ref_id: hir_id, } } @@ -2656,9 +2834,11 @@ impl<'a> LoweringContext<'a> { } fn lower_struct_field(&mut self, (index, f): (usize, &StructField)) -> hir::StructField { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(f.id); + hir::StructField { span: f.span, - id: self.lower_node_id(f.id).node_id, + hir_id, ident: match f.ident { Some(ident) => ident, // FIXME(jseyfried): positional field hygiene @@ -2671,8 +2851,10 @@ impl<'a> LoweringContext<'a> { } fn lower_field(&mut self, f: &Field) -> hir::Field { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::Field { - id: self.next_id().node_id, + hir_id, ident: f.ident, expr: P(self.lower_expr(&f.expr)), span: f.span, @@ -2709,17 +2891,15 @@ impl<'a> LoweringContext<'a> { } } - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(b.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(b.id); P(hir::Block { - id: node_id, hir_id, stmts: stmts.into(), expr, rules: self.lower_block_check_mode(&b.rules), span: b.span, targeted_by_break, - recovered: b.recovered, }) } @@ -2748,7 +2928,7 @@ impl<'a> LoweringContext<'a> { fn lower_item_kind( &mut self, id: NodeId, - name: &mut Name, + ident: &mut Ident, attrs: &hir::HirVec, vis: &mut hir::Visibility, i: &ItemKind, @@ -2762,7 +2942,7 @@ impl<'a> LoweringContext<'a> { span: use_tree.span, }; - self.lower_use_tree(use_tree, &prefix, id, vis, name, attrs) + self.lower_use_tree(use_tree, &prefix, id, vis, ident, attrs) } ItemKind::Static(ref t, m, ref e) => { let value = self.lower_body(None, |this| this.lower_expr(e)); @@ -2800,7 +2980,7 @@ impl<'a> LoweringContext<'a> { // `impl Future` here because lower_body // only cares about the input argument patterns in the function // declaration (decl), not the return types. - let body_id = this.lower_async_body(decl, header.asyncness, body); + let body_id = this.lower_async_body(decl, header.asyncness.node, body); let (generics, fn_decl) = this.add_in_band_defs( generics, @@ -2810,7 +2990,7 @@ impl<'a> LoweringContext<'a> { decl, Some((fn_def_id, idty)), true, - header.asyncness.opt_return_id() + header.asyncness.node.opt_return_id() ), ); @@ -2833,6 +3013,7 @@ impl<'a> LoweringContext<'a> { generics: self.lower_generics(generics, ImplTraitContext::disallowed()), bounds: self.lower_param_bounds(b, ImplTraitContext::disallowed()), impl_trait_fn: None, + origin: hir::ExistTyOrigin::ExistentialType, }), ItemKind::Enum(ref enum_definition, ref generics) => hir::ItemKind::Enum( hir::EnumDef { @@ -2882,6 +3063,7 @@ impl<'a> LoweringContext<'a> { // method, it will not be considered an in-band // lifetime to be added, but rather a reference to a // parent lifetime. 
+ let lowered_trait_impl_id = self.lower_node_id(id).hir_id; let (generics, (trait_ref, lowered_ty)) = self.add_in_band_defs( ast_generics, def_id, @@ -2893,7 +3075,8 @@ impl<'a> LoweringContext<'a> { if let Some(ref trait_ref) = trait_ref { if let Def::Trait(def_id) = trait_ref.path.def { - this.trait_impls.entry(def_id).or_default().push(id); + this.trait_impls.entry(def_id).or_default().push( + lowered_trait_impl_id); } } @@ -2954,7 +3137,7 @@ impl<'a> LoweringContext<'a> { prefix: &Path, id: NodeId, vis: &mut hir::Visibility, - name: &mut Name, + ident: &mut Ident, attrs: &hir::HirVec, ) -> hir::ItemKind { debug!("lower_use_tree(tree={:?})", tree); @@ -2970,28 +3153,27 @@ impl<'a> LoweringContext<'a> { match tree.kind { UseTreeKind::Simple(rename, id1, id2) => { - *name = tree.ident().name; + *ident = tree.ident(); - // First apply the prefix to the path + // First, apply the prefix to the path. let mut path = Path { segments, span: path.span, }; - // Correctly resolve `self` imports + // Correctly resolve `self` imports. if path.segments.len() > 1 && path.segments.last().unwrap().ident.name == keywords::SelfLower.name() { let _ = path.segments.pop(); if rename.is_none() { - *name = path.segments.last().unwrap().ident.name; + *ident = path.segments.last().unwrap().ident; } } - let parent_def_index = self.current_hir_id_owner.last().unwrap().0; let mut defs = self.expect_full_def_from_use(id); - // we want to return *something* from this function, so hang onto the first item - // for later + // We want to return *something* from this function, so hold onto the first item + // for later. let ret_def = defs.next().unwrap_or(Def::Err); // Here, we are looping over namespaces, if they exist for the definition @@ -3001,20 +3183,12 @@ impl<'a> LoweringContext<'a> { // two imports. for (def, &new_node_id) in defs.zip([id1, id2].iter()) { let vis = vis.clone(); - let name = name.clone(); + let ident = ident.clone(); let mut path = path.clone(); for seg in &mut path.segments { seg.id = self.sess.next_node_id(); } let span = path.span; - self.resolver.definitions().create_def_with_parent( - parent_def_index, - new_node_id, - DefPathData::Misc, - DefIndexAddressSpace::High, - Mark::root(), - span); - self.allocate_hir_id_counter(new_node_id, &path); self.with_hir_id_owner(new_node_id, |this| { let new_id = this.lower_node_id(new_node_id); @@ -3025,24 +3199,21 @@ impl<'a> LoweringContext<'a> { hir::VisibilityKind::Public => hir::VisibilityKind::Public, hir::VisibilityKind::Crate(sugar) => hir::VisibilityKind::Crate(sugar), hir::VisibilityKind::Inherited => hir::VisibilityKind::Inherited, - hir::VisibilityKind::Restricted { ref path, id: _, hir_id: _ } => { + hir::VisibilityKind::Restricted { ref path, hir_id: _ } => { let id = this.next_id(); let path = this.renumber_segment_ids(path); hir::VisibilityKind::Restricted { path, - id: id.node_id, hir_id: id.hir_id, } } }; let vis = respan(vis.span, vis_kind); - this.items.insert( - new_id.node_id, + this.insert_item( hir::Item { - id: new_id.node_id, hir_id: new_id.hir_id, - name: name, + ident, attrs: attrs.clone(), node: item, vis, @@ -3068,8 +3239,8 @@ impl<'a> LoweringContext<'a> { hir::ItemKind::Use(path, hir::UseKind::Glob) } UseTreeKind::Nested(ref trees) => { - // Nested imports are desugared into simple - // imports. So if we start with + // Nested imports are desugared into simple imports. + // So, if we start with // // ``` // pub(x) use foo::{a, b}; @@ -3090,27 +3261,25 @@ impl<'a> LoweringContext<'a> { // `self.items`. 
However, the structure of this // function also requires us to return one item, and // for that we return the `{}` import (called the - // "`ListStem`"). + // `ListStem`). let prefix = Path { segments, span: prefix.span.to(path.span), }; - // Add all the nested PathListItems to the HIR. + // Add all the nested `PathListItem`s to the HIR. for &(ref use_tree, id) in trees { - self.allocate_hir_id_counter(id, &use_tree); - let LoweredNodeId { node_id: new_id, hir_id: new_hir_id, } = self.lower_node_id(id); let mut vis = vis.clone(); - let mut name = name.clone(); + let mut ident = ident.clone(); let mut prefix = prefix.clone(); - // Give the segments new ids since they are being cloned. + // Give the segments new node-ids since they are being cloned. for seg in &mut prefix.segments { seg.id = self.sess.next_node_id(); } @@ -3125,31 +3294,28 @@ impl<'a> LoweringContext<'a> { &prefix, new_id, &mut vis, - &mut name, + &mut ident, attrs); let vis_kind = match vis.node { hir::VisibilityKind::Public => hir::VisibilityKind::Public, hir::VisibilityKind::Crate(sugar) => hir::VisibilityKind::Crate(sugar), hir::VisibilityKind::Inherited => hir::VisibilityKind::Inherited, - hir::VisibilityKind::Restricted { ref path, id: _, hir_id: _ } => { + hir::VisibilityKind::Restricted { ref path, hir_id: _ } => { let id = this.next_id(); let path = this.renumber_segment_ids(path); hir::VisibilityKind::Restricted { path: path, - id: id.node_id, hir_id: id.hir_id, } } }; let vis = respan(vis.span, vis_kind); - this.items.insert( - new_id, + this.insert_item( hir::Item { - id: new_id, hir_id: new_hir_id, - name, + ident, attrs: attrs.clone(), node: item, vis, @@ -3176,7 +3342,7 @@ impl<'a> LoweringContext<'a> { *vis = respan(prefix.span.shrink_to_lo(), hir::VisibilityKind::Inherited); } hir::VisibilityKind::Restricted { .. } => { - // do nothing here, as described in the comment on the match + // Do nothing here, as described in the comment on the match. } } @@ -3189,13 +3355,13 @@ impl<'a> LoweringContext<'a> { /// Paths like the visibility path in `pub(super) use foo::{bar, baz}` are repeated /// many times in the HIR tree; for each occurrence, we need to assign distinct - /// node-ids. (See e.g., #56128.) + /// `NodeId`s. (See, e.g., #56128.) fn renumber_segment_ids(&mut self, path: &P) -> P { debug!("renumber_segment_ids(path = {:?})", path); let mut path = path.clone(); for seg in path.segments.iter_mut() { - if seg.id.is_some() { - seg.id = Some(self.next_id().node_id); + if seg.hir_id.is_some() { + seg.hir_id = Some(self.next_id().hir_id); } } path @@ -3253,7 +3419,6 @@ impl<'a> LoweringContext<'a> { }; hir::TraitItem { - id: node_id, hir_id, ident: i.ident, attrs: self.lower_attrs(&i.attrs), @@ -3280,7 +3445,7 @@ impl<'a> LoweringContext<'a> { TraitItemKind::Macro(..) 
=> unimplemented!(), }; hir::TraitItemRef { - id: hir::TraitItemId { node_id: i.id }, + id: hir::TraitItemId { hir_id: self.lower_node_id(i.id).hir_id }, ident: i.ident, span: i.span, defaultness: self.lower_defaultness(Defaultness::Default, has_default), @@ -3304,14 +3469,14 @@ impl<'a> LoweringContext<'a> { ) } ImplItemKind::Method(ref sig, ref body) => { - let body_id = self.lower_async_body(&sig.decl, sig.header.asyncness, body); + let body_id = self.lower_async_body(&sig.decl, sig.header.asyncness.node, body); let impl_trait_return_allow = !self.is_in_trait_impl; let (generics, sig) = self.lower_method_sig( &i.generics, sig, impl_item_def_id, impl_trait_return_allow, - sig.header.asyncness.opt_return_id(), + sig.header.asyncness.node.opt_return_id(), ); (generics, hir::ImplItemKind::Method(sig, body_id)) } @@ -3329,7 +3494,6 @@ impl<'a> LoweringContext<'a> { }; hir::ImplItem { - id: node_id, hir_id, ident: i.ident, attrs: self.lower_attrs(&i.attrs), @@ -3345,7 +3509,7 @@ impl<'a> LoweringContext<'a> { fn lower_impl_item_ref(&mut self, i: &ImplItem) -> hir::ImplItemRef { hir::ImplItemRef { - id: hir::ImplItemId { node_id: i.id }, + id: hir::ImplItemId { hir_id: self.lower_node_id(i.id).hir_id }, ident: i.ident, span: i.span, vis: self.lower_visibility(&i.vis, Some(i.id)), @@ -3372,17 +3536,17 @@ impl<'a> LoweringContext<'a> { } fn lower_item_id(&mut self, i: &Item) -> SmallVec<[hir::ItemId; 1]> { - match i.node { + let node_ids = match i.node { ItemKind::Use(ref use_tree) => { - let mut vec = smallvec![hir::ItemId { id: i.id }]; + let mut vec = smallvec![i.id]; self.lower_item_id_use_tree(use_tree, i.id, &mut vec); vec } ItemKind::MacroDef(..) => SmallVec::new(), ItemKind::Fn(..) | - ItemKind::Impl(.., None, _, _) => smallvec![hir::ItemId { id: i.id }], + ItemKind::Impl(.., None, _, _) => smallvec![i.id], ItemKind::Static(ref ty, ..) => { - let mut ids = smallvec![hir::ItemId { id: i.id }]; + let mut ids = smallvec![i.id]; if self.sess.features_untracked().impl_trait_in_bindings { let mut visitor = ImplTraitTypeIdVisitor { ids: &mut ids }; visitor.visit_ty(ty); @@ -3390,25 +3554,29 @@ impl<'a> LoweringContext<'a> { ids }, ItemKind::Const(ref ty, ..) 
=> { - let mut ids = smallvec![hir::ItemId { id: i.id }]; + let mut ids = smallvec![i.id]; if self.sess.features_untracked().impl_trait_in_bindings { let mut visitor = ImplTraitTypeIdVisitor { ids: &mut ids }; visitor.visit_ty(ty); } ids }, - _ => smallvec![hir::ItemId { id: i.id }], - } + _ => smallvec![i.id], + }; + + node_ids.into_iter().map(|node_id| hir::ItemId { + id: self.allocate_hir_id_counter(node_id).hir_id + }).collect() } fn lower_item_id_use_tree(&mut self, tree: &UseTree, base_id: NodeId, - vec: &mut SmallVec<[hir::ItemId; 1]>) + vec: &mut SmallVec<[NodeId; 1]>) { match tree.kind { UseTreeKind::Nested(ref nested_vec) => for &(ref nested, id) in nested_vec { - vec.push(hir::ItemId { id }); + vec.push(id); self.lower_item_id_use_tree(nested, id, vec); }, UseTreeKind::Glob => {} @@ -3417,25 +3585,26 @@ impl<'a> LoweringContext<'a> { .skip(1) .zip([id1, id2].iter()) { - vec.push(hir::ItemId { id }); + vec.push(id); } }, } } pub fn lower_item(&mut self, i: &Item) -> Option { - let mut name = i.ident.name; + let mut ident = i.ident; let mut vis = self.lower_visibility(&i.vis, None); let attrs = self.lower_attrs(&i.attrs); if let ItemKind::MacroDef(ref def) = i.node { if !def.legacy || attr::contains_name(&i.attrs, "macro_export") || attr::contains_name(&i.attrs, "rustc_doc_only_macro") { let body = self.lower_token_stream(def.stream()); + let hir_id = self.lower_node_id(i.id).hir_id; self.exported_macros.push(hir::MacroDef { - name, + name: ident.name, vis, attrs, - id: i.id, + hir_id, span: i.span, body, legacy: def.legacy, @@ -3444,14 +3613,13 @@ impl<'a> LoweringContext<'a> { return None; } - let node = self.lower_item_kind(i.id, &mut name, &attrs, &mut vis, &i.node); + let node = self.lower_item_kind(i.id, &mut ident, &attrs, &mut vis, &i.node); - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(i.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(i.id); Some(hir::Item { - id: node_id, hir_id, - name, + ident, attrs, node, vis, @@ -3460,11 +3628,11 @@ impl<'a> LoweringContext<'a> { } fn lower_foreign_item(&mut self, i: &ForeignItem) -> hir::ForeignItem { - let node_id = self.lower_node_id(i.id).node_id; + let LoweredNodeId { node_id, hir_id } = self.lower_node_id(i.id); let def_id = self.resolver.definitions().local_def_id(node_id); hir::ForeignItem { - id: node_id, - name: i.ident.name, + hir_id, + ident: i.ident, attrs: self.lower_attrs(&i.attrs), node: match i.node { ForeignItemKind::Fn(ref fdec, ref generics) => { @@ -3528,7 +3696,7 @@ impl<'a> LoweringContext<'a> { fn lower_fn_header(&mut self, h: FnHeader) -> hir::FnHeader { hir::FnHeader { unsafety: self.lower_unsafety(h.unsafety), - asyncness: self.lower_asyncness(h.asyncness), + asyncness: self.lower_asyncness(h.asyncness.node), constness: self.lower_constness(h.constness), abi: h.abi, } @@ -3600,9 +3768,10 @@ impl<'a> LoweringContext<'a> { Some(Def::Local(id)) => id, _ => p.id, }; + hir::PatKind::Binding( self.lower_binding_mode(binding_mode), - canonical_id, + self.lower_node_id(canonical_id).hir_id, ident, sub.as_ref().map(|x| self.lower_pat(x)), ) @@ -3653,14 +3822,18 @@ impl<'a> LoweringContext<'a> { let fs = fields .iter() - .map(|f| Spanned { - span: f.span, - node: hir::FieldPat { - id: self.next_id().node_id, - ident: f.node.ident, - pat: self.lower_pat(&f.node.pat), - is_shorthand: f.node.is_shorthand, - }, + .map(|f| { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + + Spanned { + span: f.span, + node: hir::FieldPat { + hir_id, + ident: f.node.ident, + pat: 
self.lower_pat(&f.node.pat), + is_shorthand: f.node.is_shorthand, + }, + } }) .collect(); hir::PatKind::Struct(qpath, fs, etc) @@ -3686,9 +3859,8 @@ impl<'a> LoweringContext<'a> { PatKind::Mac(_) => panic!("Shouldn't exist here"), }; - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(p.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(p.id); P(hir::Pat { - id: node_id, hir_id, node, span: p.span, @@ -3704,9 +3876,8 @@ impl<'a> LoweringContext<'a> { fn lower_anon_const(&mut self, c: &AnonConst) -> hir::AnonConst { self.with_new_scopes(|this| { - let LoweredNodeId { node_id, hir_id } = this.lower_node_id(c.id); + let LoweredNodeId { node_id: _, hir_id } = this.lower_node_id(c.id); hir::AnonConst { - id: node_id, hir_id, body: this.lower_body(None, |this| this.lower_expr(&c.value)), } @@ -3736,7 +3907,7 @@ impl<'a> LoweringContext<'a> { hir::ExprKind::Call(f, args.iter().map(|x| self.lower_expr(x)).collect()) } ExprKind::MethodCall(ref seg, ref args) => { - let hir_seg = self.lower_path_segment( + let hir_seg = P(self.lower_path_segment( e.span, seg, ParamMode::Optional, @@ -3744,7 +3915,7 @@ impl<'a> LoweringContext<'a> { ParenthesizedGenericArgs::Err, ImplTraitContext::disallowed(), None, - ); + )); let args = args.iter().map(|x| self.lower_expr(x)).collect(); hir::ExprKind::MethodCall(hir_seg, seg.ident.span, args) } @@ -3759,7 +3930,7 @@ impl<'a> LoweringContext<'a> { let ohs = P(self.lower_expr(ohs)); hir::ExprKind::Unary(op, ohs) } - ExprKind::Lit(ref l) => hir::ExprKind::Lit(P((*l).clone())), + ExprKind::Lit(ref l) => hir::ExprKind::Lit((*l).clone()), ExprKind::Cast(ref expr, ref ty) => { let expr = P(self.lower_expr(expr)); hir::ExprKind::Cast(expr, self.lower_ty(ty, ImplTraitContext::disallowed())) @@ -3782,16 +3953,14 @@ impl<'a> LoweringContext<'a> { // Wrap the `if let` expr in a block. 
let span = els.span; let els = P(self.lower_expr(els)); - let LoweredNodeId { node_id, hir_id } = self.next_id(); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); let blk = P(hir::Block { stmts: hir_vec![], expr: Some(els), - id: node_id, hir_id, rules: hir::DefaultBlock, span, targeted_by_break: false, - recovered: blk.recovered, }); P(self.expr_block(blk, ThinVec::new())) } @@ -3820,15 +3989,19 @@ impl<'a> LoweringContext<'a> { }), ExprKind::TryBlock(ref body) => { self.with_catch_scope(body.id, |this| { - let unstable_span = - this.allow_internal_unstable(CompilerDesugaringKind::TryBlock, body.span); + let unstable_span = this.mark_span_with_reason( + CompilerDesugaringKind::TryBlock, + body.span, + Some(vec![ + Symbol::intern("try_trait"), + ].into()), + ); let mut block = this.lower_block(body, true).into_inner(); let tail = block.expr.take().map_or_else( || { - let LoweredNodeId { node_id, hir_id } = this.next_id(); + let LoweredNodeId { node_id: _, hir_id } = this.next_id(); let span = this.sess.source_map().end_point(unstable_span); hir::Expr { - id: node_id, span, node: hir::ExprKind::Tup(hir_vec![]), attrs: ThinVec::new(), @@ -3862,7 +4035,7 @@ impl<'a> LoweringContext<'a> { let outer_decl = FnDecl { inputs: decl.inputs.clone(), output: FunctionRetTy::Default(fn_decl_span), - variadic: false, + c_variadic: false, }; // We need to lower the declaration outside the new scope, because we // have to conserve the state of being inside a loop condition for the @@ -4013,15 +4186,14 @@ impl<'a> LoweringContext<'a> { let struct_path = self.std_path(e.span, &struct_path, None, is_unit); let struct_path = hir::QPath::Resolved(None, P(struct_path)); - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(e.id); return hir::Expr { - id: node_id, hir_id, node: if is_unit { hir::ExprKind::Path(struct_path) } else { - hir::ExprKind::Struct(struct_path, fields, None) + hir::ExprKind::Struct(P(struct_path), fields, None) }, span: e.span, attrs: e.attrs.clone(), @@ -4093,13 +4265,13 @@ impl<'a> LoweringContext<'a> { hir::ExprKind::InlineAsm(P(hir_asm), outputs, inputs) } ExprKind::Struct(ref path, ref fields, ref maybe_expr) => hir::ExprKind::Struct( - self.lower_qpath( + P(self.lower_qpath( e.id, &None, path, ParamMode::Optional, ImplTraitContext::disallowed(), - ), + )), fields.iter().map(|x| self.lower_field(x)).collect(), maybe_expr.as_ref().map(|x| P(self.lower_expr(x))), ), @@ -4127,6 +4299,8 @@ impl<'a> LoweringContext<'a> { hir::ExprKind::Yield(P(expr)) } + ExprKind::Err => hir::ExprKind::Err, + // Desugar `ExprIfLet` // from: `if let = []` ExprKind::IfLet(ref pats, ref sub_expr, ref body, ref else_opt) => { @@ -4249,17 +4423,19 @@ impl<'a> LoweringContext<'a> { // } // expand - let head = self.lower_expr(head); + let mut head = self.lower_expr(head); let head_sp = head.span; - let desugared_span = self.allow_internal_unstable( + let desugared_span = self.mark_span_with_reason( CompilerDesugaringKind::ForLoop, head_sp, + None, ); + head.span = desugared_span; let iter = self.str_to_ident("iter"); let next_ident = self.str_to_ident("__next"); - let next_pat = self.pat_ident_binding_mode( + let (next_pat, next_pat_nid) = self.pat_ident_binding_mode( desugared_span, next_ident, hir::BindingAnnotation::Mutable, @@ -4268,9 +4444,9 @@ impl<'a> LoweringContext<'a> { // `::std::option::Option::Some(val) => next = val` let pat_arm = { let val_ident = self.str_to_ident("val"); - let val_pat = 
self.pat_ident(pat.span, val_ident); - let val_expr = P(self.expr_ident(pat.span, val_ident, val_pat.id)); - let next_expr = P(self.expr_ident(pat.span, next_ident, next_pat.id)); + let (val_pat, val_pat_nid) = self.pat_ident(pat.span, val_ident); + let val_expr = P(self.expr_ident(pat.span, val_ident, val_pat_nid)); + let next_expr = P(self.expr_ident(pat.span, next_ident, next_pat_nid)); let assign = P(self.expr( pat.span, hir::ExprKind::Assign(next_expr, val_expr), @@ -4289,7 +4465,7 @@ impl<'a> LoweringContext<'a> { }; // `mut iter` - let iter_pat = self.pat_ident_binding_mode( + let (iter_pat, iter_pat_nid) = self.pat_ident_binding_mode( desugared_span, iter, hir::BindingAnnotation::Mutable @@ -4297,7 +4473,7 @@ impl<'a> LoweringContext<'a> { // `match ::std::iter::Iterator::next(&mut iter) { ... }` let match_expr = { - let iter = P(self.expr_ident(head_sp, iter, iter_pat.id)); + let iter = P(self.expr_ident(head_sp, iter, iter_pat_nid)); let ref_mut_iter = self.expr_mut_addr_of(head_sp, iter); let next_path = &["iter", "Iterator", "next"]; let next_path = P(self.expr_std_path(head_sp, next_path, None, ThinVec::new())); @@ -4314,12 +4490,14 @@ impl<'a> LoweringContext<'a> { ThinVec::new(), )) }; - let match_stmt = respan( - head_sp, - hir::StmtKind::Expr(match_expr, self.next_id().node_id) - ); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + let match_stmt = hir::Stmt { + hir_id, + node: hir::StmtKind::Expr(match_expr), + span: head_sp, + }; - let next_expr = P(self.expr_ident(head_sp, next_ident, next_pat.id)); + let next_expr = P(self.expr_ident(head_sp, next_ident, next_pat_nid)); // `let mut __next` let next_let = self.stmt_let_pat( @@ -4340,10 +4518,12 @@ impl<'a> LoweringContext<'a> { let body_block = self.with_loop_scope(e.id, |this| this.lower_block(body, false)); let body_expr = P(self.expr_block(body_block, ThinVec::new())); - let body_stmt = respan( - body.span, - hir::StmtKind::Expr(body_expr, self.next_id().node_id) - ); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + let body_stmt = hir::Stmt { + hir_id, + node: hir::StmtKind::Expr(body_expr), + span: body.span, + }; let loop_block = P(self.block_all( e.span, @@ -4357,9 +4537,8 @@ impl<'a> LoweringContext<'a> { self.lower_label(opt_label), hir::LoopSource::ForLoop, ); - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(e.id); let loop_expr = P(hir::Expr { - id: node_id, hir_id, node: loop_expr, span: e.span, @@ -4410,8 +4589,13 @@ impl<'a> LoweringContext<'a> { // return Try::from_error(From::from(err)), // } - let unstable_span = - self.allow_internal_unstable(CompilerDesugaringKind::QuestionMark, e.span); + let unstable_span = self.mark_span_with_reason( + CompilerDesugaringKind::QuestionMark, + e.span, + Some(vec![ + Symbol::intern("try_trait") + ].into()), + ); // `Try::into_result()` let discr = { @@ -4440,11 +4624,11 @@ impl<'a> LoweringContext<'a> { // `Ok(val) => #[allow(unreachable_code)] val,` let ok_arm = { let val_ident = self.str_to_ident("val"); - let val_pat = self.pat_ident(e.span, val_ident); + let (val_pat, val_pat_nid) = self.pat_ident(e.span, val_ident); let val_expr = P(self.expr_ident_with_attrs( e.span, val_ident, - val_pat.id, + val_pat_nid, ThinVec::from(attrs.clone()), )); let ok_pat = self.pat_ok(e.span, val_pat); @@ -4456,12 +4640,12 @@ impl<'a> LoweringContext<'a> { // return Try::from_error(From::from(err)),` let err_arm = { let err_ident = self.str_to_ident("err"); - let err_local = 
self.pat_ident(e.span, err_ident); + let (err_local, err_local_nid) = self.pat_ident(e.span, err_ident); let from_expr = { let path = &["convert", "From", "from"]; let from = P(self.expr_std_path( e.span, path, None, ThinVec::new())); - let err_expr = self.expr_ident(e.span, err_ident, err_local.id); + let err_expr = self.expr_ident(e.span, err_ident, err_local_nid); self.expr_call(e.span, from, hir_vec![err_expr]) }; @@ -4470,12 +4654,13 @@ impl<'a> LoweringContext<'a> { let thin_attrs = ThinVec::from(attrs); let catch_scope = self.catch_scopes.last().map(|x| *x); let ret_expr = if let Some(catch_node) = catch_scope { + let target_id = Ok(self.lower_node_id(catch_node).hir_id); P(self.expr( e.span, hir::ExprKind::Break( hir::Destination { label: None, - target_id: Ok(catch_node), + target_id, }, Some(from_err_expr), ), @@ -4499,10 +4684,9 @@ impl<'a> LoweringContext<'a> { ExprKind::Mac(_) => panic!("Shouldn't exist here"), }; - let LoweredNodeId { node_id, hir_id } = self.lower_node_id(e.id); + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(e.id); hir::Expr { - id: node_id, hir_id, node: kind, span: e.span, @@ -4516,26 +4700,25 @@ impl<'a> LoweringContext<'a> { let (l, item_ids) = self.lower_local(l); let mut ids: SmallVec<[hir::Stmt; 1]> = item_ids .into_iter() - .map(|item_id| Spanned { - node: hir::StmtKind::Decl( - P(Spanned { - node: hir::DeclKind::Item(item_id), - span: s.span, - }), - self.next_id().node_id, - ), - span: s.span, + .map(|item_id| { + let item_id = hir::ItemId { id: self.lower_node_id(item_id).hir_id }; + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + + hir::Stmt { + hir_id, + node: hir::StmtKind::Item(item_id), + span: s.span, + } }) .collect(); - ids.push(Spanned { - node: hir::StmtKind::Decl( - P(Spanned { - node: hir::DeclKind::Local(l), - span: s.span, - }), - self.lower_node_id(s.id).node_id, - ), - span: s.span, + ids.push({ + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(s.id); + + hir::Stmt { + hir_id, + node: hir::StmtKind::Local(P(l)), + span: s.span, + } }); return ids; }, @@ -4544,27 +4727,36 @@ impl<'a> LoweringContext<'a> { let mut id = Some(s.id); return self.lower_item_id(it) .into_iter() - .map(|item_id| Spanned { - node: hir::StmtKind::Decl( - P(Spanned { - node: hir::DeclKind::Item(item_id), - span: s.span, - }), - id.take() - .map(|id| self.lower_node_id(id).node_id) - .unwrap_or_else(|| self.next_id().node_id), - ), - span: s.span, + .map(|item_id| { + let LoweredNodeId { node_id: _, hir_id } = id.take() + .map(|id| self.lower_node_id(id)) + .unwrap_or_else(|| self.next_id()); + + hir::Stmt { + hir_id, + node: hir::StmtKind::Item(item_id), + span: s.span, + } }) .collect(); } - StmtKind::Expr(ref e) => Spanned { - node: hir::StmtKind::Expr(P(self.lower_expr(e)), self.lower_node_id(s.id).node_id), - span: s.span, + StmtKind::Expr(ref e) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(s.id); + + hir::Stmt { + hir_id, + node: hir::StmtKind::Expr(P(self.lower_expr(e))), + span: s.span, + } }, - StmtKind::Semi(ref e) => Spanned { - node: hir::StmtKind::Semi(P(self.lower_expr(e)), self.lower_node_id(s.id).node_id), - span: s.span, + StmtKind::Semi(ref e) => { + let LoweredNodeId { node_id: _, hir_id } = self.lower_node_id(s.id); + + hir::Stmt { + hir_id, + node: hir::StmtKind::Semi(P(self.lower_expr(e))), + span: s.span, + } }, StmtKind::Mac(..) 
=> panic!("Shouldn't exist here"), }] @@ -4605,7 +4797,6 @@ impl<'a> LoweringContext<'a> { ParamMode::Explicit, explicit_owner, )), - id: lowered_id.node_id, hir_id: lowered_id.hir_id, } }, @@ -4675,8 +4866,10 @@ impl<'a> LoweringContext<'a> { } fn field(&mut self, ident: Ident, expr: P, span: Span) -> hir::Field { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::Field { - id: self.next_id().node_id, + hir_id, ident, span, expr, @@ -4759,9 +4952,8 @@ impl<'a> LoweringContext<'a> { } fn expr(&mut self, span: Span, node: hir::ExprKind, attrs: ThinVec) -> hir::Expr { - let LoweredNodeId { node_id, hir_id } = self.next_id(); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); hir::Expr { - id: node_id, hir_id, node, span, @@ -4776,20 +4968,24 @@ impl<'a> LoweringContext<'a> { pat: P, source: hir::LocalSource, ) -> hir::Stmt { - let LoweredNodeId { node_id, hir_id } = self.next_id(); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); - let local = P(hir::Local { + let local = hir::Local { pat, ty: None, init: ex, - id: node_id, hir_id, span: sp, attrs: ThinVec::new(), source, - }); - let decl = respan(sp, hir::DeclKind::Local(local)); - respan(sp, hir::StmtKind::Decl(P(decl), self.next_id().node_id)) + }; + + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::Stmt { + hir_id, + node: hir::StmtKind::Local(P(local)), + span: sp + } } fn stmt_let( @@ -4799,15 +4995,15 @@ impl<'a> LoweringContext<'a> { ident: Ident, ex: P, ) -> (hir::Stmt, NodeId) { - let pat = if mutbl { + let (pat, pat_nid) = if mutbl { self.pat_ident_binding_mode(sp, ident, hir::BindingAnnotation::Mutable) } else { self.pat_ident(sp, ident) }; - let pat_id = pat.id; + ( self.stmt_let_pat(sp, Some(ex), pat, hir::LocalSource::Normal), - pat_id, + pat_nid, ) } @@ -4821,17 +5017,15 @@ impl<'a> LoweringContext<'a> { stmts: hir::HirVec, expr: Option>, ) -> hir::Block { - let LoweredNodeId { node_id, hir_id } = self.next_id(); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); hir::Block { stmts, expr, - id: node_id, hir_id, rules: hir::DefaultBlock, span, targeted_by_break: false, - recovered: false, } } @@ -4867,7 +5061,7 @@ impl<'a> LoweringContext<'a> { self.pat(span, pt) } - fn pat_ident(&mut self, span: Span, ident: Ident) -> P { + fn pat_ident(&mut self, span: Span, ident: Ident) -> (P, NodeId) { self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated) } @@ -4876,15 +5070,17 @@ impl<'a> LoweringContext<'a> { span: Span, ident: Ident, bm: hir::BindingAnnotation, - ) -> P { + ) -> (P, NodeId) { let LoweredNodeId { node_id, hir_id } = self.next_id(); - P(hir::Pat { - id: node_id, - hir_id, - node: hir::PatKind::Binding(bm, node_id, ident.with_span_pos(span), None), - span, - }) + ( + P(hir::Pat { + hir_id, + node: hir::PatKind::Binding(bm, hir_id, ident.with_span_pos(span), None), + span, + }), + node_id + ) } fn pat_wild(&mut self, span: Span) -> P { @@ -4892,9 +5088,8 @@ impl<'a> LoweringContext<'a> { } fn pat(&mut self, span: Span, pat: hir::PatKind) -> P { - let LoweredNodeId { node_id, hir_id } = self.next_id(); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); P(hir::Pat { - id: node_id, hir_id, node: pat, span, @@ -4917,8 +5112,8 @@ impl<'a> LoweringContext<'a> { for seg in path.segments.iter_mut() { - if let Some(id) = seg.id { - seg.id = Some(self.lower_node_id(id).node_id); + if seg.hir_id.is_some() { + seg.hir_id = Some(self.next_id().hir_id); } } path @@ -4935,7 +5130,6 @@ impl<'a> LoweringContext<'a> { bound_generic_params: 
hir::HirVec::new(), trait_ref: hir::TraitRef { path: path.and_then(|path| path), - ref_id: id.node_id, hir_ref_id: id.hir_id, }, span, @@ -4952,7 +5146,6 @@ impl<'a> LoweringContext<'a> { _ => hir::TyKind::Path(qpath), }; hir::Ty { - id: id.node_id, hir_id: id.hir_id, node, span, @@ -4963,13 +5156,15 @@ impl<'a> LoweringContext<'a> { /// with no explicit lifetime. fn elided_ref_lifetime(&mut self, span: Span) -> hir::Lifetime { match self.anonymous_lifetime_mode { - // Intercept when we are in an impl header and introduce an in-band lifetime. + // Intercept when we are in an impl header or async fn and introduce an in-band + // lifetime. // Hence `impl Foo for &u32` becomes `impl<'f> Foo for &'f u32` for some fresh // `'f`. AnonymousLifetimeMode::CreateParameter => { let fresh_name = self.collect_fresh_in_band_lifetime(span); + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); hir::Lifetime { - id: self.next_id().node_id, + hir_id, span, name: hir::LifetimeName::Param(fresh_name), } @@ -4978,6 +5173,10 @@ impl<'a> LoweringContext<'a> { AnonymousLifetimeMode::ReportError => self.new_error_lifetime(None, span), AnonymousLifetimeMode::PassThrough => self.new_implicit_lifetime(span), + + AnonymousLifetimeMode::Replace(replacement) => { + self.new_replacement_lifetime(replacement, span) + } } } @@ -5012,6 +5211,12 @@ impl<'a> LoweringContext<'a> { /// sorts of cases are deprecated. This may therefore report a warning or an /// error, depending on the mode. fn elided_path_lifetimes(&mut self, span: Span, count: usize) -> P<[hir::Lifetime]> { + (0..count) + .map(|_| self.elided_path_lifetime(span)) + .collect() + } + + fn elided_path_lifetime(&mut self, span: Span) -> hir::Lifetime { match self.anonymous_lifetime_mode { // N.B., We intentionally ignore the create-parameter mode here // and instead "pass through" to resolve-lifetimes, which will then @@ -5019,21 +5224,16 @@ impl<'a> LoweringContext<'a> { // impl elision for deprecated forms like // // impl Foo for std::cell::Ref // note lack of '_ - AnonymousLifetimeMode::CreateParameter => {} + AnonymousLifetimeMode::CreateParameter | + // This is the normal case. + AnonymousLifetimeMode::PassThrough => self.new_implicit_lifetime(span), - AnonymousLifetimeMode::ReportError => { - return (0..count) - .map(|_| self.new_error_lifetime(None, span)) - .collect(); + AnonymousLifetimeMode::Replace(replacement) => { + self.new_replacement_lifetime(replacement, span) } - // This is the normal case. - AnonymousLifetimeMode::PassThrough => {} + AnonymousLifetimeMode::ReportError => self.new_error_lifetime(None, span), } - - (0..count) - .map(|_| self.new_implicit_lifetime(span)) - .collect() } /// Invoked to create the lifetime argument(s) for an elided trait object @@ -5063,14 +5263,30 @@ impl<'a> LoweringContext<'a> { // This is the normal case. AnonymousLifetimeMode::PassThrough => {} + + // We don't need to do any replacement here as this lifetime + // doesn't refer to an elided lifetime elsewhere in the function + // signature. 
+ AnonymousLifetimeMode::Replace(_) => {} } self.new_implicit_lifetime(span) } + fn new_replacement_lifetime( + &mut self, + replacement: LtReplacement, + span: Span, + ) -> hir::Lifetime { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + self.replace_elided_lifetime(hir_id, span, replacement) + } + fn new_implicit_lifetime(&mut self, span: Span) -> hir::Lifetime { + let LoweredNodeId { node_id: _, hir_id } = self.next_id(); + hir::Lifetime { - id: self.next_id().node_id, + hir_id, span, name: hir::LifetimeName::Implicit, } diff --git a/src/librustc/hir/map/blocks.rs b/src/librustc/hir/map/blocks.rs index 40904eaa5db62..1114ef52bbc0c 100644 --- a/src/librustc/hir/map/blocks.rs +++ b/src/librustc/hir/map/blocks.rs @@ -1,19 +1,9 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module provides a simplified abstraction for working with -//! code blocks identified by their integer node-id. In particular, +//! code blocks identified by their integer `NodeId`. In particular, //! it captures a common set of attributes that all "function-like -//! things" (represented by `FnLike` instances) share. For example, +//! things" (represented by `FnLike` instances) share. For example, //! all `FnLike` instances have a type signature (be it explicit or -//! inferred). And all `FnLike` instances have a body, i.e., the code +//! inferred). And all `FnLike` instances have a body, i.e., the code //! that is run when the function-like thing it represents is invoked. //! //! With the above abstraction in place, one can treat the program @@ -21,11 +11,11 @@ //! nested within a uniquely determined `FnLike`), and users can ask //! for the `Code` associated with a particular NodeId. -use hir as ast; -use hir::map; -use hir::{Expr, FnDecl, Node}; -use hir::intravisit::FnKind; -use syntax::ast::{Attribute, Ident, Name, NodeId}; +use crate::hir as ast; +use crate::hir::map; +use crate::hir::{Expr, FnDecl, Node}; +use crate::hir::intravisit::FnKind; +use syntax::ast::{Attribute, Ident, NodeId}; use syntax_pos::Span; /// An FnLikeNode is a Node that is like a fn, in that it has a decl @@ -85,10 +75,10 @@ pub enum Code<'a> { } impl<'a> Code<'a> { - pub fn id(&self) -> NodeId { + pub fn id(&self) -> ast::HirId { match *self { Code::FnLike(node) => node.id(), - Code::Expr(block) => block.id, + Code::Expr(block) => block.hir_id, } } @@ -108,13 +98,13 @@ impl<'a> Code<'a> { /// These are all the components one can extract from a fn item for /// use when implementing FnLikeNode operations. 
struct ItemFnParts<'a> { - name: Name, + ident: Ident, decl: &'a ast::FnDecl, header: ast::FnHeader, vis: &'a ast::Visibility, generics: &'a ast::Generics, body: ast::BodyId, - id: NodeId, + id: ast::HirId, span: Span, attrs: &'a [Attribute], } @@ -124,13 +114,13 @@ struct ItemFnParts<'a> { struct ClosureParts<'a> { decl: &'a FnDecl, body: ast::BodyId, - id: NodeId, + id: ast::HirId, span: Span, attrs: &'a [Attribute], } impl<'a> ClosureParts<'a> { - fn new(d: &'a FnDecl, b: ast::BodyId, id: NodeId, s: Span, attrs: &'a [Attribute]) -> Self { + fn new(d: &'a FnDecl, b: ast::BodyId, id: ast::HirId, s: Span, attrs: &'a [Attribute]) -> Self { ClosureParts { decl: d, body: b, @@ -178,7 +168,7 @@ impl<'a> FnLikeNode<'a> { |c: ClosureParts<'_>| c.span) } - pub fn id(self) -> NodeId { + pub fn id(self) -> ast::HirId { self.handle(|i: ItemFnParts<'_>| i.id, |id, _, _: &'a ast::MethodSig, _, _, _, _| id, |c: ClosureParts<'_>| c.id) @@ -210,7 +200,7 @@ impl<'a> FnLikeNode<'a> { pub fn kind(self) -> FnKind<'a> { let item = |p: ItemFnParts<'a>| -> FnKind<'a> { - FnKind::ItemFn(p.name, p.generics, p.header, p.vis, p.attrs) + FnKind::ItemFn(p.ident, p.generics, p.header, p.vis, p.attrs) }; let closure = |c: ClosureParts<'a>| { FnKind::Closure(c.attrs) @@ -223,7 +213,7 @@ impl<'a> FnLikeNode<'a> { fn handle(self, item_fn: I, method: M, closure: C) -> A where I: FnOnce(ItemFnParts<'a>) -> A, - M: FnOnce(NodeId, + M: FnOnce(ast::HirId, Ident, &'a ast::MethodSig, Option<&'a ast::Visibility>, @@ -237,8 +227,8 @@ impl<'a> FnLikeNode<'a> { map::Node::Item(i) => match i.node { ast::ItemKind::Fn(ref decl, header, ref generics, block) => item_fn(ItemFnParts { - id: i.id, - name: i.name, + id: i.hir_id, + ident: i.ident, decl: &decl, body: block, vis: &i.vis, @@ -251,21 +241,21 @@ impl<'a> FnLikeNode<'a> { }, map::Node::TraitItem(ti) => match ti.node { ast::TraitItemKind::Method(ref sig, ast::TraitMethod::Provided(body)) => { - method(ti.id, ti.ident, sig, None, body, ti.span, &ti.attrs) + method(ti.hir_id, ti.ident, sig, None, body, ti.span, &ti.attrs) } _ => bug!("trait method FnLikeNode that is not fn-like"), }, map::Node::ImplItem(ii) => { match ii.node { ast::ImplItemKind::Method(ref sig, body) => { - method(ii.id, ii.ident, sig, Some(&ii.vis), body, ii.span, &ii.attrs) + method(ii.hir_id, ii.ident, sig, Some(&ii.vis), body, ii.span, &ii.attrs) } _ => bug!("impl method FnLikeNode that is not fn-like") } }, map::Node::Expr(e) => match e.node { ast::ExprKind::Closure(_, ref decl, block, _fn_decl_span, _gen) => - closure(ClosureParts::new(&decl, block, e.id, e.span, &e.attrs)), + closure(ClosureParts::new(&decl, block, e.hir_id, e.span, &e.attrs)), _ => bug!("expr FnLikeNode that is not fn-like"), }, _ => bug!("other FnLikeNode that is not fn-like"), diff --git a/src/librustc/hir/map/collector.rs b/src/librustc/hir/map/collector.rs index 2917fd7457acf..75d7d843dea7e 100644 --- a/src/librustc/hir/map/collector.rs +++ b/src/librustc/hir/map/collector.rs @@ -1,27 +1,19 @@ -// Copyright 2015-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::*; -use dep_graph::{DepGraph, DepKind, DepNodeIndex}; -use hir::def_id::{LOCAL_CRATE, CrateNum}; -use hir::intravisit::{Visitor, NestedVisitorMap}; +use crate::dep_graph::{DepGraph, DepKind, DepNodeIndex}; +use crate::hir; +use crate::hir::def_id::{LOCAL_CRATE, CrateNum}; +use crate::hir::intravisit::{Visitor, NestedVisitorMap}; use rustc_data_structures::svh::Svh; -use ich::Fingerprint; -use middle::cstore::CrateStore; -use session::CrateDisambiguator; +use crate::ich::Fingerprint; +use crate::middle::cstore::CrateStore; +use crate::session::CrateDisambiguator; +use crate::session::Session; use std::iter::repeat; use syntax::ast::{NodeId, CRATE_NODE_ID}; use syntax::source_map::SourceMap; use syntax_pos::Span; -use ich::StableHashingContext; +use crate::ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; /// A Visitor that walks over the HIR and collects Nodes into a HIR map @@ -35,7 +27,7 @@ pub(super) struct NodeCollector<'a, 'hir> { /// The node map map: Vec>>, /// The parent of this node - parent_node: NodeId, + parent_node: hir::HirId, // These fields keep track of the currently relevant DepNodes during // the visitor's traversal. @@ -46,26 +38,76 @@ pub(super) struct NodeCollector<'a, 'hir> { dep_graph: &'a DepGraph, definitions: &'a definitions::Definitions, + hir_to_node_id: &'a FxHashMap, hcx: StableHashingContext<'a>, // We are collecting DepNode::HirBody hashes here so we can compute the // crate hash from then later on. - hir_body_nodes: Vec<(DefPathHash, DepNodeIndex)>, + hir_body_nodes: Vec<(DefPathHash, Fingerprint)>, +} + +fn input_dep_node_and_hash( + dep_graph: &DepGraph, + hcx: &mut StableHashingContext<'_>, + dep_node: DepNode, + input: I, +) -> (DepNodeIndex, Fingerprint) +where + I: for<'a> HashStable>, +{ + let dep_node_index = dep_graph.input_task(dep_node, &mut *hcx, &input).1; + + let hash = if dep_graph.is_fully_enabled() { + dep_graph.fingerprint_of(dep_node_index) + } else { + let mut stable_hasher = StableHasher::new(); + input.hash_stable(hcx, &mut stable_hasher); + stable_hasher.finish() + }; + + (dep_node_index, hash) +} + +fn alloc_hir_dep_nodes( + dep_graph: &DepGraph, + hcx: &mut StableHashingContext<'_>, + def_path_hash: DefPathHash, + item_like: I, + hir_body_nodes: &mut Vec<(DefPathHash, Fingerprint)>, +) -> (DepNodeIndex, DepNodeIndex) +where + I: for<'a> HashStable>, +{ + let sig = dep_graph.input_task( + def_path_hash.to_dep_node(DepKind::Hir), + &mut *hcx, + HirItemLike { item_like: &item_like, hash_bodies: false }, + ).1; + let (full, hash) = input_dep_node_and_hash( + dep_graph, + hcx, + def_path_hash.to_dep_node(DepKind::HirBody), + HirItemLike { item_like: &item_like, hash_bodies: true }, + ); + hir_body_nodes.push((def_path_hash, hash)); + (sig, full) } impl<'a, 'hir> NodeCollector<'a, 'hir> { - pub(super) fn root(krate: &'hir Crate, + pub(super) fn root(sess: &'a Session, + krate: &'hir Crate, dep_graph: &'a DepGraph, definitions: &'a definitions::Definitions, - hcx: StableHashingContext<'a>, - source_map: &'a SourceMap) + hir_to_node_id: &'a FxHashMap, + mut hcx: StableHashingContext<'a>) -> NodeCollector<'a, 'hir> { let root_mod_def_path_hash = definitions.def_path_hash(CRATE_DEF_INDEX); + let mut hir_body_nodes = Vec::new(); + // Allocate DepNodes for the root module - let (root_mod_sig_dep_index, root_mod_full_dep_index); - { + let (root_mod_sig_dep_index, root_mod_full_dep_index) = { let Crate { ref module, // Crate attributes are not copied over to the root 
`Mod`, so hash @@ -79,48 +121,45 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { impl_items: _, bodies: _, trait_impls: _, - trait_auto_impl: _, body_ids: _, + modules: _, } = *krate; - root_mod_sig_dep_index = dep_graph.input_task( - root_mod_def_path_hash.to_dep_node(DepKind::Hir), - &hcx, - HirItemLike { item_like: (module, attrs, span), hash_bodies: false }, - ).1; - root_mod_full_dep_index = dep_graph.input_task( - root_mod_def_path_hash.to_dep_node(DepKind::HirBody), - &hcx, - HirItemLike { item_like: (module, attrs, span), hash_bodies: true }, - ).1; - } + alloc_hir_dep_nodes( + dep_graph, + &mut hcx, + root_mod_def_path_hash, + (module, attrs, span), + &mut hir_body_nodes, + ) + }; { dep_graph.input_task( DepNode::new_no_params(DepKind::AllLocalTraitImpls), - &hcx, + &mut hcx, &krate.trait_impls, ); } - let hir_body_nodes = vec![(root_mod_def_path_hash, root_mod_full_dep_index)]; - let mut collector = NodeCollector { krate, - source_map, - map: vec![], - parent_node: CRATE_NODE_ID, + source_map: sess.source_map(), + map: repeat(None).take(sess.current_node_id_count()).collect(), + parent_node: hir::CRATE_HIR_ID, current_signature_dep_index: root_mod_sig_dep_index, current_full_dep_index: root_mod_full_dep_index, current_dep_node_owner: CRATE_DEF_INDEX, currently_in_body: false, dep_graph, definitions, + hir_to_node_id, hcx, hir_body_nodes, }; collector.insert_entry(CRATE_NODE_ID, Entry { parent: CRATE_NODE_ID, + parent_hir: hir::CRATE_HIR_ID, dep_node: root_mod_sig_dep_index, node: Node::Crate, }); @@ -139,10 +178,8 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { let node_hashes = self .hir_body_nodes .iter() - .fold(Fingerprint::ZERO, |fingerprint, &(def_path_hash, dep_node_index)| { - fingerprint.combine( - def_path_hash.0.combine(self.dep_graph.fingerprint_of(dep_node_index)) - ) + .fold(Fingerprint::ZERO, |combined_fingerprint, &(def_path_hash, fingerprint)| { + combined_fingerprint.combine(def_path_hash.0.combine(fingerprint)) }); let mut upstream_crates: Vec<_> = cstore.crates_untracked().iter().map(|&cnum| { @@ -169,32 +206,31 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { source_file_names.sort_unstable(); - let (_, crate_dep_node_index) = self - .dep_graph - .input_task(DepNode::new_no_params(DepKind::Krate), - &self.hcx, - (((node_hashes, upstream_crates), source_file_names), - (commandline_args_hash, - crate_disambiguator.to_fingerprint()))); - - let svh = Svh::new(self.dep_graph - .fingerprint_of(crate_dep_node_index) - .to_smaller_hash()); + let crate_hash_input = ( + ((node_hashes, upstream_crates), source_file_names), + (commandline_args_hash, crate_disambiguator.to_fingerprint()) + ); + + let (_, crate_hash) = input_dep_node_and_hash( + self.dep_graph, + &mut self.hcx, + DepNode::new_no_params(DepKind::Krate), + crate_hash_input, + ); + + let svh = Svh::new(crate_hash.to_smaller_hash()); (self.map, svh) } fn insert_entry(&mut self, id: NodeId, entry: Entry<'hir>) { debug!("hir_map: {:?} => {:?}", id, entry); - let len = self.map.len(); - if id.as_usize() >= len { - self.map.extend(repeat(None).take(id.as_usize() - len + 1)); - } self.map[id.as_usize()] = Some(entry); } - fn insert(&mut self, span: Span, id: NodeId, node: Node<'hir>) { + fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) { let entry = Entry { - parent: self.parent_node, + parent: self.hir_to_node_id[&self.parent_node], + parent_hir: self.parent_node, dep_node: if self.currently_in_body { self.current_full_dep_index } else { @@ -203,21 +239,23 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { node, }; + 
let node_id = self.hir_to_node_id[&hir_id]; + // Make sure that the DepNode of some node coincides with the HirId // owner of that node. if cfg!(debug_assertions) { - let hir_id = self.definitions.node_to_hir_id(id); + assert_eq!(self.definitions.node_to_hir_id(node_id), hir_id); if hir_id.owner != self.current_dep_node_owner { - let node_str = match self.definitions.opt_def_index(id) { + let node_str = match self.definitions.opt_def_index(node_id) { Some(def_index) => { self.definitions.def_path(def_index).to_string_no_crate() } None => format!("{:?}", node) }; - let forgot_str = if hir_id == ::hir::DUMMY_HIR_ID { - format!("\nMaybe you forgot to lower the node id {:?}?", id) + let forgot_str = if hir_id == crate::hir::DUMMY_HIR_ID { + format!("\nMaybe you forgot to lower the node id {:?}?", node_id) } else { String::new() }; @@ -239,17 +277,21 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { } } - self.insert_entry(id, entry); + self.insert_entry(node_id, entry); } - fn with_parent(&mut self, parent_id: NodeId, f: F) { + fn with_parent( + &mut self, + parent_node_id: HirId, + f: F, + ) { let parent_node = self.parent_node; - self.parent_node = parent_id; + self.parent_node = parent_node_id; f(self); self.parent_node = parent_node; } - fn with_dep_node_owner>, + fn with_dep_node_owner HashStable>, F: FnOnce(&mut Self)>(&mut self, dep_node_owner: DefIndex, item_like: &T, @@ -261,19 +303,15 @@ impl<'a, 'hir> NodeCollector<'a, 'hir> { let def_path_hash = self.definitions.def_path_hash(dep_node_owner); - self.current_signature_dep_index = self.dep_graph.input_task( - def_path_hash.to_dep_node(DepKind::Hir), - &self.hcx, - HirItemLike { item_like, hash_bodies: false }, - ).1; - - self.current_full_dep_index = self.dep_graph.input_task( - def_path_hash.to_dep_node(DepKind::HirBody), - &self.hcx, - HirItemLike { item_like, hash_bodies: true }, - ).1; - - self.hir_body_nodes.push((def_path_hash, self.current_full_dep_index)); + let (signature_dep_index, full_dep_index) = alloc_hir_dep_nodes( + self.dep_graph, + &mut self.hcx, + def_path_hash, + item_like, + &mut self.hir_body_nodes, + ); + self.current_signature_dep_index = signature_dep_index; + self.current_full_dep_index = full_dep_index; self.current_dep_node_owner = dep_node_owner; self.currently_in_body = false; @@ -317,14 +355,14 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { fn visit_item(&mut self, i: &'hir Item) { debug!("visit_item: {:?}", i); debug_assert_eq!(i.hir_id.owner, - self.definitions.opt_def_index(i.id).unwrap()); + self.definitions.opt_def_index(self.hir_to_node_id[&i.hir_id]).unwrap()); self.with_dep_node_owner(i.hir_id.owner, i, |this| { - this.insert(i.span, i.id, Node::Item(i)); - this.with_parent(i.id, |this| { + this.insert(i.span, i.hir_id, Node::Item(i)); + this.with_parent(i.hir_id, |this| { if let ItemKind::Struct(ref struct_def, _) = i.node { - // If this is a tuple-like struct, register the constructor. - if !struct_def.is_struct() { - this.insert(i.span, struct_def.id(), Node::StructCtor(struct_def)); + // If this is a tuple or unit-like struct, register the constructor. 
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { + this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def)); } } intravisit::walk_item(this, i); @@ -333,25 +371,25 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { } fn visit_foreign_item(&mut self, foreign_item: &'hir ForeignItem) { - self.insert(foreign_item.span, foreign_item.id, Node::ForeignItem(foreign_item)); + self.insert(foreign_item.span, foreign_item.hir_id, Node::ForeignItem(foreign_item)); - self.with_parent(foreign_item.id, |this| { + self.with_parent(foreign_item.hir_id, |this| { intravisit::walk_foreign_item(this, foreign_item); }); } fn visit_generic_param(&mut self, param: &'hir GenericParam) { - self.insert(param.span, param.id, Node::GenericParam(param)); + self.insert(param.span, param.hir_id, Node::GenericParam(param)); intravisit::walk_generic_param(self, param); } fn visit_trait_item(&mut self, ti: &'hir TraitItem) { debug_assert_eq!(ti.hir_id.owner, - self.definitions.opt_def_index(ti.id).unwrap()); + self.definitions.opt_def_index(self.hir_to_node_id[&ti.hir_id]).unwrap()); self.with_dep_node_owner(ti.hir_id.owner, ti, |this| { - this.insert(ti.span, ti.id, Node::TraitItem(ti)); + this.insert(ti.span, ti.hir_id, Node::TraitItem(ti)); - this.with_parent(ti.id, |this| { + this.with_parent(ti.hir_id, |this| { intravisit::walk_trait_item(this, ti); }); }); @@ -359,11 +397,11 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { fn visit_impl_item(&mut self, ii: &'hir ImplItem) { debug_assert_eq!(ii.hir_id.owner, - self.definitions.opt_def_index(ii.id).unwrap()); + self.definitions.opt_def_index(self.hir_to_node_id[&ii.hir_id]).unwrap()); self.with_dep_node_owner(ii.hir_id.owner, ii, |this| { - this.insert(ii.span, ii.id, Node::ImplItem(ii)); + this.insert(ii.span, ii.hir_id, Node::ImplItem(ii)); - this.with_parent(ii.id, |this| { + this.with_parent(ii.hir_id, |this| { intravisit::walk_impl_item(this, ii); }); }); @@ -375,83 +413,82 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { } else { Node::Pat(pat) }; - self.insert(pat.span, pat.id, node); + self.insert(pat.span, pat.hir_id, node); - self.with_parent(pat.id, |this| { + self.with_parent(pat.hir_id, |this| { intravisit::walk_pat(this, pat); }); } fn visit_anon_const(&mut self, constant: &'hir AnonConst) { - self.insert(DUMMY_SP, constant.id, Node::AnonConst(constant)); + self.insert(DUMMY_SP, constant.hir_id, Node::AnonConst(constant)); - self.with_parent(constant.id, |this| { + self.with_parent(constant.hir_id, |this| { intravisit::walk_anon_const(this, constant); }); } fn visit_expr(&mut self, expr: &'hir Expr) { - self.insert(expr.span, expr.id, Node::Expr(expr)); + self.insert(expr.span, expr.hir_id, Node::Expr(expr)); - self.with_parent(expr.id, |this| { + self.with_parent(expr.hir_id, |this| { intravisit::walk_expr(this, expr); }); } fn visit_stmt(&mut self, stmt: &'hir Stmt) { - let id = stmt.node.id(); - self.insert(stmt.span, id, Node::Stmt(stmt)); + self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt)); - self.with_parent(id, |this| { + self.with_parent(stmt.hir_id, |this| { intravisit::walk_stmt(this, stmt); }); } fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment) { - if let Some(id) = path_segment.id { - self.insert(path_span, id, Node::PathSegment(path_segment)); + if let Some(hir_id) = path_segment.hir_id { + self.insert(path_span, hir_id, Node::PathSegment(path_segment)); } intravisit::walk_path_segment(self, path_span, path_segment); } fn visit_ty(&mut self, ty: &'hir Ty) { - 
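// Illustrative sketch (not part of the patch): the `with_parent` calls above all follow
// a simple save/restore pattern -- set the new parent, walk the children, then put the
// previous parent back. A stripped-down version of that helper:
struct ParentTracker {
    parent: u32,
}

impl ParentTracker {
    fn with_parent(&mut self, new_parent: u32, f: impl FnOnce(&mut Self)) {
        let saved = self.parent;
        self.parent = new_parent;
        f(self);
        self.parent = saved;
    }
}

fn main() {
    let mut t = ParentTracker { parent: 0 };
    t.with_parent(1, |t| {
        assert_eq!(t.parent, 1);
        t.with_parent(2, |t| assert_eq!(t.parent, 2));
        assert_eq!(t.parent, 1); // restored after the nested walk
    });
    assert_eq!(t.parent, 0);
}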
self.insert(ty.span, ty.id, Node::Ty(ty)); + self.insert(ty.span, ty.hir_id, Node::Ty(ty)); - self.with_parent(ty.id, |this| { + self.with_parent(ty.hir_id, |this| { intravisit::walk_ty(this, ty); }); } fn visit_trait_ref(&mut self, tr: &'hir TraitRef) { - self.insert(tr.path.span, tr.ref_id, Node::TraitRef(tr)); + self.insert(tr.path.span, tr.hir_ref_id, Node::TraitRef(tr)); - self.with_parent(tr.ref_id, |this| { + self.with_parent(tr.hir_ref_id, |this| { intravisit::walk_trait_ref(this, tr); }); } fn visit_fn(&mut self, fk: intravisit::FnKind<'hir>, fd: &'hir FnDecl, - b: BodyId, s: Span, id: NodeId) { + b: BodyId, s: Span, id: HirId) { assert_eq!(self.parent_node, id); intravisit::walk_fn(self, fk, fd, b, s, id); } fn visit_block(&mut self, block: &'hir Block) { - self.insert(block.span, block.id, Node::Block(block)); - self.with_parent(block.id, |this| { + self.insert(block.span, block.hir_id, Node::Block(block)); + self.with_parent(block.hir_id, |this| { intravisit::walk_block(this, block); }); } fn visit_local(&mut self, l: &'hir Local) { - self.insert(l.span, l.id, Node::Local(l)); - self.with_parent(l.id, |this| { + self.insert(l.span, l.hir_id, Node::Local(l)); + self.with_parent(l.hir_id, |this| { intravisit::walk_local(this, l) }) } fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) { - self.insert(lifetime.span, lifetime.id, Node::Lifetime(lifetime)); + self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime)); } fn visit_vis(&mut self, visibility: &'hir Visibility) { @@ -459,9 +496,9 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { VisibilityKind::Public | VisibilityKind::Crate(_) | VisibilityKind::Inherited => {} - VisibilityKind::Restricted { id, .. } => { - self.insert(visibility.span, id, Node::Visibility(visibility)); - self.with_parent(id, |this| { + VisibilityKind::Restricted { hir_id, .. } => { + self.insert(visibility.span, hir_id, Node::Visibility(visibility)); + self.with_parent(hir_id, |this| { intravisit::walk_vis(this, visibility); }); } @@ -469,24 +506,28 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { } fn visit_macro_def(&mut self, macro_def: &'hir MacroDef) { - let def_index = self.definitions.opt_def_index(macro_def.id).unwrap(); + let node_id = self.hir_to_node_id[¯o_def.hir_id]; + let def_index = self.definitions.opt_def_index(node_id).unwrap(); self.with_dep_node_owner(def_index, macro_def, |this| { - this.insert(macro_def.span, macro_def.id, Node::MacroDef(macro_def)); + this.insert(macro_def.span, macro_def.hir_id, Node::MacroDef(macro_def)); }); } - fn visit_variant(&mut self, v: &'hir Variant, g: &'hir Generics, item_id: NodeId) { - let id = v.node.data.id(); - self.insert(v.span, id, Node::Variant(v)); - self.with_parent(id, |this| { + fn visit_variant(&mut self, v: &'hir Variant, g: &'hir Generics, item_id: HirId) { + self.insert(v.span, v.node.id, Node::Variant(v)); + self.with_parent(v.node.id, |this| { + // Register the constructor of this variant. 
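// Illustrative sketch (not part of the patch): the old code asked `!struct_def.is_struct()`
// to decide whether a constructor exists; the new code asks the variant data for an
// optional ctor id directly, so unit and tuple variants yield `Some(..)` while
// brace-style variants yield `None`. `VariantData` and the id type below are simplified
// stand-ins for the HIR types.
enum VariantData {
    Struct,     // `struct S { .. }` / `Variant { .. }` -- no constructor
    Tuple(u32), // `struct S(..)` / `Variant(..)` -- ctor id attached
    Unit(u32),  // `struct S;` / `Variant` -- ctor id attached
}

impl VariantData {
    fn ctor_id(&self) -> Option<u32> {
        match *self {
            VariantData::Struct => None,
            VariantData::Tuple(id) | VariantData::Unit(id) => Some(id),
        }
    }
}

fn main() {
    assert_eq!(VariantData::Struct.ctor_id(), None);
    assert_eq!(VariantData::Tuple(42).ctor_id(), Some(42));
    assert_eq!(VariantData::Unit(7).ctor_id(), Some(7));
}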
+ if let Some(ctor_hir_id) = v.node.data.ctor_hir_id() { + this.insert(v.span, ctor_hir_id, Node::Ctor(&v.node.data)); + } intravisit::walk_variant(this, v, g, item_id); }); } fn visit_struct_field(&mut self, field: &'hir StructField) { - self.insert(field.span, field.id, Node::Field(field)); - self.with_parent(field.id, |this| { + self.insert(field.span, field.hir_id, Node::Field(field)); + self.with_parent(field.hir_id, |this| { intravisit::walk_struct_field(this, field); }); } diff --git a/src/librustc/hir/map/def_collector.rs b/src/librustc/hir/map/def_collector.rs index 26cf8f5d2ae5c..1a3bbc5ecc49e 100644 --- a/src/librustc/hir/map/def_collector.rs +++ b/src/librustc/hir/map/def_collector.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::map::definitions::*; -use hir::def_id::{CRATE_DEF_INDEX, DefIndex, DefIndexAddressSpace}; -use session::CrateDisambiguator; +use crate::hir::map::definitions::*; +use crate::hir::def_id::{CRATE_DEF_INDEX, DefIndex, DefIndexAddressSpace}; +use crate::session::CrateDisambiguator; use syntax::ast::*; use syntax::ext::hygiene::Mark; @@ -20,9 +10,9 @@ use syntax::symbol::Symbol; use syntax::parse::token::{self, Token}; use syntax_pos::Span; -use hir::map::{ITEM_LIKE_SPACE, REGULAR_SPACE}; +use crate::hir::map::{ITEM_LIKE_SPACE, REGULAR_SPACE}; -/// Creates def ids for nodes in the AST. +/// Creates `DefId`s for nodes in the AST. pub struct DefCollector<'a> { definitions: &'a mut Definitions, parent_def: Option, @@ -83,7 +73,7 @@ impl<'a> DefCollector<'a> { decl: &'a FnDecl, body: &'a Block, ) { - let (closure_id, return_impl_trait_id) = match header.asyncness { + let (closure_id, return_impl_trait_id) = match header.asyncness.node { IsAsync::Async { closure_id, return_impl_trait_id, @@ -130,19 +120,19 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { let def_data = match i.node { ItemKind::Impl(..) => DefPathData::Impl, ItemKind::Trait(..) => DefPathData::Trait(i.ident.as_interned_str()), + ItemKind::TraitAlias(..) => DefPathData::TraitAlias(i.ident.as_interned_str()), ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) | - ItemKind::TraitAlias(..) | ItemKind::Existential(..) | - ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | ItemKind::Ty(..) => - DefPathData::TypeNs(i.ident.as_interned_str()), + ItemKind::Existential(..) | ItemKind::ExternCrate(..) | ItemKind::ForeignMod(..) | + ItemKind::Ty(..) => DefPathData::TypeNs(i.ident.as_interned_str()), ItemKind::Mod(..) if i.ident == keywords::Invalid.ident() => { return visit::walk_item(self, i); } ItemKind::Fn( ref decl, - ref header @ FnHeader { asyncness: IsAsync::Async { .. }, .. }, + ref header, ref generics, ref body, - ) => { + ) if header.asyncness.node.is_async() => { return self.visit_async_fn( i.id, i.ident.name, @@ -168,12 +158,9 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { self.with_parent(def, |this| { match i.node { ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => { - // If this is a tuple-like struct, register the constructor. 
- if !struct_def.is_struct() { - this.create_def(struct_def.id(), - DefPathData::StructCtor, - REGULAR_SPACE, - i.span); + // If this is a unit or tuple-like struct, register the constructor. + if let Some(ctor_hir_id) = struct_def.ctor_id() { + this.create_def(ctor_hir_id, DefPathData::Ctor, REGULAR_SPACE, i.span); } } _ => {} @@ -203,11 +190,16 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { } fn visit_variant(&mut self, v: &'a Variant, g: &'a Generics, item_id: NodeId) { - let def = self.create_def(v.node.data.id(), + let def = self.create_def(v.node.id, DefPathData::EnumVariant(v.node.ident.as_interned_str()), REGULAR_SPACE, v.span); - self.with_parent(def, |this| visit::walk_variant(this, v, g, item_id)); + self.with_parent(def, |this| { + if let Some(ctor_hir_id) = v.node.data.ctor_id() { + this.create_def(ctor_hir_id, DefPathData::Ctor, REGULAR_SPACE, v.span); + } + visit::walk_variant(this, v, g, item_id) + }); } fn visit_variant_data(&mut self, data: &'a VariantData, _: Ident, @@ -228,6 +220,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { let def_path_data = match param.kind { GenericParamKind::Lifetime { .. } => DefPathData::LifetimeParam(name), GenericParamKind::Type { .. } => DefPathData::TypeParam(name), + GenericParamKind::Const { .. } => DefPathData::ConstParam(name), }; self.create_def(param.id, def_path_data, REGULAR_SPACE, param.ident.span); @@ -251,9 +244,9 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_impl_item(&mut self, ii: &'a ImplItem) { let def_data = match ii.node { ImplItemKind::Method(MethodSig { - header: ref header @ FnHeader { asyncness: IsAsync::Async { .. }, .. }, + ref header, ref decl, - }, ref body) => { + }, ref body) if header.asyncness.node.is_async() => { return self.visit_async_fn( ii.id, ii.ident.name, @@ -348,7 +341,7 @@ impl<'a> visit::Visitor<'a> for DefCollector<'a> { fn visit_token(&mut self, t: Token) { if let Token::Interpolated(nt) = t { - if let token::NtExpr(ref expr) = nt.0 { + if let token::NtExpr(ref expr) = *nt { if let ExprKind::Mac(..) = expr.node { self.visit_macro_invoc(expr.id); } diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index d5031efae576b..1006d813e65ed 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -1,28 +1,18 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! For each definition, we track the following data. A definition -//! here is defined somewhat circularly as "something with a def-id", +//! For each definition, we track the following data. A definition +//! here is defined somewhat circularly as "something with a `DefId`", //! but it generally corresponds to things like structs, enums, etc. //! There are also some rather random cases (like const initializer //! expressions) that are mostly just leftovers. 
-use hir; -use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace, +use crate::hir; +use crate::hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, DefIndexAddressSpace, CRATE_DEF_INDEX}; -use ich::Fingerprint; +use crate::ich::Fingerprint; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::{IndexVec}; use rustc_data_structures::stable_hasher::StableHasher; use serialize::{Encodable, Decodable, Encoder, Decoder}; -use session::CrateDisambiguator; +use crate::session::CrateDisambiguator; use std::borrow::Borrow; use std::fmt::Write; use std::hash::Hash; @@ -30,7 +20,7 @@ use syntax::ast; use syntax::ext::hygiene::Mark; use syntax::symbol::{Symbol, InternedString}; use syntax_pos::{Span, DUMMY_SP}; -use util::nodemap::NodeMap; +use crate::util::nodemap::NodeMap; /// The DefPathTable maps DefIndexes to DefKeys and vice versa. /// Internally the DefPathTable holds a tree of DefKeys, where each DefKey @@ -173,10 +163,10 @@ pub struct Definitions { /// any) with a `DisambiguatedDefPathData`. #[derive(Clone, PartialEq, Debug, Hash, RustcEncodable, RustcDecodable)] pub struct DefKey { - /// Parent path. + /// The parent path. pub parent: Option, - /// Identifier of this node. + /// The identifier of this node. pub disambiguated_data: DisambiguatedDefPathData, } @@ -217,12 +207,12 @@ impl DefKey { } } -/// Pair of `DefPathData` and an integer disambiguator. The integer is +/// A pair of `DefPathData` and an integer disambiguator. The integer is /// normally 0, but in the event that there are multiple defs with the /// same `parent` and `data`, we use this field to disambiguate /// between them. This introduces some artificial ordering dependency /// but means that if you have (e.g.) two impls for the same type in -/// the same module, they do get distinct def-ids. +/// the same module, they do get distinct `DefId`s. #[derive(Clone, PartialEq, Debug, Hash, RustcEncodable, RustcDecodable)] pub struct DisambiguatedDefPathData { pub data: DefPathData, @@ -231,10 +221,10 @@ pub struct DisambiguatedDefPathData { #[derive(Clone, Debug, Hash, RustcEncodable, RustcDecodable)] pub struct DefPath { - /// the path leading from the crate root to the item + /// The path leading from the crate root to the item. pub data: Vec, - /// what krate root is this path relative to? + /// The crate root this path is relative to. pub krate: CrateNum, } @@ -270,9 +260,9 @@ impl DefPath { DefPath { data: data, krate: krate } } - /// Returns a string representation of the DefPath without + /// Returns a string representation of the `DefPath` without /// the crate-prefix. This method is useful if you don't have - /// a TyCtxt available. + /// a `TyCtxt` available. pub fn to_string_no_crate(&self) -> String { let mut s = String::with_capacity(self.data.len() * 16); @@ -287,7 +277,7 @@ impl DefPath { s } - /// Return filename friendly string of the DefPah with the + /// Returns a filename-friendly string for the `DefPath`, with the /// crate-prefix. pub fn to_string_friendly(&self, crate_imported_name: F) -> String where F: FnOnce(CrateNum) -> Symbol @@ -312,9 +302,9 @@ impl DefPath { s } - /// Return filename friendly string of the DefPah without + /// Returns a filename-friendly string of the `DefPath`, without /// the crate-prefix. This method is useful if you don't have - /// a TyCtxt available. + /// a `TyCtxt` available. 
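// Illustrative sketch (not part of the patch): the comment above explains that a `DefKey`
// is `(parent, data, disambiguator)` and that the disambiguator only kicks in when the
// same parent/data pair occurs twice (e.g. two `impl`s for the same type in one module).
// A simplified allocator showing how two identical keys still get distinct definitions:
use std::collections::HashMap;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct DefKey {
    parent: Option<u32>,
    data: String, // stands in for `DefPathData`
    disambiguator: u32,
}

#[derive(Default)]
struct Definitions {
    next_disambiguator: HashMap<(Option<u32>, String), u32>,
    keys: Vec<DefKey>,
}

impl Definitions {
    fn create_def(&mut self, parent: Option<u32>, data: &str) -> u32 {
        let slot = self
            .next_disambiguator
            .entry((parent, data.to_string()))
            .or_insert(0);
        let disambiguator = *slot;
        *slot += 1;
        self.keys.push(DefKey { parent, data: data.to_string(), disambiguator });
        (self.keys.len() - 1) as u32
    }
}

fn main() {
    let mut defs = Definitions::default();
    let a = defs.create_def(Some(0), "{{impl}}");
    let b = defs.create_def(Some(0), "{{impl}}");
    // Same parent and data, but the keys (and hence the def ids) stay distinct.
    assert_ne!(defs.keys[a as usize], defs.keys[b as usize]);
}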
pub fn to_filename_friendly_no_crate(&self) -> String { let mut s = String::with_capacity(self.data.len() * 16); @@ -366,16 +356,18 @@ pub enum DefPathData { /// A closure expression ClosureExpr, // Subportions of items - /// A type parameter (generic parameter) + /// A type (generic) parameter TypeParam(InternedString), - /// A lifetime definition + /// A lifetime (generic) parameter LifetimeParam(InternedString), + /// A const (generic) parameter + ConstParam(InternedString), /// A variant of a enum EnumVariant(InternedString), /// A struct field Field(InternedString), - /// Implicit ctor for a tuple-like struct - StructCtor, + /// Implicit ctor for a unit or tuple-like struct or enum variant. + Ctor, /// A constant expression (see {ast,hir}::AnonConst). AnonConst, /// An `impl Trait` type node @@ -383,7 +375,9 @@ pub enum DefPathData { /// GlobalMetaData identifies a piece of crate metadata that is global to /// a whole crate (as opposed to just one item). GlobalMetaData components /// are only supposed to show up right below the crate root. - GlobalMetaData(InternedString) + GlobalMetaData(InternedString), + /// A trait alias. + TraitAlias(InternedString), } #[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug, @@ -400,18 +394,18 @@ impl Borrow for DefPathHash { } impl Definitions { - /// Create new empty definition map. + /// Creates new empty definition map. /// - /// The DefIndex returned from a new Definitions are as follows: - /// 1. At DefIndexAddressSpace::Low, + /// The `DefIndex` returned from a new `Definitions` are as follows: + /// 1. At `DefIndexAddressSpace::Low`, /// CRATE_ROOT has index 0:0, and then new indexes are allocated in /// ascending order. - /// 2. At DefIndexAddressSpace::High, - /// the first FIRST_FREE_HIGH_DEF_INDEX indexes are reserved for - /// internal use, then 1:FIRST_FREE_HIGH_DEF_INDEX are allocated in + /// 2. At `DefIndexAddressSpace::High`, + /// the first `FIRST_FREE_HIGH_DEF_INDEX` indexes are reserved for + /// internal use, then `1:FIRST_FREE_HIGH_DEF_INDEX` are allocated in /// ascending order. - /// - /// FIXME: there is probably a better place to put this comment. + // + // FIXME: there is probably a better place to put this comment. pub fn new() -> Self { Self::default() } @@ -420,7 +414,7 @@ impl Definitions { &self.table } - /// Get the number of definitions. + /// Gets the number of definitions. pub fn def_index_counts_lo_hi(&self) -> (usize, usize) { (self.table.index_to_key[DefIndexAddressSpace::Low.index()].len(), self.table.index_to_key[DefIndexAddressSpace::High.index()].len()) @@ -475,6 +469,21 @@ impl Definitions { } } + // FIXME(@ljedrz): replace the NodeId variant + #[inline] + pub fn as_local_hir_id(&self, def_id: DefId) -> Option { + if def_id.krate == LOCAL_CRATE { + let hir_id = self.def_index_to_hir_id(def_id.index); + if hir_id != hir::DUMMY_HIR_ID { + Some(hir_id) + } else { + None + } + } else { + None + } + } + #[inline] pub fn node_to_hir_id(&self, node_id: ast::NodeId) -> hir::HirId { self.node_to_hir_id[node_id] @@ -488,8 +497,8 @@ impl Definitions { self.node_to_hir_id[node_id] } - /// Retrieve the span of the given `DefId` if `DefId` is in the local crate, the span exists and - /// it's not DUMMY_SP + /// Retrieves the span of the given `DefId` if `DefId` is in the local crate, the span exists + /// and it's not `DUMMY_SP`. 
#[inline] pub fn opt_span(&self, def_id: DefId) -> Option { if def_id.krate == LOCAL_CRATE { @@ -499,7 +508,7 @@ impl Definitions { } } - /// Add a definition with a parent definition. + /// Adds a root definition (no parent). pub fn create_root_def(&mut self, crate_name: &str, crate_disambiguator: CrateDisambiguator) @@ -597,7 +606,7 @@ impl Definitions { index } - /// Initialize the ast::NodeId to HirId mapping once it has been generated during + /// Initialize the `ast::NodeId` to `HirId` mapping once it has been generated during /// AST to HIR lowering. pub fn init_node_id_to_hir_id_mapping(&mut self, mapping: IndexVec) { @@ -625,6 +634,7 @@ impl DefPathData { match *self { TypeNs(name) | Trait(name) | + TraitAlias(name) | AssocTypeInTrait(name) | AssocTypeInImpl(name) | AssocExistentialInImpl(name) | @@ -633,6 +643,7 @@ impl DefPathData { MacroDef(name) | TypeParam(name) | LifetimeParam(name) | + ConstParam(name) | EnumVariant(name) | Field(name) | GlobalMetaData(name) => Some(name), @@ -641,7 +652,7 @@ impl DefPathData { CrateRoot | Misc | ClosureExpr | - StructCtor | + Ctor | AnonConst | ImplTrait => None } @@ -652,6 +663,7 @@ impl DefPathData { let s = match *self { TypeNs(name) | Trait(name) | + TraitAlias(name) | AssocTypeInTrait(name) | AssocTypeInImpl(name) | AssocExistentialInImpl(name) | @@ -660,19 +672,20 @@ impl DefPathData { MacroDef(name) | TypeParam(name) | LifetimeParam(name) | + ConstParam(name) | EnumVariant(name) | Field(name) | GlobalMetaData(name) => { return name } // note that this does not show up in user printouts - CrateRoot => "{{root}}", + CrateRoot => "{{crate}}", Impl => "{{impl}}", - Misc => "{{?}}", + Misc => "{{misc}}", ClosureExpr => "{{closure}}", - StructCtor => "{{constructor}}", + Ctor => "{{constructor}}", AnonConst => "{{constant}}", - ImplTrait => "{{impl-Trait}}", + ImplTrait => "{{opaque}}", }; Symbol::intern(s).as_interned_str() diff --git a/src/librustc/hir/map/hir_id_validator.rs b/src/librustc/hir/map/hir_id_validator.rs index 58d1a780f129c..fafe671b9eb8b 100644 --- a/src/librustc/hir/map/hir_id_validator.rs +++ b/src/librustc/hir/map/hir_id_validator.rs @@ -1,31 +1,26 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX}; -use hir::{self, intravisit, HirId, ItemLocalId}; +use crate::hir::def_id::{DefId, DefIndex, CRATE_DEF_INDEX}; +use crate::hir::{self, intravisit, HirId, ItemLocalId}; use syntax::ast::NodeId; -use hir::itemlikevisit::ItemLikeVisitor; -use rustc_data_structures::fx::FxHashMap; +use crate::hir::itemlikevisit::ItemLikeVisitor; +use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::sync::{Lock, ParallelIterator, par_iter}; pub fn check_crate<'hir>(hir_map: &hir::map::Map<'hir>) { - let mut outer_visitor = OuterVisitor { - hir_map, - errors: vec![], - }; - hir_map.dep_graph.assert_ignored(); - hir_map.krate().visit_all_item_likes(&mut outer_visitor); - if !outer_visitor.errors.is_empty() { - let message = outer_visitor - .errors + let errors = Lock::new(Vec::new()); + + par_iter(&hir_map.krate().modules).for_each(|(module_id, _)| { + hir_map.visit_item_likes_in_module(hir_map.local_def_id(*module_id), &mut OuterVisitor { + hir_map, + errors: &errors, + }); + }); + + let errors = errors.into_inner(); + + if !errors.is_empty() { + let message = errors .iter() .fold(String::new(), |s1, s2| s1 + "\n" + s2); bug!("{}", message); @@ -35,13 +30,13 @@ pub fn check_crate<'hir>(hir_map: &hir::map::Map<'hir>) { struct HirIdValidator<'a, 'hir: 'a> { hir_map: &'a hir::map::Map<'hir>, owner_def_index: Option, - hir_ids_seen: FxHashMap, - errors: Vec, + hir_ids_seen: FxHashSet, + errors: &'a Lock>, } struct OuterVisitor<'a, 'hir: 'a> { hir_map: &'a hir::map::Map<'hir>, - errors: Vec, + errors: &'a Lock>, } impl<'a, 'hir: 'a> OuterVisitor<'a, 'hir> { @@ -52,7 +47,7 @@ impl<'a, 'hir: 'a> OuterVisitor<'a, 'hir> { hir_map, owner_def_index: None, hir_ids_seen: Default::default(), - errors: Vec::new(), + errors: self.errors, } } } @@ -60,30 +55,32 @@ impl<'a, 'hir: 'a> OuterVisitor<'a, 'hir> { impl<'a, 'hir: 'a> ItemLikeVisitor<'hir> for OuterVisitor<'a, 'hir> { fn visit_item(&mut self, i: &'hir hir::Item) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); - inner_visitor.check(i.id, |this| intravisit::walk_item(this, i)); - self.errors.extend(inner_visitor.errors.drain(..)); + inner_visitor.check(i.hir_id, |this| intravisit::walk_item(this, i)); } fn visit_trait_item(&mut self, i: &'hir hir::TraitItem) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); - inner_visitor.check(i.id, |this| intravisit::walk_trait_item(this, i)); - self.errors.extend(inner_visitor.errors.drain(..)); + inner_visitor.check(i.hir_id, |this| intravisit::walk_trait_item(this, i)); } fn visit_impl_item(&mut self, i: &'hir hir::ImplItem) { let mut inner_visitor = self.new_inner_visitor(self.hir_map); - inner_visitor.check(i.id, |this| intravisit::walk_impl_item(this, i)); - self.errors.extend(inner_visitor.errors.drain(..)); + inner_visitor.check(i.hir_id, |this| intravisit::walk_impl_item(this, i)); } } impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { + #[cold] + #[inline(never)] + fn error(&self, f: impl FnOnce() -> String) { + self.errors.lock().push(f()); + } fn check)>(&mut self, - node_id: NodeId, + hir_id: HirId, walk: F) { assert!(self.owner_def_index.is_none()); - let owner_def_index = self.hir_map.local_def_id(node_id).index; + let owner_def_index = self.hir_map.local_def_id_from_hir_id(hir_id).index; self.owner_def_index = Some(owner_def_index); walk(self); @@ -93,7 +90,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { // There's always at least one entry for the owning item itself let max = self.hir_ids_seen - .keys() + .iter() 
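// Illustrative sketch (not part of the patch): the validator above now walks modules in
// parallel and pushes its diagnostics into a shared `Lock<Vec<String>>` instead of
// draining per-visitor vectors. `Lock` and `par_iter` are rustc-internal; the same shape
// with only the standard library (scoped threads standing in for the parallel iterator):
use std::sync::Mutex;

fn check_modules(modules: &[&str]) -> Vec<String> {
    let errors = Mutex::new(Vec::new());

    std::thread::scope(|s| {
        for module in modules {
            let errors = &errors;
            s.spawn(move || {
                // A stand-in for the per-module HIR-id validation.
                if module.is_empty() {
                    errors.lock().unwrap().push("empty module name".to_string());
                }
            });
        }
    });

    errors.into_inner().unwrap()
}

fn main() {
    let errors = check_modules(&["foo", "", "bar"]);
    assert_eq!(errors, vec!["empty module name".to_string()]);
}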
.map(|local_id| local_id.as_usize()) .max() .expect("owning item has no entry"); @@ -101,7 +98,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { if max != self.hir_ids_seen.len() - 1 { // Collect the missing ItemLocalIds let missing: Vec<_> = (0 ..= max as u32) - .filter(|&i| !self.hir_ids_seen.contains_key(&ItemLocalId::from_u32(i))) + .filter(|&i| !self.hir_ids_seen.contains(&ItemLocalId::from_u32(i))) .collect(); // Try to map those to something more useful @@ -129,15 +126,19 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { local_id, self.hir_map.node_to_string(node_id))); } - self.errors.push(format!( + self.error(|| format!( "ItemLocalIds not assigned densely in {}. \ Max ItemLocalId = {}, missing IDs = {:?}; seens IDs = {:?}", self.hir_map.def_path(DefId::local(owner_def_index)).to_string_no_crate(), max, missing_items, self.hir_ids_seen - .values() - .map(|n| format!("({:?} {})", n, self.hir_map.node_to_string(*n))) + .iter() + .map(|&local_id| HirId { + owner: owner_def_index, + local_id, + }) + .map(|h| format!("({:?} {})", h, self.hir_map.hir_to_string(h))) .collect::>())); } } @@ -150,35 +151,24 @@ impl<'a, 'hir: 'a> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> { intravisit::NestedVisitorMap::OnlyBodies(self.hir_map) } - fn visit_id(&mut self, node_id: NodeId) { + fn visit_id(&mut self, hir_id: HirId) { let owner = self.owner_def_index.expect("no owner_def_index"); - let stable_id = self.hir_map.definitions().node_to_hir_id[node_id]; - if stable_id == hir::DUMMY_HIR_ID { - self.errors.push(format!("HirIdValidator: No HirId assigned for NodeId {}: {:?}", - node_id, - self.hir_map.node_to_string(node_id))); + if hir_id == hir::DUMMY_HIR_ID { + self.error(|| format!("HirIdValidator: HirId {:?} is invalid", + self.hir_map.hir_to_string(hir_id))); return; } - if owner != stable_id.owner { - self.errors.push(format!( + if owner != hir_id.owner { + self.error(|| format!( "HirIdValidator: The recorded owner of {} is {} instead of {}", - self.hir_map.node_to_string(node_id), - self.hir_map.def_path(DefId::local(stable_id.owner)).to_string_no_crate(), + self.hir_map.hir_to_string(hir_id), + self.hir_map.def_path(DefId::local(hir_id.owner)).to_string_no_crate(), self.hir_map.def_path(DefId::local(owner)).to_string_no_crate())); } - if let Some(prev) = self.hir_ids_seen.insert(stable_id.local_id, node_id) { - if prev != node_id { - self.errors.push(format!( - "HirIdValidator: Same HirId {}/{} assigned for nodes {} and {}", - self.hir_map.def_path(DefId::local(stable_id.owner)).to_string_no_crate(), - stable_id.local_id.as_usize(), - self.hir_map.node_to_string(prev), - self.hir_map.node_to_string(node_id))); - } - } + self.hir_ids_seen.insert(hir_id.local_id); } fn visit_impl_item_ref(&mut self, _: &'hir hir::ImplItemRef) { diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index b98e279aef4b9..c0579ef0f7a96 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -1,23 +1,13 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
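// Illustrative sketch (not part of the patch): the check above asserts that the
// `ItemLocalId`s recorded for one owner are dense, i.e. exactly `0..=max` with no holes,
// and reports whichever ids are missing. The same logic over plain `u32`s:
use std::collections::HashSet;

fn missing_local_ids(seen: &HashSet<u32>) -> Vec<u32> {
    let max = match seen.iter().copied().max() {
        Some(max) => max,
        None => return Vec::new(), // nothing recorded for this owner
    };
    (0..=max).filter(|id| !seen.contains(id)).collect()
}

fn main() {
    let seen: HashSet<u32> = [0, 1, 3, 4].into_iter().collect();
    assert_eq!(missing_local_ids(&seen), vec![2]); // id 2 was never assigned
}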
- use self::collector::NodeCollector; pub use self::def_collector::{DefCollector, MacroInvocationData}; pub use self::definitions::{Definitions, DefKey, DefPath, DefPathData, DisambiguatedDefPathData, DefPathHash}; -use dep_graph::{DepGraph, DepNode, DepKind, DepNodeIndex}; +use crate::dep_graph::{DepGraph, DepNode, DepKind, DepNodeIndex}; -use hir::def_id::{CRATE_DEF_INDEX, DefId, LocalDefId, DefIndexAddressSpace}; +use crate::hir::def_id::{CRATE_DEF_INDEX, DefId, LocalDefId, DefIndexAddressSpace}; -use middle::cstore::CrateStore; +use crate::middle::cstore::CrateStoreDyn; use rustc_target::spec::abi::Abi; use rustc_data_structures::svh::Svh; @@ -26,13 +16,15 @@ use syntax::source_map::Spanned; use syntax::ext::base::MacroKind; use syntax_pos::{Span, DUMMY_SP}; -use hir::*; -use hir::print::Nested; -use util::nodemap::FxHashMap; +use crate::hir::*; +use crate::hir::itemlikevisit::ItemLikeVisitor; +use crate::hir::print::Nested; +use crate::util::nodemap::FxHashMap; +use crate::util::common::time; use std::io; use std::result::Result::Err; -use ty::TyCtxt; +use crate::ty::TyCtxt; pub mod blocks; mod collector; @@ -43,10 +35,11 @@ mod hir_id_validator; pub const ITEM_LIKE_SPACE: DefIndexAddressSpace = DefIndexAddressSpace::Low; pub const REGULAR_SPACE: DefIndexAddressSpace = DefIndexAddressSpace::High; -/// Represents an entry and its parent NodeId. +/// Represents an entry and its parent `NodeId`. #[derive(Copy, Clone, Debug)] pub struct Entry<'hir> { parent: NodeId, + parent_hir: HirId, dep_node: DepNodeIndex, node: Node<'hir>, } @@ -133,9 +126,9 @@ impl<'hir> Entry<'hir> { } } - fn is_body_owner(self, node_id: NodeId) -> bool { + fn is_body_owner(self, hir_id: HirId) -> bool { match self.associated_body() { - Some(b) => b.node_id == node_id, + Some(b) => b.hir_id == hir_id, None => false, } } @@ -159,10 +152,16 @@ impl Forest { self.dep_graph.read(DepNode::new_no_params(DepKind::Krate)); &self.krate } + + /// This is used internally in the dependency tracking system. + /// Use the `krate` method to ensure your dependency on the + /// crate is tracked. + pub fn untracked_krate<'hir>(&'hir self) -> &'hir Crate { + &self.krate + } } -/// Represents a mapping from Node IDs to AST elements and their parent -/// Node IDs +/// Represents a mapping from `NodeId`s to AST elements and their parent `NodeId`s. #[derive(Clone)] pub struct Map<'hir> { /// The backing storage for all the AST nodes. 
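// Illustrative sketch (not part of the patch): `Forest::krate` records a read of the
// whole-crate dep node before handing out the crate, while the new `untracked_krate`
// skips that read for callers inside the dependency-tracking machinery itself. A minimal
// model of that distinction, with a read log standing in for the dep graph:
use std::cell::RefCell;

struct Forest {
    krate: String,                     // stands in for `hir::Crate`
    reads: RefCell<Vec<&'static str>>, // stands in for the dep-graph read log
}

impl Forest {
    fn krate(&self) -> &str {
        self.reads.borrow_mut().push("Krate"); // dependency is recorded
        &self.krate
    }

    fn untracked_krate(&self) -> &str {
        &self.krate // no read recorded; only for use by the tracking system
    }
}

fn main() {
    let forest = Forest { krate: "crate".to_string(), reads: RefCell::new(Vec::new()) };
    forest.untracked_krate();
    forest.krate();
    assert_eq!(*forest.reads.borrow(), vec!["Krate"]);
}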
@@ -208,6 +207,12 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn read_by_hir_id(&self, hir_id: HirId) { + let node_id = self.hir_to_node_id(hir_id); + self.read(node_id); + } + #[inline] pub fn definitions(&self) -> &'hir Definitions { self.definitions @@ -224,6 +229,11 @@ impl<'hir> Map<'hir> { }) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn def_path_from_hir_id(&self, id: HirId) -> DefPath { + self.def_path(self.local_def_id_from_hir_id(id)) + } + pub fn def_path(&self, def_id: DefId) -> DefPath { assert!(def_id.is_local()); self.definitions.def_path(def_id.index) @@ -237,6 +247,23 @@ impl<'hir> Map<'hir> { }) } + // FIXME(@ljedrz): replace the NodeId variant + #[inline] + pub fn local_def_id_from_hir_id(&self, hir_id: HirId) -> DefId { + let node_id = self.hir_to_node_id(hir_id); + self.opt_local_def_id(node_id).unwrap_or_else(|| { + bug!("local_def_id_from_hir_id: no entry for `{:?}`, which has a map of `{:?}`", + hir_id, self.find_entry(node_id)) + }) + } + + // FIXME(@ljedrz): replace the NodeId variant + #[inline] + pub fn opt_local_def_id_from_hir_id(&self, hir_id: HirId) -> Option { + let node_id = self.hir_to_node_id(hir_id); + self.definitions.opt_local_def_id(node_id) + } + #[inline] pub fn opt_local_def_id(&self, node: NodeId) -> Option { self.definitions.opt_local_def_id(node) @@ -247,6 +274,12 @@ impl<'hir> Map<'hir> { self.definitions.as_local_node_id(def_id) } + // FIXME(@ljedrz): replace the NodeId variant + #[inline] + pub fn as_local_hir_id(&self, def_id: DefId) -> Option { + self.definitions.as_local_hir_id(def_id) + } + #[inline] pub fn hir_to_node_id(&self, hir_id: HirId) -> NodeId { self.hir_to_node_id[&hir_id] @@ -286,7 +319,7 @@ impl<'hir> Map<'hir> { match node { Node::Item(item) => { - let def_id = || self.local_def_id(item.id); + let def_id = || self.local_def_id_from_hir_id(item.hir_id); match item.node { ItemKind::Static(_, m, _) => Some(Def::Static(def_id(), m == MutMutable)), @@ -308,7 +341,7 @@ impl<'hir> Map<'hir> { } } Node::ForeignItem(item) => { - let def_id = self.local_def_id(item.id); + let def_id = self.local_def_id_from_hir_id(item.hir_id); match item.node { ForeignItemKind::Fn(..) => Some(Def::Fn(def_id)), ForeignItemKind::Static(_, m) => Some(Def::Static(def_id, m)), @@ -316,7 +349,7 @@ impl<'hir> Map<'hir> { } } Node::TraitItem(item) => { - let def_id = self.local_def_id(item.id); + let def_id = self.local_def_id_from_hir_id(item.hir_id); match item.node { TraitItemKind::Const(..) => Some(Def::AssociatedConst(def_id)), TraitItemKind::Method(..) => Some(Def::Method(def_id)), @@ -324,7 +357,7 @@ impl<'hir> Map<'hir> { } } Node::ImplItem(item) => { - let def_id = self.local_def_id(item.id); + let def_id = self.local_def_id_from_hir_id(item.hir_id); match item.node { ImplItemKind::Const(..) => Some(Def::AssociatedConst(def_id)), ImplItemKind::Method(..) 
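// Illustrative sketch (not part of the patch): most of the new `*_by_hir_id` methods
// above are thin bridges kept around during the NodeId-to-HirId migration -- they map
// the `HirId` back to a `NodeId` through the `hir_to_node_id` table and delegate to the
// existing method. A simplified version of that pattern, with stand-in id types:
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct NodeId(u32);
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct HirId(u32);

struct Map {
    hir_to_node_id: HashMap<HirId, NodeId>,
    spans: HashMap<NodeId, (u32, u32)>, // stands in for the real span lookup
}

impl Map {
    fn span(&self, id: NodeId) -> (u32, u32) {
        self.spans[&id]
    }

    // Bridge in the FIXME(@ljedrz) style: forward to the NodeId variant until callers migrate.
    fn span_by_hir_id(&self, id: HirId) -> (u32, u32) {
        let node_id = self.hir_to_node_id[&id];
        self.span(node_id)
    }
}

fn main() {
    let map = Map {
        hir_to_node_id: [(HirId(1), NodeId(10))].into_iter().collect(),
        spans: [(NodeId(10), (0, 5))].into_iter().collect(),
    };
    assert_eq!(map.span_by_hir_id(HirId(1)), (0, 5));
}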
=> Some(Def::Method(def_id)), @@ -333,11 +366,21 @@ impl<'hir> Map<'hir> { } } Node::Variant(variant) => { - let def_id = self.local_def_id(variant.node.data.id()); + let def_id = self.local_def_id_from_hir_id(variant.node.id); Some(Def::Variant(def_id)) } - Node::Field(_) | + Node::Ctor(variant_data) => { + let ctor_of = match self.find(self.get_parent_node(node_id)) { + Some(Node::Item(..)) => def::CtorOf::Struct, + Some(Node::Variant(..)) => def::CtorOf::Variant, + _ => unreachable!(), + }; + variant_data.ctor_hir_id() + .map(|hir_id| self.local_def_id_from_hir_id(hir_id)) + .map(|def_id| Def::Ctor(def_id, ctor_of, def::CtorKind::from_hir(variant_data))) + } Node::AnonConst(_) | + Node::Field(_) | Node::Expr(_) | Node::Stmt(_) | Node::PathSegment(_) | @@ -345,27 +388,38 @@ impl<'hir> Map<'hir> { Node::TraitRef(_) | Node::Pat(_) | Node::Binding(_) | - Node::StructCtor(_) | Node::Lifetime(_) | Node::Visibility(_) | Node::Block(_) | Node::Crate => None, Node::Local(local) => { - Some(Def::Local(local.id)) + Some(Def::Local(self.hir_to_node_id(local.hir_id))) } Node::MacroDef(macro_def) => { - Some(Def::Macro(self.local_def_id(macro_def.id), + Some(Def::Macro(self.local_def_id_from_hir_id(macro_def.hir_id), MacroKind::Bang)) } Node::GenericParam(param) => { Some(match param.kind { - GenericParamKind::Lifetime { .. } => Def::Local(param.id), - GenericParamKind::Type { .. } => Def::TyParam(self.local_def_id(param.id)), + GenericParamKind::Lifetime { .. } => { + let node_id = self.hir_to_node_id(param.hir_id); + Def::Local(node_id) + }, + GenericParamKind::Type { .. } => Def::TyParam( + self.local_def_id_from_hir_id(param.hir_id)), + GenericParamKind::Const { .. } => Def::ConstParam( + self.local_def_id_from_hir_id(param.hir_id)), }) } } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn describe_def_by_hir_id(&self, hir_id: HirId) -> Option { + let node_id = self.hir_to_node_id(hir_id); + self.describe_def(node_id) + } + fn entry_count(&self) -> usize { self.map.len() } @@ -379,7 +433,7 @@ impl<'hir> Map<'hir> { } pub fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem { - self.read(id.node_id); + self.read_by_hir_id(id.hir_id); // N.B., intentionally bypass `self.forest.krate()` so that we // do not trigger a read of the whole krate here @@ -387,7 +441,7 @@ impl<'hir> Map<'hir> { } pub fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem { - self.read(id.node_id); + self.read_by_hir_id(id.hir_id); // N.B., intentionally bypass `self.forest.krate()` so that we // do not trigger a read of the whole krate here @@ -395,7 +449,7 @@ impl<'hir> Map<'hir> { } pub fn body(&self, id: BodyId) -> &'hir Body { - self.read(id.node_id); + self.read_by_hir_id(id.hir_id); // N.B., intentionally bypass `self.forest.krate()` so that we // do not trigger a read of the whole krate here @@ -410,12 +464,19 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn fn_decl_by_hir_id(&self, hir_id: HirId) -> Option { + let node_id = self.hir_to_node_id(hir_id); + self.fn_decl(node_id) + } + /// Returns the `NodeId` that corresponds to the definition of /// which this is the body of, i.e., a `fn`, `const` or `static` /// item (possibly associated), a closure, or a `hir::AnonConst`. 
- pub fn body_owner(&self, BodyId { node_id }: BodyId) -> NodeId { + pub fn body_owner(&self, BodyId { hir_id }: BodyId) -> NodeId { + let node_id = self.hir_to_node_id(hir_id); let parent = self.get_parent_node(node_id); - assert!(self.map[parent.as_usize()].map_or(false, |e| e.is_body_owner(node_id))); + assert!(self.map[parent.as_usize()].map_or(false, |e| e.is_body_owner(hir_id))); parent } @@ -423,7 +484,7 @@ impl<'hir> Map<'hir> { self.local_def_id(self.body_owner(id)) } - /// Given a node id, returns the `BodyId` associated with it, + /// Given a `NodeId`, returns the `BodyId` associated with it, /// if the node is a body owner, otherwise returns `None`. pub fn maybe_body_owned_by(&self, id: NodeId) -> Option { if let Some(entry) = self.find_entry(id) { @@ -439,11 +500,17 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn maybe_body_owned_by_by_hir_id(&self, id: HirId) -> Option { + let node_id = self.hir_to_node_id(id); + self.maybe_body_owned_by(node_id) + } + /// Given a body owner's id, returns the `BodyId` associated with it. - pub fn body_owned_by(&self, id: NodeId) -> BodyId { - self.maybe_body_owned_by(id).unwrap_or_else(|| { - span_bug!(self.span(id), "body_owned_by: {} has no associated body", - self.node_to_string(id)); + pub fn body_owned_by(&self, id: HirId) -> BodyId { + self.maybe_body_owned_by_by_hir_id(id).unwrap_or_else(|| { + span_bug!(self.span_by_hir_id(id), "body_owned_by: {} has no associated body", + self.hir_to_string(id)); }) } @@ -455,31 +522,47 @@ impl<'hir> Map<'hir> { Node::AnonConst(_) => { BodyOwnerKind::Const } + Node::Ctor(..) | + Node::Item(&Item { node: ItemKind::Fn(..), .. }) | + Node::TraitItem(&TraitItem { node: TraitItemKind::Method(..), .. }) | + Node::ImplItem(&ImplItem { node: ImplItemKind::Method(..), .. }) => { + BodyOwnerKind::Fn + } Node::Item(&Item { node: ItemKind::Static(_, m, _), .. }) => { BodyOwnerKind::Static(m) } - // Default to function if it's not a constant or static. - _ => BodyOwnerKind::Fn + Node::Expr(&Expr { node: ExprKind::Closure(..), .. }) => { + BodyOwnerKind::Closure + } + node => bug!("{:#?} is not a body node", node), } } - pub fn ty_param_owner(&self, id: NodeId) -> NodeId { - match self.get(id) { - Node::Item(&Item { node: ItemKind::Trait(..), .. }) => id, - Node::GenericParam(_) => self.get_parent_node(id), - _ => bug!("ty_param_owner: {} not a type parameter", self.node_to_string(id)) + // FIXME(@ljedrz): replace the NodeId variant + pub fn body_owner_kind_by_hir_id(&self, id: HirId) -> BodyOwnerKind { + let node_id = self.hir_to_node_id(id); + self.body_owner_kind(node_id) + } + + pub fn ty_param_owner(&self, id: HirId) -> HirId { + match self.get_by_hir_id(id) { + Node::Item(&Item { node: ItemKind::Trait(..), .. }) | + Node::Item(&Item { node: ItemKind::TraitAlias(..), .. }) => id, + Node::GenericParam(_) => self.get_parent_node_by_hir_id(id), + _ => bug!("ty_param_owner: {} not a type parameter", self.hir_to_string(id)) } } - pub fn ty_param_name(&self, id: NodeId) -> Name { - match self.get(id) { - Node::Item(&Item { node: ItemKind::Trait(..), .. }) => keywords::SelfUpper.name(), + pub fn ty_param_name(&self, id: HirId) -> Name { + match self.get_by_hir_id(id) { + Node::Item(&Item { node: ItemKind::Trait(..), .. }) | + Node::Item(&Item { node: ItemKind::TraitAlias(..), .. 
}) => keywords::SelfUpper.name(), Node::GenericParam(param) => param.name.ident().name, - _ => bug!("ty_param_name: {} not a type parameter", self.node_to_string(id)), + _ => bug!("ty_param_name: {} not a type parameter", self.hir_to_string(id)), } } - pub fn trait_impls(&self, trait_did: DefId) -> &'hir [NodeId] { + pub fn trait_impls(&self, trait_did: DefId) -> &'hir [HirId] { self.dep_graph.read(DepNode::new_no_params(DepKind::AllLocalTraitImpls)); // N.B., intentionally bypass `self.forest.krate()` so that we @@ -487,19 +570,7 @@ impl<'hir> Map<'hir> { self.forest.krate.trait_impls.get(&trait_did).map_or(&[], |xs| &xs[..]) } - pub fn trait_auto_impl(&self, trait_did: DefId) -> Option { - self.dep_graph.read(DepNode::new_no_params(DepKind::AllLocalTraitImpls)); - - // N.B., intentionally bypass `self.forest.krate()` so that we - // do not trigger a read of the whole krate here - self.forest.krate.trait_auto_impl.get(&trait_did).cloned() - } - - pub fn trait_is_auto(&self, trait_did: DefId) -> bool { - self.trait_auto_impl(trait_did).is_some() - } - - /// Get the attributes on the krate. This is preferable to + /// Gets the attributes on the crate. This is preferable to /// invoking `krate.attrs` because it registers a tighter /// dep-graph access. pub fn krate_attrs(&self) -> &'hir [ast::Attribute] { @@ -509,6 +580,47 @@ impl<'hir> Map<'hir> { &self.forest.krate.attrs } + pub fn get_module(&self, module: DefId) -> (&'hir Mod, Span, HirId) { + let node_id = self.as_local_node_id(module).unwrap(); + let hir_id = self.node_to_hir_id(node_id); + self.read(node_id); + match self.find_entry(node_id).unwrap().node { + Node::Item(&Item { + span, + node: ItemKind::Mod(ref m), + .. + }) => (m, span, hir_id), + Node::Crate => (&self.forest.krate.module, self.forest.krate.span, hir_id), + _ => panic!("not a module") + } + } + + pub fn visit_item_likes_in_module(&self, module: DefId, visitor: &mut V) + where V: ItemLikeVisitor<'hir> + { + let node_id = self.as_local_node_id(module).unwrap(); + + // Read the module so we'll be re-executed if new items + // appear immediately under in the module. If some new item appears + // in some nested item in the module, we'll be re-executed due to reads + // in the expect_* calls the loops below + self.read(node_id); + + let module = &self.forest.krate.modules[&node_id]; + + for id in &module.items { + visitor.visit_item(self.expect_item_by_hir_id(*id)); + } + + for id in &module.trait_items { + visitor.visit_trait_item(self.expect_trait_item(id.hir_id)); + } + + for id in &module.impl_items { + visitor.visit_impl_item(self.expect_impl_item(id.hir_id)); + } + } + /// Retrieve the Node corresponding to `id`, panicking if it cannot /// be found. pub fn get(&self, id: NodeId) -> Node<'hir> { @@ -516,6 +628,12 @@ impl<'hir> Map<'hir> { self.find(id).unwrap_or_else(|| bug!("couldn't find node id {} in the AST map", id)) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_by_hir_id(&self, id: HirId) -> Node<'hir> { + let node_id = self.hir_to_node_id(id); + self.get(node_id) + } + pub fn get_if_local(&self, id: DefId) -> Option> { self.as_local_node_id(id).map(|id| self.get(id)) // read recorded by `get` } @@ -547,8 +665,7 @@ impl<'hir> Map<'hir> { self.get_generics(id).map(|generics| generics.span).filter(|sp| *sp != DUMMY_SP) } - /// Retrieve the Node corresponding to `id`, returning None if - /// cannot be found. + /// Retrieves the `Node` corresponding to `id`, returning `None` if cannot be found. 
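// Illustrative sketch (not part of the patch): `visit_item_likes_in_module` above lets a
// pass walk only the item-likes of one module (the crate keeps a per-module list of item
// ids), which is what the parallelized HIR-id validator relies on. A reduced model of
// the lookup-and-visit loop, with plain ids standing in for HIR items:
use std::collections::HashMap;

struct ModuleItems {
    items: Vec<u32>, // ids of the items directly inside the module
}

struct Krate {
    modules: HashMap<u32, ModuleItems>,
    item_names: HashMap<u32, &'static str>,
}

trait ItemLikeVisitor {
    fn visit_item(&mut self, name: &str);
}

fn visit_item_likes_in_module<V: ItemLikeVisitor>(krate: &Krate, module: u32, visitor: &mut V) {
    for id in &krate.modules[&module].items {
        visitor.visit_item(krate.item_names[id]);
    }
}

struct Collector(Vec<String>);

impl ItemLikeVisitor for Collector {
    fn visit_item(&mut self, name: &str) {
        self.0.push(name.to_string());
    }
}

fn main() {
    let krate = Krate {
        modules: [(0, ModuleItems { items: vec![1, 2] })].into_iter().collect(),
        item_names: [(1, "foo"), (2, "bar")].into_iter().collect(),
    };
    let mut collector = Collector(Vec::new());
    visit_item_likes_in_module(&krate, 0, &mut collector);
    assert_eq!(collector.0, vec!["foo", "bar"]);
}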
pub fn find(&self, id: NodeId) -> Option> { let result = self.find_entry(id).and_then(|entry| { if let Node::Crate = entry.node { @@ -563,6 +680,12 @@ impl<'hir> Map<'hir> { result } + // FIXME(@ljedrz): replace the NodeId variant + pub fn find_by_hir_id(&self, hir_id: HirId) -> Option> { + let node_id = self.hir_to_node_id(hir_id); + self.find(node_id) + } + /// Similar to `get_parent`; returns the parent node-id, or own `id` if there is /// no parent. Note that the parent may be `CRATE_NODE_ID`, which is not itself /// present in the map -- so passing the return value of get_parent_node to @@ -571,8 +694,8 @@ impl<'hir> Map<'hir> { /// returns the enclosing item. Note that this might not be the actual parent /// node in the AST - some kinds of nodes are not in the map and these will /// never appear as the parent_node. So you can always walk the `parent_nodes` - /// from a node to the root of the ast (unless you get the same id back here - /// that can happen if the id is not in the map itself or is just weird). + /// from a node to the root of the ast (unless you get the same ID back here + /// that can happen if the ID is not in the map itself or is just weird). pub fn get_parent_node(&self, id: NodeId) -> NodeId { if self.dep_graph.is_fully_enabled() { let hir_id_owner = self.node_to_hir_id(id).owner; @@ -583,6 +706,13 @@ impl<'hir> Map<'hir> { self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_parent_node_by_hir_id(&self, id: HirId) -> HirId { + let node_id = self.hir_to_node_id(id); + let parent_node_id = self.get_parent_node(node_id); + self.node_to_hir_id(parent_node_id) + } + /// Check if the node is an argument. An argument is a local variable whose /// immediate parent is an item or a closure. pub fn is_argument(&self, id: NodeId) -> bool { @@ -606,7 +736,7 @@ impl<'hir> Map<'hir> { /// If there is some error when walking the parents (e.g., a node does not /// have a parent in the map or a node can't be found), then we return the - /// last good node id we found. Note that reaching the crate root (`id == 0`), + /// last good `NodeId` we found. Note that reaching the crate root (`id == 0`), /// is not an error, since items in the crate module have the crate root as /// parent. fn walk_parent_nodes(&self, @@ -642,7 +772,7 @@ impl<'hir> Map<'hir> { } } - /// Retrieve the `NodeId` for `id`'s enclosing method, unless there's a + /// Retrieves the `NodeId` for `id`'s enclosing method, unless there's a /// `while` or `loop` before reaching it, as block tail returns are not /// available in them. /// @@ -664,12 +794,13 @@ impl<'hir> Map<'hir> { /// false /// } /// ``` - pub fn get_return_block(&self, id: NodeId) -> Option { + pub fn get_return_block(&self, id: HirId) -> Option { let match_fn = |node: &Node<'_>| { match *node { Node::Item(_) | Node::ForeignItem(_) | Node::TraitItem(_) | + Node::Expr(Expr { node: ExprKind::Closure(..), ..}) | Node::ImplItem(_) => true, _ => false, } @@ -678,7 +809,7 @@ impl<'hir> Map<'hir> { match *node { Node::Expr(ref expr) => { match expr.node { - ExprKind::While(..) | ExprKind::Loop(..) => true, + ExprKind::While(..) | ExprKind::Loop(..) | ExprKind::Ret(..) 
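// Illustrative sketch (not part of the patch): the parent-walking helpers above
// repeatedly follow `get_parent_node` until a predicate matches, or the walk reaches the
// crate root (a node that is its own parent) and gives up. A reduced version of that
// loop over a plain parent table:
use std::collections::HashMap;

fn walk_parents(
    parents: &HashMap<u32, u32>,
    start: u32,
    found: impl Fn(u32) -> bool,
) -> Option<u32> {
    let mut id = start;
    loop {
        let parent = *parents.get(&id)?;
        if found(parent) {
            return Some(parent);
        }
        if parent == id {
            // Reached the root (or a node that maps to itself): give up.
            return None;
        }
        id = parent;
    }
}

fn main() {
    // 3 -> 2 -> 1 -> 0, and 0 is its own parent (the "crate root").
    let parents: HashMap<u32, u32> = [(3, 2), (2, 1), (1, 0), (0, 0)].into_iter().collect();
    assert_eq!(walk_parents(&parents, 3, |id| id == 1), Some(1));
    assert_eq!(walk_parents(&parents, 3, |id| id == 99), None);
}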
=> true, _ => false, } } @@ -686,10 +817,13 @@ impl<'hir> Map<'hir> { } }; - self.walk_parent_nodes(id, match_fn, match_non_returning_block).ok() + let node_id = self.hir_to_node_id(id); + self.walk_parent_nodes(node_id, match_fn, match_non_returning_block) + .ok() + .map(|return_node_id| self.node_to_hir_id(return_node_id)) } - /// Retrieve the `NodeId` for `id`'s parent item, or `id` itself if no + /// Retrieves the `NodeId` for `id`'s parent item, or `id` itself if no /// parent item is in this map. The "parent item" is the closest parent node /// in the HIR which is recorded by the map and is an item, either an item /// in a module, trait, or impl. @@ -706,12 +840,25 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_parent_item(&self, id: HirId) -> HirId { + let node_id = self.hir_to_node_id(id); + let parent_node_id = self.get_parent(node_id); + self.node_to_hir_id(parent_node_id) + } + /// Returns the `DefId` of `id`'s nearest module parent, or `id` itself if no /// module parent is in this map. pub fn get_module_parent(&self, id: NodeId) -> DefId { self.local_def_id(self.get_module_parent_node(id)) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_module_parent_by_hir_id(&self, id: HirId) -> DefId { + let node_id = self.hir_to_node_id(id); + self.get_module_parent(node_id) + } + /// Returns the `NodeId` of `id`'s nearest module parent, or `id` itself if no /// module parent is in this map. pub fn get_module_parent_node(&self, id: NodeId) -> NodeId { @@ -743,6 +890,12 @@ impl<'hir> Map<'hir> { self.local_def_id(self.get_parent(id)) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_parent_did_by_hir_id(&self, id: HirId) -> DefId { + let node_id = self.hir_to_node_id(id); + self.get_parent_did(node_id) + } + pub fn get_foreign_abi(&self, id: NodeId) -> Abi { let parent = self.get_parent(id); if let Some(entry) = self.find_entry(parent) { @@ -756,6 +909,12 @@ impl<'hir> Map<'hir> { bug!("expected foreign mod or inlined parent, found {}", self.node_to_string(parent)) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn get_foreign_abi_by_hir_id(&self, id: HirId) -> Abi { + let node_id = self.hir_to_node_id(id); + self.get_foreign_abi(node_id) + } + pub fn expect_item(&self, id: NodeId) -> &'hir Item { match self.find(id) { // read recorded by `find` Some(Node::Item(item)) => item, @@ -763,46 +922,54 @@ impl<'hir> Map<'hir> { } } - pub fn expect_impl_item(&self, id: NodeId) -> &'hir ImplItem { - match self.find(id) { + // FIXME(@ljedrz): replace the NodeId variant + pub fn expect_item_by_hir_id(&self, id: HirId) -> &'hir Item { + match self.find_by_hir_id(id) { // read recorded by `find` + Some(Node::Item(item)) => item, + _ => bug!("expected item, found {}", self.hir_to_string(id)) + } + } + + pub fn expect_impl_item(&self, id: HirId) -> &'hir ImplItem { + match self.find_by_hir_id(id) { Some(Node::ImplItem(item)) => item, - _ => bug!("expected impl item, found {}", self.node_to_string(id)) + _ => bug!("expected impl item, found {}", self.hir_to_string(id)) } } - pub fn expect_trait_item(&self, id: NodeId) -> &'hir TraitItem { - match self.find(id) { + pub fn expect_trait_item(&self, id: HirId) -> &'hir TraitItem { + match self.find_by_hir_id(id) { Some(Node::TraitItem(item)) => item, - _ => bug!("expected trait item, found {}", self.node_to_string(id)) + _ => bug!("expected trait item, found {}", self.hir_to_string(id)) } } - pub fn expect_variant_data(&self, id: NodeId) -> &'hir VariantData { - match self.find(id) 
{ + pub fn expect_variant_data(&self, id: HirId) -> &'hir VariantData { + match self.find_by_hir_id(id) { Some(Node::Item(i)) => { match i.node { ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => struct_def, - _ => bug!("struct ID bound to non-struct {}", self.node_to_string(id)) + _ => bug!("struct ID bound to non-struct {}", self.hir_to_string(id)) } } - Some(Node::StructCtor(data)) => data, Some(Node::Variant(variant)) => &variant.node.data, - _ => bug!("expected struct or variant, found {}", self.node_to_string(id)) + Some(Node::Ctor(data)) => data, + _ => bug!("expected struct or variant, found {}", self.hir_to_string(id)) } } - pub fn expect_variant(&self, id: NodeId) -> &'hir Variant { - match self.find(id) { + pub fn expect_variant(&self, id: HirId) -> &'hir Variant { + match self.find_by_hir_id(id) { Some(Node::Variant(variant)) => variant, - _ => bug!("expected variant, found {}", self.node_to_string(id)), + _ => bug!("expected variant, found {}", self.hir_to_string(id)), } } - pub fn expect_foreign_item(&self, id: NodeId) -> &'hir ForeignItem { - match self.find(id) { + pub fn expect_foreign_item(&self, id: HirId) -> &'hir ForeignItem { + match self.find_by_hir_id(id) { Some(Node::ForeignItem(item)) => item, - _ => bug!("expected foreign item, found {}", self.node_to_string(id)) + _ => bug!("expected foreign item, found {}", self.hir_to_string(id)) } } @@ -813,28 +980,41 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn expect_expr_by_hir_id(&self, id: HirId) -> &'hir Expr { + let node_id = self.hir_to_node_id(id); + self.expect_expr(node_id) + } + /// Returns the name associated with the given NodeId's AST. pub fn name(&self, id: NodeId) -> Name { match self.get(id) { - Node::Item(i) => i.name, - Node::ForeignItem(i) => i.name, + Node::Item(i) => i.ident.name, + Node::ForeignItem(fi) => fi.ident.name, Node::ImplItem(ii) => ii.ident.name, Node::TraitItem(ti) => ti.ident.name, - Node::Variant(v) => v.node.name, + Node::Variant(v) => v.node.ident.name, Node::Field(f) => f.ident.name, Node::Lifetime(lt) => lt.name.ident().name, Node::GenericParam(param) => param.name.ident().name, - Node::Binding(&Pat { node: PatKind::Binding(_,_,l,_), .. }) => l.name, - Node::StructCtor(_) => self.name(self.get_parent(id)), + Node::Binding(&Pat { node: PatKind::Binding(_, _, l, _), .. }) => l.name, + Node::Ctor(..) => self.name(self.get_parent(id)), _ => bug!("no name for {}", self.node_to_string(id)) } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn name_by_hir_id(&self, id: HirId) -> Name { + let node_id = self.hir_to_node_id(id); + self.name(node_id) + } + /// Given a node ID, get a list of attributes associated with the AST /// corresponding to the Node ID pub fn attrs(&self, id: NodeId) -> &'hir [ast::Attribute] { self.read(id); // reveals attributes on the node - let attrs = match self.find(id) { + let attrs = match self.find_entry(id).map(|entry| entry.node) { + Some(Node::Local(l)) => Some(&l.attrs[..]), Some(Node::Item(i)) => Some(&i.attrs[..]), Some(Node::ForeignItem(fi)) => Some(&fi.attrs[..]), Some(Node::TraitItem(ref ti)) => Some(&ti.attrs[..]), @@ -844,14 +1024,21 @@ impl<'hir> Map<'hir> { Some(Node::Expr(ref e)) => Some(&*e.attrs), Some(Node::Stmt(ref s)) => Some(s.node.attrs()), Some(Node::GenericParam(param)) => Some(¶m.attrs[..]), - // unit/tuple structs take the attributes straight from - // the struct definition. 
- Some(Node::StructCtor(_)) => return self.attrs(self.get_parent(id)), + // Unit/tuple structs/variants take the attributes straight from + // the struct/variant definition. + Some(Node::Ctor(..)) => return self.attrs(self.get_parent(id)), + Some(Node::Crate) => Some(&self.forest.krate.attrs[..]), _ => None }; attrs.unwrap_or(&[]) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn attrs_by_hir_id(&self, id: HirId) -> &'hir [ast::Attribute] { + let node_id = self.hir_to_node_id(id); + self.attrs(node_id) + } + /// Returns an iterator that yields the node id's with paths that /// match `parts`. (Requires `parts` is non-empty.) /// @@ -887,7 +1074,11 @@ impl<'hir> Map<'hir> { Some(Node::Binding(pat)) => pat.span, Some(Node::Pat(pat)) => pat.span, Some(Node::Block(block)) => block.span, - Some(Node::StructCtor(_)) => self.expect_item(self.get_parent(id)).span, + Some(Node::Ctor(..)) => match self.find(self.get_parent_node(id)) { + Some(Node::Item(item)) => item.span, + Some(Node::Variant(variant)) => variant.span, + _ => unreachable!(), + } Some(Node::Lifetime(lifetime)) => lifetime.span, Some(Node::GenericParam(param)) => param.span, Some(Node::Visibility(&Spanned { @@ -901,6 +1092,12 @@ impl<'hir> Map<'hir> { } } + // FIXME(@ljedrz): replace the NodeId variant + pub fn span_by_hir_id(&self, id: HirId) -> Span { + let node_id = self.hir_to_node_id(id); + self.span(node_id) + } + pub fn span_if_local(&self, id: DefId) -> Option { self.as_local_node_id(id).map(|id| self.span(id)) } @@ -909,13 +1106,28 @@ impl<'hir> Map<'hir> { node_id_to_string(self, id, true) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn hir_to_string(&self, id: HirId) -> String { + hir_id_to_string(self, id, true) + } + pub fn node_to_user_string(&self, id: NodeId) -> String { node_id_to_string(self, id, false) } + // FIXME(@ljedrz): replace the NodeId variant + pub fn hir_to_user_string(&self, id: HirId) -> String { + hir_id_to_string(self, id, false) + } + pub fn node_to_pretty_string(&self, id: NodeId) -> String { print::to_string(self, |s| s.print_node(self.get(id))) } + + // FIXME(@ljedrz): replace the NodeId variant + pub fn hir_to_pretty_string(&self, id: HirId) -> String { + print::to_string(self, |s| s.print_node(self.get_by_hir_id(id))) + } } pub struct NodesMatchingSuffix<'a, 'hir:'a> { @@ -926,7 +1138,7 @@ pub struct NodesMatchingSuffix<'a, 'hir:'a> { } impl<'a, 'hir> NodesMatchingSuffix<'a, 'hir> { - /// Returns true only if some suffix of the module path for parent + /// Returns `true` only if some suffix of the module path for parent /// matches `self.in_which`. /// /// In other words: let `[x_0,x_1,...,x_k]` be `self.in_which`; @@ -956,7 +1168,7 @@ impl<'a, 'hir> NodesMatchingSuffix<'a, 'hir> { loop { if let Node::Item(item) = map.find(id)? 
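// Illustrative sketch (not part of the patch): `NodesMatchingSuffix` above accepts a
// node when the trailing components of its enclosing module path match the queried
// `in_which` segments (so `["b", "c"]` matches anything living in `...::b::c`). The core
// comparison, reduced to plain string slices:
fn suffix_matches(module_path: &[&str], in_which: &[&str]) -> bool {
    module_path.len() >= in_which.len()
        && module_path[module_path.len() - in_which.len()..] == *in_which
}

fn main() {
    assert!(suffix_matches(&["a", "b", "c"], &["b", "c"]));
    assert!(suffix_matches(&["a", "b", "c"], &[])); // the empty suffix always matches
    assert!(!suffix_matches(&["a", "b", "c"], &["a", "b"]));
}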
{ if item_is_mod(&item) { - return Some((id, item.name)) + return Some((id, item.ident.name)) } } let parent = map.get_parent(id); @@ -1012,33 +1224,40 @@ trait Named { impl Named for Spanned { fn name(&self) -> Name { self.node.name() } } -impl Named for Item { fn name(&self) -> Name { self.name } } -impl Named for ForeignItem { fn name(&self) -> Name { self.name } } -impl Named for VariantKind { fn name(&self) -> Name { self.name } } +impl Named for Item { fn name(&self) -> Name { self.ident.name } } +impl Named for ForeignItem { fn name(&self) -> Name { self.ident.name } } +impl Named for VariantKind { fn name(&self) -> Name { self.ident.name } } impl Named for StructField { fn name(&self) -> Name { self.ident.name } } impl Named for TraitItem { fn name(&self) -> Name { self.ident.name } } impl Named for ImplItem { fn name(&self) -> Name { self.ident.name } } -pub fn map_crate<'hir>(sess: &::session::Session, - cstore: &dyn CrateStore, - forest: &'hir mut Forest, +pub fn map_crate<'hir>(sess: &crate::session::Session, + cstore: &CrateStoreDyn, + forest: &'hir Forest, definitions: &'hir Definitions) -> Map<'hir> { + // Build the reverse mapping of `node_to_hir_id`. + let hir_to_node_id = definitions.node_to_hir_id.iter_enumerated() + .map(|(node_id, &hir_id)| (hir_id, node_id)).collect(); + let (map, crate_hash) = { - let hcx = ::ich::StableHashingContext::new(sess, &forest.krate, definitions, cstore); + let hcx = crate::ich::StableHashingContext::new(sess, &forest.krate, definitions, cstore); - let mut collector = NodeCollector::root(&forest.krate, + let mut collector = NodeCollector::root(sess, + &forest.krate, &forest.dep_graph, &definitions, - hcx, - sess.source_map()); + &hir_to_node_id, + hcx); intravisit::walk_crate(&mut collector, &forest.krate); let crate_disambiguator = sess.local_crate_disambiguator(); let cmdline_args = sess.opts.dep_tracking_hash(); - collector.finalize_and_compute_crate_hash(crate_disambiguator, - cstore, - cmdline_args) + collector.finalize_and_compute_crate_hash( + crate_disambiguator, + cstore, + cmdline_args + ) }; if log_enabled!(::log::Level::Debug) { @@ -1053,10 +1272,6 @@ pub fn map_crate<'hir>(sess: &::session::Session, entries, vector_length, (entries as f64 / vector_length as f64) * 100.); } - // Build the reverse mapping of `node_to_hir_id`. - let hir_to_node_id = definitions.node_to_hir_id.iter_enumerated() - .map(|(node_id, &hir_id)| (hir_id, node_id)).collect(); - let map = Map { forest, dep_graph: forest.dep_graph.clone(), @@ -1066,7 +1281,9 @@ pub fn map_crate<'hir>(sess: &::session::Session, definitions, }; - hir_id_validator::check_crate(&map); + time(sess, "validate hir map", || { + hir_id_validator::check_crate(&map); + }); map } @@ -1076,7 +1293,7 @@ pub fn map_crate<'hir>(sess: &::session::Session, impl<'hir> print::PpAnn for Map<'hir> { fn nested(&self, state: &mut print::State<'_>, nested: print::Nested) -> io::Result<()> { match nested { - Nested::Item(id) => state.print_item(self.expect_item(id.id)), + Nested::Item(id) => state.print_item(self.expect_item_by_hir_id(id.id)), Nested::TraitItem(id) => state.print_trait_item(self.trait_item(id)), Nested::ImplItem(id) => state.print_impl_item(self.impl_item(id)), Nested::Body(id) => state.print_expr(&self.body(id).value), @@ -1117,7 +1334,7 @@ impl<'a> print::State<'a> { // these cases do not carry enough information in the // hir_map to reconstruct their full structure for pretty // printing. - Node::StructCtor(_) => bug!("cannot print isolated StructCtor"), + Node::Ctor(..) 
=> bug!("cannot print isolated Ctor"), Node::Local(a) => self.print_local_decl(&a), Node::MacroDef(_) => bug!("cannot print MacroDef"), Node::Crate => bug!("cannot print Crate"), @@ -1132,9 +1349,10 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { let path_str = || { // This functionality is used for debugging, try to use TyCtxt to get // the user-friendly path, otherwise fall back to stringifying DefPath. - ::ty::tls::with_opt(|tcx| { + crate::ty::tls::with_opt(|tcx| { if let Some(tcx) = tcx { - tcx.node_path_str(id) + let def_id = map.local_def_id(id); + tcx.def_path_str(def_id) } else if let Some(path) = map.def_path_from_id(id) { path.data.into_iter().map(|elem| { elem.data.to_string() @@ -1197,7 +1415,7 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { } Some(Node::Variant(ref variant)) => { format!("variant {} in {}{}", - variant.node.name, + variant.node.ident, path_str(), id_str) } Some(Node::Field(ref field)) => { @@ -1235,8 +1453,8 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { Some(Node::Local(_)) => { format!("local {}{}", map.node_to_pretty_string(id), id_str) } - Some(Node::StructCtor(_)) => { - format!("struct_ctor {}{}", path_str(), id_str) + Some(Node::Ctor(..)) => { + format!("ctor {}{}", path_str(), id_str) } Some(Node::Lifetime(_)) => { format!("lifetime {}{}", map.node_to_pretty_string(id), id_str) @@ -1255,6 +1473,12 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { } } +// FIXME(@ljedrz): replace the NodeId variant +fn hir_id_to_string(map: &Map<'_>, id: HirId, include_id: bool) -> String { + let node_id = map.hir_to_node_id(id); + node_id_to_string(map, node_id, include_id) +} + pub fn describe_def(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option { if let Some(node_id) = tcx.hir().as_local_node_id(def_id) { tcx.hir().describe_def(node_id) diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index 156d55b9e2fe6..58a27d3f78eec 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // HIR datatypes. See the [rustc guide] for more info. //! //! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/hir.html @@ -20,30 +10,32 @@ pub use self::PrimTy::*; pub use self::UnOp::*; pub use self::UnsafeSource::*; -use hir::def::Def; -use hir::def_id::{DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX}; -use util::nodemap::{NodeMap, FxHashSet}; -use mir::mono::Linkage; +use crate::hir::def::Def; +use crate::hir::def_id::{DefId, DefIndex, LocalDefId, CRATE_DEF_INDEX}; +use crate::util::nodemap::{NodeMap, FxHashSet}; +use crate::mir::mono::Linkage; +use errors::FatalError; use syntax_pos::{Span, DUMMY_SP, symbol::InternedString}; -use syntax::source_map::{self, Spanned}; +use syntax::source_map::Spanned; use rustc_target::spec::abi::Abi; -use syntax::ast::{self, CrateSugar, Ident, Name, NodeId, DUMMY_NODE_ID, AsmDialect}; -use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy}; -use syntax::attr::InlineAttr; +use syntax::ast::{self, CrateSugar, Ident, Name, NodeId, AsmDialect}; +use syntax::ast::{Attribute, Label, Lit, StrStyle, FloatTy, IntTy, UintTy}; +use syntax::attr::{InlineAttr, OptimizeAttr}; use syntax::ext::hygiene::SyntaxContext; use syntax::ptr::P; use syntax::symbol::{Symbol, keywords}; use syntax::tokenstream::TokenStream; use syntax::util::parser::ExprPrecedence; -use ty::AdtKind; -use ty::query::Providers; +use crate::ty::AdtKind; +use crate::ty::query::Providers; -use rustc_data_structures::sync::{ParallelIterator, par_iter, Send, Sync, scope}; +use rustc_data_structures::sync::{par_for_each_in, Send, Sync}; use rustc_data_structures::thin_vec::ThinVec; +use rustc_macros::HashStable; use serialize::{self, Encoder, Encodable, Decoder, Decodable}; -use std::collections::BTreeMap; +use std::collections::{BTreeSet, BTreeMap}; use std::fmt; /// HIR doesn't commit to a concrete storage type and has its own alias for a vector. @@ -71,17 +63,17 @@ pub mod map; pub mod pat_util; pub mod print; -/// A HirId uniquely identifies a node in the HIR of the current crate. It is -/// composed of the `owner`, which is the DefIndex of the directly enclosing -/// hir::Item, hir::TraitItem, or hir::ImplItem (i.e., the closest "item-like"), +/// Uniquely identifies a node in the HIR of the current crate. It is +/// composed of the `owner`, which is the `DefIndex` of the directly enclosing +/// `hir::Item`, `hir::TraitItem`, or `hir::ImplItem` (i.e., the closest "item-like"), /// and the `local_id` which is unique within the given owner. /// /// This two-level structure makes for more stable values: One can move an item /// around within the source code, or add or remove stuff before it, without -/// the local_id part of the HirId changing, which is a very useful property in +/// the `local_id` part of the `HirId` changing, which is a very useful property in /// incremental compilation where we have to persist things through changes to /// the code base. 
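[editor's note] The doc comment above describes the two-level `owner` + `local_id` split. A standalone sketch of why that shape is stable under edits (plain `u32` wrappers standing in for the real `DefIndex`/`ItemLocalId` newtypes):

```rust
// Illustrative stand-ins for DefIndex and ItemLocalId.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
struct OwnerId(u32); // which item-like (fn, impl item, ...) owns the node
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
struct LocalId(u32); // dense index within that owner, starting at zero

#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)]
struct Id {
    owner: OwnerId,
    local_id: LocalId,
}

fn main() {
    // Moving the owning item around in the source changes where the owner
    // sits in the crate, but the `local_id`s of the nodes inside it stay
    // the same -- the property the comment above credits for incremental
    // compilation friendliness. The dense local range also means a Vec can
    // serve as a per-owner side table.
    let a = Id { owner: OwnerId(3), local_id: LocalId(0) };
    let b = Id { owner: OwnerId(3), local_id: LocalId(1) };
    assert!(a < b);
}
```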
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] pub struct HirId { pub owner: DefIndex, pub local_id: ItemLocalId, @@ -121,25 +113,34 @@ impl serialize::UseSpecializedDecodable for HirId { } } +impl fmt::Display for HirId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + // hack to ensure that we don't try to access the private parts of `ItemLocalId` in this module mod item_local_id_inner { use rustc_data_structures::indexed_vec::Idx; - /// An `ItemLocalId` uniquely identifies something within a given "item-like", - /// that is within a hir::Item, hir::TraitItem, or hir::ImplItem. There is no - /// guarantee that the numerical value of a given `ItemLocalId` corresponds to - /// the node's position within the owning item in any way, but there is a - /// guarantee that the `LocalItemId`s within an owner occupy a dense range of - /// integers starting at zero, so a mapping that maps all or most nodes within - /// an "item-like" to something else can be implement by a `Vec` instead of a - /// tree or hash map. + use rustc_macros::HashStable; newtype_index! { - pub struct ItemLocalId { .. } + /// An `ItemLocalId` uniquely identifies something within a given "item-like", + /// that is within a hir::Item, hir::TraitItem, or hir::ImplItem. There is no + /// guarantee that the numerical value of a given `ItemLocalId` corresponds to + /// the node's position within the owning item in any way, but there is a + /// guarantee that the `LocalItemId`s within an owner occupy a dense range of + /// integers starting at zero, so a mapping that maps all or most nodes within + /// an "item-like" to something else can be implement by a `Vec` instead of a + /// tree or hash map. + pub struct ItemLocalId { + derive [HashStable] + } } } pub use self::item_local_id_inner::ItemLocalId; -/// The `HirId` corresponding to CRATE_NODE_ID and CRATE_DEF_INDEX +/// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_INDEX`. pub const CRATE_HIR_ID: HirId = HirId { owner: CRATE_DEF_INDEX, local_id: ItemLocalId::from_u32_const(0) @@ -152,24 +153,13 @@ pub const DUMMY_HIR_ID: HirId = HirId { pub const DUMMY_ITEM_LOCAL_ID: ItemLocalId = ItemLocalId::MAX; -#[derive(Clone, RustcEncodable, RustcDecodable, Copy)] -pub struct Label { - pub ident: Ident, -} - -impl fmt::Debug for Label { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "label({:?})", self.ident) - } -} - -#[derive(Clone, RustcEncodable, RustcDecodable, Copy)] +#[derive(Clone, RustcEncodable, RustcDecodable, Copy, HashStable)] pub struct Lifetime { - pub id: NodeId, + pub hir_id: HirId, pub span: Span, - /// Either "'a", referring to a named lifetime definition, - /// or "" (aka keywords::Invalid), for elision placeholders. + /// Either "`'a`", referring to a named lifetime definition, + /// or "``" (i.e., `keywords::Invalid`), for elision placeholders. /// /// HIR lowering inserts these placeholders in type paths that /// refer to type definitions needing lifetime parameters, @@ -177,13 +167,14 @@ pub struct Lifetime { pub name: LifetimeName, } -#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] +#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy, HashStable)] pub enum ParamName { /// Some user-given name like `T` or `'x`. 
Plain(Ident), - /// Synthetic name generated when user elided a lifetime in an impl header, - /// e.g., the lifetimes in cases like these: + /// Synthetic name generated when user elided a lifetime in an impl header. + /// + /// E.g., the lifetimes in cases like these: /// /// impl Foo for &u32 /// impl Foo<'_> for u32 @@ -199,7 +190,7 @@ pub enum ParamName { /// Indicates an illegal name was given and an error has been /// repored (so we should squelch other derived errors). Occurs - /// when e.g., `'_` is used in the wrong place. + /// when, e.g., `'_` is used in the wrong place. Error, } @@ -219,22 +210,22 @@ impl ParamName { } } -#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy)] +#[derive(Debug, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Copy, HashStable)] pub enum LifetimeName { /// User-given names or fresh (synthetic) names. Param(ParamName), - /// User typed nothing. e.g., the lifetime in `&u32`. + /// User wrote nothing (e.g., the lifetime in `&u32`). Implicit, /// Indicates an error during lowering (usually `'_` in wrong place) /// that was already reported. Error, - /// User typed `'_`. + /// User wrote specifies `'_`. Underscore, - /// User wrote `'static` + /// User wrote `'static`. Static, } @@ -284,7 +275,7 @@ impl fmt::Debug for Lifetime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "lifetime({}: {})", - self.id, + self.hir_id, print::to_string(print::NO_ANN, |s| s.print_lifetime(self))) } } @@ -299,10 +290,10 @@ impl Lifetime { } } -/// A "Path" is essentially Rust's notion of a name; for instance: +/// A `Path` is essentially Rust's notion of a name; for instance, /// `std::cmp::PartialEq`. It's represented as a sequence of identifiers, /// along with a bunch of supporting information. -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable, HashStable)] pub struct Path { pub span: Span, /// The definition that the path resolved to. @@ -331,16 +322,17 @@ impl fmt::Display for Path { /// A segment of a path: an identifier, an optional lifetime, and a set of /// types. -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct PathSegment { /// The identifier portion of this path segment. + #[stable_hasher(project(name))] pub ident: Ident, // `id` and `def` are optional. We currently only use these in save-analysis, // any path segments without these will not have save-analysis info and // therefore will not have 'jump to def' in IDEs, but otherwise will not be // affected. (In general, we don't bother to get the defs for synthesized // segments, only for segments which have come from the AST). - pub id: Option, + pub hir_id: Option, pub def: Option, /// Type/lifetime parameters attached to this path. They come in @@ -358,11 +350,11 @@ pub struct PathSegment { } impl PathSegment { - /// Convert an identifier to the corresponding segment. + /// Converts an identifier to the corresponding segment. 
pub fn from_ident(ident: Ident) -> PathSegment { PathSegment { ident, - id: None, + hir_id: None, def: None, infer_types: true, args: None, @@ -371,14 +363,14 @@ impl PathSegment { pub fn new( ident: Ident, - id: Option, + hir_id: Option, def: Option, args: GenericArgs, infer_types: bool, ) -> Self { PathSegment { ident, - id, + hir_id, def, infer_types, args: if args.is_empty() { @@ -403,10 +395,17 @@ impl PathSegment { } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] +pub struct ConstArg { + pub value: AnonConst, + pub span: Span, +} + +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum GenericArg { Lifetime(Lifetime), Type(Ty), + Const(ConstArg), } impl GenericArg { @@ -414,23 +413,25 @@ impl GenericArg { match self { GenericArg::Lifetime(l) => l.span, GenericArg::Type(t) => t.span, + GenericArg::Const(c) => c.span, } } - pub fn id(&self) -> NodeId { + pub fn id(&self) -> HirId { match self { - GenericArg::Lifetime(l) => l.id, - GenericArg::Type(t) => t.id, + GenericArg::Lifetime(l) => l.hir_id, + GenericArg::Type(t) => t.hir_id, + GenericArg::Const(c) => c.value.hir_id, } } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct GenericArgs { /// The generic arguments for this path segment. pub args: HirVec, /// Bindings (equality constraints) on associated types, if present. - /// E.g., `Foo`. + /// E.g., `Foo`. pub bindings: HirVec, /// Were arguments written in parenthesized form `Fn(T) -> U`? /// This is required mostly for pretty-printing and diagnostics, @@ -462,6 +463,7 @@ impl GenericArgs { } break; } + GenericArg::Const(_) => {} } } } @@ -478,6 +480,7 @@ impl GenericArgs { match arg { GenericArg::Lifetime(_) => own_counts.lifetimes += 1, GenericArg::Type(_) => own_counts.types += 1, + GenericArg::Const(_) => own_counts.consts += 1, }; } @@ -487,7 +490,7 @@ impl GenericArgs { /// A modifier on a bound, currently this is only used for `?Sized`, where the /// modifier is `Maybe`. Negative bounds should also be handled here. -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] pub enum TraitBoundModifier { None, Maybe, @@ -497,7 +500,7 @@ pub enum TraitBoundModifier { /// `typeck::collect::compute_bounds` matches these against /// the "special" built-in traits (see `middle::lang_items`) and /// detects `Copy`, `Send` and `Sync`. -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum GenericBound { Trait(PolyTraitRef, TraitBoundModifier), Outlives(Lifetime), @@ -514,7 +517,7 @@ impl GenericBound { pub type GenericBounds = HirVec; -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum LifetimeParamKind { // Indicates that the lifetime definition was explicitly declared (e.g., in // `fn foo<'a>(x: &'a u8) -> &'a u8 { x }`). @@ -533,7 +536,7 @@ pub enum LifetimeParamKind { Error, } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum GenericParamKind { /// A lifetime definition (e.g., `'a: 'b + 'c + 'd`). 
Lifetime { @@ -542,12 +545,15 @@ pub enum GenericParamKind { Type { default: Option>, synthetic: Option, + }, + Const { + ty: P, } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct GenericParam { - pub id: NodeId, + pub hir_id: HirId, pub name: ParamName, pub attrs: HirVec, pub bounds: GenericBounds, @@ -561,11 +567,12 @@ pub struct GenericParam { pub struct GenericParamCount { pub lifetimes: usize, pub types: usize, + pub consts: usize, } /// Represents lifetimes and type parameters attached to a declaration /// of a function, enum, trait, etc. -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct Generics { pub params: HirVec, pub where_clause: WhereClause, @@ -577,7 +584,7 @@ impl Generics { Generics { params: HirVec::new(), where_clause: WhereClause { - id: DUMMY_NODE_ID, + hir_id: DUMMY_HIR_ID, predicates: HirVec::new(), }, span: DUMMY_SP, @@ -594,6 +601,7 @@ impl Generics { match param.kind { GenericParamKind::Lifetime { .. } => own_counts.lifetimes += 1, GenericParamKind::Type { .. } => own_counts.types += 1, + GenericParamKind::Const { .. } => own_counts.consts += 1, }; } @@ -610,17 +618,17 @@ impl Generics { } } -/// Synthetic Type Parameters are converted to an other form during lowering, this allows -/// to track the original form they had. Useful for error messages. -#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] +/// Synthetic type parameters are converted to another form during lowering; this allows +/// us to track the original form they had, and is useful for error messages. +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, HashStable)] pub enum SyntheticTyParamKind { ImplTrait } -/// A `where` clause in a definition -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// A where-clause in a definition. +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct WhereClause { - pub id: NodeId, + pub hir_id: HirId, pub predicates: HirVec, } @@ -636,8 +644,8 @@ impl WhereClause { } } -/// A single predicate in a `where` clause -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// A single predicate in a where-clause. +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum WherePredicate { /// A type binding (e.g., `for<'c> Foo: Send + Clone + 'c`). BoundPredicate(WhereBoundPredicate), @@ -657,35 +665,44 @@ impl WherePredicate { } } -/// A type bound, eg `for<'c> Foo: Send+Clone+'c` -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// A type bound (e.g., `for<'c> Foo: Send + Clone + 'c`). +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct WhereBoundPredicate { pub span: Span, - /// Any generics from a `for` binding + /// Any generics from a `for` binding. pub bound_generic_params: HirVec, - /// The type being bounded + /// The type being bounded. pub bounded_ty: P, - /// Trait and lifetime bounds (`Clone+Send+'static`) + /// Trait and lifetime bounds (e.g., `Clone + Send + 'static`). pub bounds: GenericBounds, } -/// A lifetime predicate, e.g., `'a: 'b+'c` -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// A lifetime predicate (e.g., `'a: 'b + 'c`). 
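[editor's note] With `GenericParamKind::Const` and the new `consts` counter added above, `own_counts` now tallies three kinds of parameters. A hedged, simplified sketch of that counting loop (payload types dropped; not rustc's actual definitions):

```rust
enum GenericParamKind {
    Lifetime,
    Type,
    Const,
}

#[derive(Default, Debug, PartialEq)]
struct GenericParamCount {
    lifetimes: usize,
    types: usize,
    consts: usize,
}

fn own_counts(params: &[GenericParamKind]) -> GenericParamCount {
    let mut counts = GenericParamCount::default();
    for param in params {
        match param {
            GenericParamKind::Lifetime => counts.lifetimes += 1,
            GenericParamKind::Type => counts.types += 1,
            GenericParamKind::Const => counts.consts += 1,
        }
    }
    counts
}

fn main() {
    // e.g. a signature like `fn f<'a, T, const N: usize>()` has one of each.
    let params = [
        GenericParamKind::Lifetime,
        GenericParamKind::Type,
        GenericParamKind::Const,
    ];
    assert_eq!(
        own_counts(&params),
        GenericParamCount { lifetimes: 1, types: 1, consts: 1 }
    );
}
```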
+#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct WhereRegionPredicate { pub span: Span, pub lifetime: Lifetime, pub bounds: GenericBounds, } -/// An equality predicate (unsupported), e.g., `T=int` -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// An equality predicate (e.g., `T = int`); currently unsupported. +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct WhereEqPredicate { - pub id: NodeId, + pub hir_id: HirId, pub span: Span, pub lhs_ty: P, pub rhs_ty: P, } +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +pub struct ModuleItems { + // Use BTreeSets here so items are in the same order as in the + // list of all items in Crate + pub items: BTreeSet, + pub trait_items: BTreeSet, + pub impl_items: BTreeSet, +} + /// The top-level data structure that stores the entire contents of /// the crate currently being compiled. /// @@ -705,23 +722,26 @@ pub struct Crate { // does, because it can affect the order in which errors are // detected, which in turn can make compile-fail tests yield // slightly different results. - pub items: BTreeMap, + pub items: BTreeMap, pub trait_items: BTreeMap, pub impl_items: BTreeMap, pub bodies: BTreeMap, - pub trait_impls: BTreeMap>, - pub trait_auto_impl: BTreeMap, + pub trait_impls: BTreeMap>, /// A list of the body ids written out in the order in which they /// appear in the crate. If you're going to process all the bodies /// in the crate, you should iterate over this list rather than the keys /// of bodies. pub body_ids: Vec, + + /// A list of modules written out in the order in which they + /// appear in the crate. This includes the main crate module. + pub modules: BTreeMap, } impl Crate { - pub fn item(&self, id: NodeId) -> &Item { + pub fn item(&self, id: HirId) -> &Item { &self.items[&id] } @@ -757,27 +777,21 @@ impl Crate { } } - /// A parallel version of visit_all_item_likes + /// A parallel version of `visit_all_item_likes`. pub fn par_visit_all_item_likes<'hir, V>(&'hir self, visitor: &V) where V: itemlikevisit::ParItemLikeVisitor<'hir> + Sync + Send { - scope(|s| { - s.spawn(|_| { - par_iter(&self.items).for_each(|(_, item)| { - visitor.visit_item(item); - }); + parallel!({ + par_for_each_in(&self.items, |(_, item)| { + visitor.visit_item(item); }); - - s.spawn(|_| { - par_iter(&self.trait_items).for_each(|(_, trait_item)| { - visitor.visit_trait_item(trait_item); - }); + }, { + par_for_each_in(&self.trait_items, |(_, trait_item)| { + visitor.visit_trait_item(trait_item); }); - - s.spawn(|_| { - par_iter(&self.impl_items).for_each(|(_, impl_item)| { - visitor.visit_impl_item(impl_item); - }); + }, { + par_for_each_in(&self.impl_items, |(_, impl_item)| { + visitor.visit_impl_item(impl_item); }); }); } @@ -790,43 +804,41 @@ impl Crate { /// A macro definition, in this crate or imported from another. /// /// Not parsed directly, but created on macro import or `macro_rules!` expansion. -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct MacroDef { pub name: Name, pub vis: Visibility, pub attrs: HirVec, - pub id: NodeId, + pub hir_id: HirId, pub span: Span, pub body: TokenStream, pub legacy: bool, } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// A block of statements `{ .. }`, which may have a label (in this case the +/// `targeted_by_break` field will be `true`) and may be `unsafe` by means of +/// the `rules` being anything but `DefaultBlock`. 
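[editor's note] For readers unfamiliar with the `targeted_by_break` field mentioned in the new `Block` doc comment: it marks blocks that a labelled `break` may exit early. A small surface-syntax illustration (labelled block breaks are stable in current Rust; at the time of this patch they were the unstable `label_break_value` feature):

```rust
fn classify(n: i32) -> &'static str {
    // This labelled block is the kind of block `targeted_by_break` flags:
    // `break 'check <value>` leaves it early with that value.
    'check: {
        if n < 0 {
            break 'check "negative";
        }
        if n == 0 {
            break 'check "zero";
        }
        "positive"
    }
}

fn main() {
    assert_eq!(classify(-3), "negative");
    assert_eq!(classify(0), "zero");
    assert_eq!(classify(7), "positive");
}
```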
+#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct Block { - /// Statements in a block + /// Statements in a block. pub stmts: HirVec, /// An expression at the end of the block - /// without a semicolon, if any + /// without a semicolon, if any. pub expr: Option>, - pub id: NodeId, + #[stable_hasher(ignore)] pub hir_id: HirId, - /// Distinguishes between `unsafe { ... }` and `{ ... }` + /// Distinguishes between `unsafe { ... }` and `{ ... }`. pub rules: BlockCheckMode, pub span: Span, /// If true, then there may exist `break 'a` values that aim to /// break out of this block early. /// Used by `'label: {}` blocks and by `catch` statements. pub targeted_by_break: bool, - /// If true, don't emit return value type errors as the parser had - /// to recover from a parse error so this block will not have an - /// appropriate type. A parse error will have been emitted so the - /// compilation will never succeed if this is true. - pub recovered: bool, } -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable, HashStable)] pub struct Pat { - pub id: NodeId, + #[stable_hasher(ignore)] pub hir_id: HirId, pub node: PatKind, pub span: Span, @@ -834,7 +846,7 @@ pub struct Pat { impl fmt::Debug for Pat { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "pat({}: {})", self.id, + write!(f, "pat({}: {})", self.hir_id, print::to_string(print::NO_ANN, |s| s.print_pat(self))) } } @@ -882,17 +894,19 @@ impl Pat { } } -/// A single field in a struct pattern +/// A single field in a struct pattern. /// /// Patterns like the fields of Foo `{ x, ref y, ref mut z }` /// are treated the same as` x: x, y: ref y, z: ref mut z`, -/// except is_shorthand is true -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// except `is_shorthand` is true. +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct FieldPat { - pub id: NodeId, - /// The identifier for the field + #[stable_hasher(ignore)] + pub hir_id: HirId, + /// The identifier for the field. + #[stable_hasher(project(name))] pub ident: Ident, - /// The pattern the field is destructured to + /// The pattern the field is destructured to. pub pat: P, pub is_shorthand: bool, } @@ -900,7 +914,7 @@ pub struct FieldPat { /// Explicit binding annotations given in the HIR for a binding. Note /// that this is not the final binding *mode* that we infer after type /// inference. -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy)] +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, HashStable)] pub enum BindingAnnotation { /// No binding annotation given: this means that the final binding mode /// will depend on whether we have skipped through a `&` reference @@ -921,60 +935,66 @@ pub enum BindingAnnotation { RefMut, } -#[derive(Copy, Clone, PartialEq, RustcEncodable, RustcDecodable, Debug)] +#[derive(Copy, Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum RangeEnd { Included, Excluded, } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum PatKind { - /// Represents a wildcard pattern (`_`) + /// Represents a wildcard pattern (i.e., `_`). Wild, /// A fresh binding `ref mut binding @ OPT_SUBPATTERN`. 
- /// The `NodeId` is the canonical ID for the variable being bound, - /// e.g., in `Ok(x) | Err(x)`, both `x` use the same canonical ID, + /// The `HirId` is the canonical ID for the variable being bound, + /// (e.g., in `Ok(x) | Err(x)`, both `x` use the same canonical ID), /// which is the pattern ID of the first `x`. - Binding(BindingAnnotation, NodeId, Ident, Option>), + Binding(BindingAnnotation, HirId, Ident, Option>), - /// A struct or struct variant pattern, e.g., `Variant {x, y, ..}`. + /// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`). /// The `bool` is `true` in the presence of a `..`. Struct(QPath, HirVec>, bool), /// A tuple struct/variant pattern `Variant(x, y, .., z)`. /// If the `..` pattern fragment is present, then `Option` denotes its position. - /// 0 <= position <= subpats.len() + /// `0 <= position <= subpats.len()` TupleStruct(QPath, HirVec>, Option), /// A path pattern for an unit struct/variant or a (maybe-associated) constant. Path(QPath), - /// A tuple pattern `(a, b)`. + /// A tuple pattern (e.g., `(a, b)`). /// If the `..` pattern fragment is present, then `Option` denotes its position. - /// 0 <= position <= subpats.len() + /// `0 <= position <= subpats.len()` Tuple(HirVec>, Option), - /// A `box` pattern + + /// A `box` pattern. Box(P), - /// A reference pattern, e.g., `&mut (a, b)` + + /// A reference pattern (e.g., `&mut (a, b)`). Ref(P, Mutability), - /// A literal + + /// A literal. Lit(P), - /// A range pattern, e.g., `1...2` or `1..2` + + /// A range pattern (e.g., `1...2` or `1..2`). Range(P, P, RangeEnd), + /// `[a, b, ..i, y, z]` is represented as: - /// `PatKind::Slice(box [a, b], Some(i), box [y, z])` + /// `PatKind::Slice(box [a, b], Some(i), box [y, z])`. Slice(HirVec>, Option>, HirVec>), } -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, HashStable, + RustcEncodable, RustcDecodable, Hash, Debug, Copy)] pub enum Mutability { MutMutable, MutImmutable, } impl Mutability { - /// Return MutMutable only if both arguments are mutable. + /// Returns `MutMutable` only if both arguments are mutable. pub fn and(self, other: Self) -> Self { match self { MutMutable => other, @@ -983,43 +1003,43 @@ impl Mutability { } } -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, Hash)] +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, Hash, HashStable)] pub enum BinOpKind { - /// The `+` operator (addition) + /// The `+` operator (addition). Add, - /// The `-` operator (subtraction) + /// The `-` operator (subtraction). Sub, - /// The `*` operator (multiplication) + /// The `*` operator (multiplication). Mul, - /// The `/` operator (division) + /// The `/` operator (division). Div, - /// The `%` operator (modulus) + /// The `%` operator (modulus). Rem, - /// The `&&` operator (logical and) + /// The `&&` operator (logical and). And, - /// The `||` operator (logical or) + /// The `||` operator (logical or). Or, - /// The `^` operator (bitwise xor) + /// The `^` operator (bitwise xor). BitXor, - /// The `&` operator (bitwise and) + /// The `&` operator (bitwise and). BitAnd, - /// The `|` operator (bitwise or) + /// The `|` operator (bitwise or). BitOr, - /// The `<<` operator (shift left) + /// The `<<` operator (shift left). Shl, - /// The `>>` operator (shift right) + /// The `>>` operator (shift right). Shr, - /// The `==` operator (equality) + /// The `==` operator (equality). 
Eq, - /// The `<` operator (less than) + /// The `<` operator (less than). Lt, - /// The `<=` operator (less than or equal to) + /// The `<=` operator (less than or equal to). Le, - /// The `!=` operator (not equal to) + /// The `!=` operator (not equal to). Ne, - /// The `>=` operator (greater than or equal to) + /// The `>=` operator (greater than or equal to). Ge, - /// The `>` operator (greater than) + /// The `>` operator (greater than). Gt, } @@ -1084,7 +1104,7 @@ impl BinOpKind { } } - /// Returns `true` if the binary operator takes its arguments by value + /// Returns `true` if the binary operator takes its arguments by value. pub fn is_by_value(self) -> bool { !self.is_comparison() } @@ -1117,13 +1137,13 @@ impl Into for BinOpKind { pub type BinOp = Spanned; -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, Hash)] +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, Hash, HashStable)] pub enum UnOp { - /// The `*` operator for dereferencing + /// The `*` operator (deferencing). UnDeref, - /// The `!` operator for logical inversion + /// The `!` operator (logical negation). UnNot, - /// The `-` operator for negation + /// The `-` operator (negation). UnNeg, } @@ -1136,7 +1156,7 @@ impl UnOp { } } - /// Returns `true` if the unary operator takes its argument by value + /// Returns `true` if the unary operator takes its argument by value. pub fn is_by_value(self) -> bool { match self { UnNeg | UnNot => true, @@ -1145,114 +1165,93 @@ impl UnOp { } } -/// A statement -pub type Stmt = Spanned; +/// A statement. +#[derive(Clone, RustcEncodable, RustcDecodable)] +pub struct Stmt { + pub hir_id: HirId, + pub node: StmtKind, + pub span: Span, +} -impl fmt::Debug for StmtKind { +impl fmt::Debug for Stmt { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Sadness. - let spanned = source_map::dummy_spanned(self.clone()); - write!(f, - "stmt({}: {})", - spanned.node.id(), - print::to_string(print::NO_ANN, |s| s.print_stmt(&spanned))) + write!(f, "stmt({}: {})", self.hir_id, + print::to_string(print::NO_ANN, |s| s.print_stmt(self))) } } -#[derive(Clone, RustcEncodable, RustcDecodable)] +/// The contents of a statement. +#[derive(Clone, RustcEncodable, RustcDecodable, HashStable)] pub enum StmtKind { - /// Could be an item or a local (let) binding: - Decl(P, NodeId), + /// A local (`let`) binding. + Local(P), - /// Expr without trailing semi-colon (must have unit type): - Expr(P, NodeId), + /// An item binding. + Item(ItemId), - /// Expr with trailing semi-colon (may have any type): - Semi(P, NodeId), + /// An expression without a trailing semi-colon (must have unit type). + Expr(P), + + /// An expression with a trailing semi-colon (may have any type). + Semi(P), } impl StmtKind { pub fn attrs(&self) -> &[Attribute] { match *self { - StmtKind::Decl(ref d, _) => d.node.attrs(), - StmtKind::Expr(ref e, _) | - StmtKind::Semi(ref e, _) => &e.attrs, - } - } - - pub fn id(&self) -> NodeId { - match *self { - StmtKind::Decl(_, id) | - StmtKind::Expr(_, id) | - StmtKind::Semi(_, id) => id, + StmtKind::Local(ref l) => &l.attrs, + StmtKind::Item(_) => &[], + StmtKind::Expr(ref e) | + StmtKind::Semi(ref e) => &e.attrs, } } } -/// Local represents a `let` statement, e.g., `let : = ;` -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// Represents a `let` statement (i.e., `let : = ;`). 
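[editor's note] The `Stmt`/`StmtKind` rework above removes the intermediate `Decl` layer: `let` bindings and nested items become direct `StmtKind` variants, and the statement's ID moves onto `Stmt` itself as a `hir_id`. A hedged before/after sketch of just that shape change (payloads simplified to `String`):

```rust
mod old {
    #[allow(dead_code)]
    pub enum Decl { Local(String), Item(String) }
    #[allow(dead_code)]
    pub enum StmtKind { Decl(Decl), Expr(String), Semi(String) }
}

mod new {
    #[allow(dead_code)]
    pub enum StmtKind { Local(String), Item(String), Expr(String), Semi(String) }
}

fn main() {
    // One less layer to match through when walking statements.
    let before = old::StmtKind::Decl(old::Decl::Local("x".into()));
    let after = new::StmtKind::Local("x".into());
    match (&before, &after) {
        (old::StmtKind::Decl(old::Decl::Local(a)), new::StmtKind::Local(b)) => {
            assert_eq!(a, b)
        }
        _ => unreachable!(),
    }
}
```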
+#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct Local { pub pat: P, + /// Type annotation, if any (otherwise the type will be inferred). pub ty: Option>, - /// Initializer expression to set the value, if any + /// Initializer expression to set the value, if any. pub init: Option>, - pub id: NodeId, pub hir_id: HirId, pub span: Span, pub attrs: ThinVec, + /// Can be `ForLoopDesugar` if the `let` statement is part of a `for` loop + /// desugaring. Otherwise will be `Normal`. pub source: LocalSource, } -pub type Decl = Spanned; - -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] -pub enum DeclKind { - /// A local (let) binding: - Local(P), - /// An item binding: - Item(ItemId), -} - -impl DeclKind { - pub fn attrs(&self) -> &[Attribute] { - match *self { - DeclKind::Local(ref l) => &l.attrs, - DeclKind::Item(_) => &[] - } - } - - pub fn is_local(&self) -> bool { - match *self { - DeclKind::Local(_) => true, - _ => false, - } - } -} - -/// represents one arm of a 'match' -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +/// Represents a single arm of a `match` expression, e.g. +/// ` (if ) => `. +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct Arm { pub attrs: HirVec, + /// Multiple patterns can be combined with `|` pub pats: HirVec>, + /// Optional guard clause. pub guard: Option, + /// The expression the arm evaluates to if this arm matches. pub body: P, } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum Guard { If(P), } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct Field { - pub id: NodeId, + #[stable_hasher(ignore)] + pub hir_id: HirId, pub ident: Ident, pub expr: P, pub span: Span, pub is_shorthand: bool, } -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy)] +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, HashStable)] pub enum BlockCheckMode { DefaultBlock, UnsafeBlock(UnsafeSource), @@ -1260,7 +1259,7 @@ pub enum BlockCheckMode { PopUnsafeBlock(UnsafeSource), } -#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy)] +#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable, Debug, Copy, HashStable)] pub enum UnsafeSource { CompilerGenerated, UserProvided, @@ -1268,7 +1267,7 @@ pub enum UnsafeSource { #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash, Debug)] pub struct BodyId { - pub node_id: NodeId, + pub hir_id: HirId, } /// The body of a function, closure, or constant value. In the case of @@ -1302,7 +1301,7 @@ pub struct Body { impl Body { pub fn id(&self) -> BodyId { BodyId { - node_id: self.value.id + hir_id: self.value.hir_id, } } } @@ -1312,6 +1311,9 @@ pub enum BodyOwnerKind { /// Functions and methods. Fn, + /// Closures + Closure, + /// Constants and associated constants. Const, @@ -1319,14 +1321,22 @@ pub enum BodyOwnerKind { Static(Mutability), } +impl BodyOwnerKind { + pub fn is_fn_or_closure(self) -> bool { + match self { + BodyOwnerKind::Fn | BodyOwnerKind::Closure => true, + BodyOwnerKind::Const | BodyOwnerKind::Static(_) => false, + } + } +} + /// A constant (expression) that's not an item or associated item, /// but needs its own `DefId` for type-checking, const-eval, etc. 
/// These are usually found nested inside types (e.g., array lengths) /// or expressions (e.g., repeat counts), and also used to define /// explicit discriminant values for enum variants. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct AnonConst { - pub id: NodeId, pub hir_id: HirId, pub body: BodyId, } @@ -1334,13 +1344,16 @@ pub struct AnonConst { /// An expression #[derive(Clone, RustcEncodable, RustcDecodable)] pub struct Expr { - pub id: NodeId, pub span: Span, pub node: ExprKind, pub attrs: ThinVec, pub hir_id: HirId, } +// `Expr` is used a lot. Make sure it doesn't unintentionally get bigger. +#[cfg(target_arch = "x86_64")] +static_assert!(MEM_SIZE_OF_EXPR: std::mem::size_of::() == 72); + impl Expr { pub fn precedence(&self) -> ExprPrecedence { match self.node { @@ -1372,6 +1385,7 @@ impl Expr { ExprKind::Struct(..) => ExprPrecedence::Struct, ExprKind::Repeat(..) => ExprPrecedence::Repeat, ExprKind::Yield(..) => ExprPrecedence::Yield, + ExprKind::Err => ExprPrecedence::Err, } } @@ -1422,7 +1436,8 @@ impl Expr { ExprKind::AddrOf(..) | ExprKind::Binary(..) | ExprKind::Yield(..) | - ExprKind::Cast(..) => { + ExprKind::Cast(..) | + ExprKind::Err => { false } } @@ -1431,25 +1446,25 @@ impl Expr { impl fmt::Debug for Expr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "expr({}: {})", self.id, + write!(f, "expr({}: {})", self.hir_id, print::to_string(print::NO_ANN, |s| s.print_expr(self))) } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum ExprKind { /// A `box x` expression. Box(P), - /// An array (`[a, b, c, d]`) + /// An array (e.g., `[a, b, c, d]`). Array(HirVec), - /// A function call + /// A function call. /// /// The first field resolves to the function itself (usually an `ExprKind::Path`), /// and the second field is the list of arguments. /// This also represents calling the constructor of /// tuple-like ADTs such as tuple structs and enum variants. Call(P, HirVec), - /// A method call (`x.foo::<'static, Bar, Baz>(a, b, c, d)`) + /// A method call (e.g., `x.foo::<'static, Bar, Baz>(a, b, c, d)`). /// /// The `PathSegment`/`Span` represent the method name and its generic arguments /// (within the angle brackets). @@ -1458,64 +1473,65 @@ pub enum ExprKind { /// and the remaining elements are the rest of the arguments. /// Thus, `x.foo::(a, b, c, d)` is represented as /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d])`. - MethodCall(PathSegment, Span, HirVec), - /// A tuple (`(a, b, c ,d)`) + MethodCall(P, Span, HirVec), + /// A tuple (e.g., `(a, b, c ,d)`). Tup(HirVec), - /// A binary operation (For example: `a + b`, `a * b`) + /// A binary operation (e.g., `a + b`, `a * b`). Binary(BinOp, P, P), - /// A unary operation (For example: `!x`, `*x`) + /// A unary operation (e.g., `!x`, `*x`). Unary(UnOp, P), - /// A literal (For example: `1`, `"foo"`) - Lit(P), - /// A cast (`foo as f64`) + /// A literal (e.g., `1`, `"foo"`). + Lit(Lit), + /// A cast (e.g., `foo as f64`). Cast(P, P), + /// A type reference (e.g., `Foo`). Type(P, P), - /// An `if` block, with an optional else block + /// An `if` block, with an optional else block. /// - /// `if expr { expr } else { expr }` + /// I.e., `if { } else { }`. 
If(P, P, Option>), /// A while loop, with an optional label /// - /// `'label: while expr { block }` + /// I.e., `'label: while expr { }`. While(P, P, Option::new` and -/// so forth. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub enum UserTypeAnnotation<'tcx> { - Ty(CanonicalTy<'tcx>), - - /// The canonical type is the result of `type_of(def_id)` with the - /// given substitutions applied. - TypeOf(DefId, CanonicalUserSubsts<'tcx>), -} - -EnumTypeFoldableImpl! { - impl<'tcx> TypeFoldable<'tcx> for UserTypeAnnotation<'tcx> { - (UserTypeAnnotation::Ty)(ty), - (UserTypeAnnotation::TypeOf)(def, substs), - } -} - -EnumLiftImpl! { - impl<'a, 'tcx> Lift<'tcx> for UserTypeAnnotation<'a> { - type Lifted = UserTypeAnnotation<'tcx>; - (UserTypeAnnotation::Ty)(ty), - (UserTypeAnnotation::TypeOf)(def, substs), - } -} - /// A collection of projections into user types. /// /// They are projections because a binding can occur a part of a @@ -2506,33 +2549,75 @@ EnumLiftImpl! { /// The first will lead to the constraint `w: &'1 str` (for some /// inferred region `'1`). The second will lead to the constraint `w: /// &'static str`. -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct UserTypeProjections<'tcx> { - pub(crate) contents: Vec<(UserTypeProjection<'tcx>, Span)>, +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] +pub struct UserTypeProjections { + pub(crate) contents: Vec<(UserTypeProjection, Span)>, } BraceStructTypeFoldableImpl! { - impl<'tcx> TypeFoldable<'tcx> for UserTypeProjections<'tcx> { + impl<'tcx> TypeFoldable<'tcx> for UserTypeProjections { contents } } -impl<'tcx> UserTypeProjections<'tcx> { +impl<'tcx> UserTypeProjections { pub fn none() -> Self { UserTypeProjections { contents: vec![] } } - pub fn from_projections(projs: impl Iterator, Span)>) -> Self { + pub fn from_projections(projs: impl Iterator) -> Self { UserTypeProjections { contents: projs.collect() } } - pub fn projections_and_spans(&self) -> impl Iterator, Span)> { + pub fn projections_and_spans(&self) -> impl Iterator { self.contents.iter() } - pub fn projections(&self) -> impl Iterator> { + pub fn projections(&self) -> impl Iterator { self.contents.iter().map(|&(ref user_type, _span)| user_type) } + + pub fn push_projection( + mut self, + user_ty: &UserTypeProjection, + span: Span, + ) -> Self { + self.contents.push((user_ty.clone(), span)); + self + } + + fn map_projections( + mut self, + mut f: impl FnMut(UserTypeProjection) -> UserTypeProjection + ) -> Self { + self.contents = self.contents.drain(..).map(|(proj, span)| (f(proj), span)).collect(); + self + } + + pub fn index(self) -> Self { + self.map_projections(|pat_ty_proj| pat_ty_proj.index()) + } + + pub fn subslice(self, from: u32, to: u32) -> Self { + self.map_projections(|pat_ty_proj| pat_ty_proj.subslice(from, to)) + } + + pub fn deref(self) -> Self { + self.map_projections(|pat_ty_proj| pat_ty_proj.deref()) + } + + pub fn leaf(self, field: Field) -> Self { + self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field)) + } + + pub fn variant( + self, + adt_def: &'tcx AdtDef, + variant_index: VariantIdx, + field: Field, + ) -> Self { + self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field)) + } } /// Encodes the effect of a user-supplied type annotation on the @@ -2550,19 +2635,54 @@ impl<'tcx> UserTypeProjections<'tcx> { /// * `let (x, _): T = ...` -- here, the `projs` vector would contain /// `field[0]` (aka `.0`), 
indicating that the type of `s` is /// determined by finding the type of the `.0` field from `T`. -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] -pub struct UserTypeProjection<'tcx> { - pub base: UserTypeAnnotation<'tcx>, - pub projs: Vec>, +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] +pub struct UserTypeProjection { + pub base: UserTypeAnnotationIndex, + pub projs: Vec>, } -impl<'tcx> Copy for ProjectionKind<'tcx> { } +impl Copy for ProjectionKind { } -CloneTypeFoldableAndLiftImpls! { ProjectionKind<'tcx>, } +impl UserTypeProjection { + pub(crate) fn index(mut self) -> Self { + self.projs.push(ProjectionElem::Index(())); + self + } + + pub(crate) fn subslice(mut self, from: u32, to: u32) -> Self { + self.projs.push(ProjectionElem::Subslice { from, to }); + self + } -impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection<'tcx> { + pub(crate) fn deref(mut self) -> Self { + self.projs.push(ProjectionElem::Deref); + self + } + + pub(crate) fn leaf(mut self, field: Field) -> Self { + self.projs.push(ProjectionElem::Field(field, ())); + self + } + + pub(crate) fn variant( + mut self, + adt_def: &'tcx AdtDef, + variant_index: VariantIdx, + field: Field, + ) -> Self { + self.projs.push(ProjectionElem::Downcast( + Some(adt_def.variants[variant_index].ident.name), + variant_index)); + self.projs.push(ProjectionElem::Field(field, ())); + self + } +} + +CloneTypeFoldableAndLiftImpls! { ProjectionKind, } + +impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use mir::ProjectionElem::*; + use crate::mir::ProjectionElem::*; let base = self.base.fold_with(folder); let projs: Vec<_> = self.projs @@ -2587,6 +2707,7 @@ impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection<'tcx> { newtype_index! { pub struct Promoted { + derive [HashStable] DEBUG_FORMAT = "promoted[{}]" } } @@ -2594,13 +2715,12 @@ newtype_index! { impl<'tcx> Debug for Constant<'tcx> { fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { write!(fmt, "const ")?; - fmt_const_val(fmt, self.literal) + fmt_const_val(fmt, *self.literal) } } - /// Write a `ConstValue` in a way closer to the original source code than the `Debug` output. -pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const<'_>) -> fmt::Result { - use ty::TyKind::*; +pub fn fmt_const_val(f: &mut impl Write, const_val: ty::Const<'_>) -> fmt::Result { + use crate::ty::TyKind::*; let value = const_val.val; let ty = const_val.ty; // print some primitives @@ -2628,35 +2748,33 @@ pub fn fmt_const_val(f: &mut impl Write, const_val: &ty::Const<'_>) -> fmt::Resu } // print function definitions if let FnDef(did, _) = ty.sty { - return write!(f, "{}", item_path_str(did)); + return write!(f, "{}", def_path_str(did)); } // print string literals - if let ConstValue::ScalarPair(ptr, len) = value { + if let ConstValue::Slice(ptr, len) = value { if let Scalar::Ptr(ptr) = ptr { - if let Scalar::Bits { bits: len, .. } = len { - if let Ref(_, &ty::TyS { sty: Str, .. 
}, _) = ty.sty { - return ty::tls::with(|tcx| { - let alloc = tcx.alloc_map.lock().get(ptr.alloc_id); - if let Some(interpret::AllocKind::Memory(alloc)) = alloc { - assert_eq!(len as usize as u128, len); - let slice = - &alloc.bytes[(ptr.offset.bytes() as usize)..][..(len as usize)]; - let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri"); - write!(f, "{:?}", s) - } else { - write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len) - } - }); - } + if let Ref(_, &ty::TyS { sty: Str, .. }, _) = ty.sty { + return ty::tls::with(|tcx| { + let alloc = tcx.alloc_map.lock().get(ptr.alloc_id); + if let Some(interpret::AllocKind::Memory(alloc)) = alloc { + assert_eq!(len as usize as u64, len); + let slice = + &alloc.bytes[(ptr.offset.bytes() as usize)..][..(len as usize)]; + let s = ::std::str::from_utf8(slice).expect("non utf8 str from miri"); + write!(f, "{:?}", s) + } else { + write!(f, "pointer to erroneous constant {:?}, {:?}", ptr, len) + } + }); } } } // just raw dump everything else - write!(f, "{:?}:{}", value, ty) + write!(f, "{:?} : {}", value, ty) } -fn item_path_str(def_id: DefId) -> String { - ty::tls::with(|tcx| tcx.item_path_str(def_id)) +fn def_path_str(def_id: DefId) -> String { + ty::tls::with(|tcx| tcx.def_path_str(def_id)) } impl<'tcx> graph::DirectedGraph for Mir<'tcx> { @@ -2703,7 +2821,7 @@ impl<'a, 'b> graph::GraphSuccessors<'b> for Mir<'a> { type Iter = iter::Cloned>; } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)] pub struct Location { /// the location is within this block pub block: BasicBlock, @@ -2775,19 +2893,16 @@ impl Location { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub enum UnsafetyViolationKind { General, - /// Right now function calls to `const unsafe fn` are only permitted behind a feature gate - /// Also, even `const unsafe fn` need an `unsafe` block to do the allowed operations. - GatedConstFnCall, - /// Permitted in const fn and regular fns + /// Permitted in const fn and regular fns. GeneralAndConstFn, - ExternStatic(ast::NodeId), - BorrowPacked(ast::NodeId), + ExternStatic(hir::HirId), + BorrowPacked(hir::HirId), } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UnsafetyViolation { pub source_info: SourceInfo, pub description: InternedString, @@ -2795,22 +2910,22 @@ pub struct UnsafetyViolation { pub kind: UnsafetyViolationKind, } -#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UnsafetyCheckResult { /// Violations that are propagated *upwards* from this function pub violations: Lrc<[UnsafetyViolation]>, /// unsafe blocks in this function, along with whether they are used. This is /// used for the "unused_unsafe" lint. 
- pub unsafe_blocks: Lrc<[(ast::NodeId, bool)]>, + pub unsafe_blocks: Lrc<[(hir::HirId, bool)]>, } /// The layout of generator state -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct GeneratorLayout<'tcx> { pub fields: Vec>, } -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct BorrowCheckResult<'gcx> { pub closure_requirements: Option>, pub used_mut_upvars: SmallVec<[Field; 8]>, @@ -2818,7 +2933,7 @@ pub struct BorrowCheckResult<'gcx> { /// After we borrow check a closure, we are left with various /// requirements that we have inferred between the free regions that -/// appear in the closure's signature or on its field types. These +/// appear in the closure's signature or on its field types. These /// requirements are then verified and proved by the closure's /// creating function. This struct encodes those requirements. /// @@ -2866,9 +2981,9 @@ pub struct BorrowCheckResult<'gcx> { /// that case because the regions must be allocated in the global /// TyCtxt, and hence we cannot use `ReVar` (which is what we use /// internally within the rest of the NLL code). -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ClosureRegionRequirements<'gcx> { - /// The number of external regions defined on the closure. In our + /// The number of external regions defined on the closure. In our /// example above, it would be 3 -- one for `'static`, then `'1` /// and `'2`. This is just used for a sanity check later on, to /// make sure that the number of regions we see at the callsite @@ -2882,7 +2997,7 @@ pub struct ClosureRegionRequirements<'gcx> { /// Indicates an outlives constraint between a type or between two /// free-regions declared on the closure. -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ClosureOutlivesRequirement<'tcx> { // This region or type ... pub subject: ClosureOutlivesSubject<'tcx>, @@ -2902,7 +3017,8 @@ pub struct ClosureOutlivesRequirement<'tcx> { /// order of the category, thereby influencing diagnostic output. /// /// See also [rustc_mir::borrow_check::nll::constraints] -#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, + Hash, RustcEncodable, RustcDecodable, HashStable)] pub enum ConstraintCategory { Return, Yield, @@ -2939,7 +3055,7 @@ pub enum ConstraintCategory { /// The subject of a ClosureOutlivesRequirement -- that is, the thing /// that must outlive some region. -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum ClosureOutlivesSubject<'tcx> { /// Subject is a type, typically a type parameter, but could also /// be a projection. Indicates a requirement like `T: 'a` being @@ -2965,9 +3081,11 @@ CloneTypeFoldableAndLiftImpls! { SourceInfo, UpvarDecl, FakeReadCause, + RetagKind, SourceScope, SourceScopeData, SourceScopeLocalData, + UserTypeAnnotationIndex, } BraceStructTypeFoldableImpl! { @@ -2981,9 +3099,11 @@ BraceStructTypeFoldableImpl! 
{ generator_drop, generator_layout, local_decls, + user_type_annotations, arg_count, upvar_decls, spread_arg, + control_flow_destroyed, span, cache, } @@ -3030,14 +3150,21 @@ EnumTypeFoldableImpl! { (StatementKind::SetDiscriminant) { place, variant_index }, (StatementKind::StorageLive)(a), (StatementKind::StorageDead)(a), - (StatementKind::InlineAsm) { asm, outputs, inputs }, - (StatementKind::Retag) { fn_entry, two_phase, place }, - (StatementKind::EscapeToRaw)(place), + (StatementKind::InlineAsm)(a), + (StatementKind::Retag)(kind, place), (StatementKind::AscribeUserType)(a, v, b), (StatementKind::Nop), } } +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for InlineAsm<'tcx> { + asm, + outputs, + inputs, + } +} + EnumTypeFoldableImpl! { impl<'tcx, T> TypeFoldable<'tcx> for ClearCrossCrate { (ClearCrossCrate::Clear), @@ -3047,7 +3174,7 @@ EnumTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use mir::TerminatorKind::*; + use crate::mir::TerminatorKind::*; let kind = match self.kind { Goto { target } => Goto { target }, @@ -3117,8 +3244,8 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { target, cleanup, } => { - let msg = if let EvalErrorKind::BoundsCheck { ref len, ref index } = *msg { - EvalErrorKind::BoundsCheck { + let msg = if let InterpError::BoundsCheck { ref len, ref index } = *msg { + InterpError::BoundsCheck { len: len.fold_with(folder), index: index.fold_with(folder), } @@ -3160,7 +3287,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { } fn super_visit_with>(&self, visitor: &mut V) -> bool { - use mir::TerminatorKind::*; + use crate::mir::TerminatorKind::*; match self.kind { SwitchInt { @@ -3192,7 +3319,7 @@ impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { ref cond, ref msg, .. 
} => { if cond.visit_with(visitor) { - if let EvalErrorKind::BoundsCheck { ref len, ref index } = *msg { + if let InterpError::BoundsCheck { ref len, ref index } = *msg { len.visit_with(visitor) || index.visit_with(visitor) } else { false @@ -3232,7 +3359,7 @@ impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> { impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use mir::Rvalue::*; + use crate::mir::Rvalue::*; match *self { Use(ref op) => Use(op.fold_with(folder)), Repeat(ref op, len) => Repeat(op.fold_with(folder), len), @@ -3274,7 +3401,7 @@ impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { } fn super_visit_with>(&self, visitor: &mut V) -> bool { - use mir::Rvalue::*; + use crate::mir::Rvalue::*; match *self { Use(ref op) => op.visit_with(visitor), Repeat(ref op, _) => op.visit_with(visitor), @@ -3319,14 +3446,14 @@ impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { } } -impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection<'tcx, B, V, T> +impl<'tcx, B, V, T> TypeFoldable<'tcx> for Projection where B: TypeFoldable<'tcx>, V: TypeFoldable<'tcx>, T: TypeFoldable<'tcx>, { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use mir::ProjectionElem::*; + use crate::mir::ProjectionElem::*; let base = self.base.fold_with(folder); let elem = match self.elem { @@ -3340,7 +3467,7 @@ where } fn super_visit_with>(&self, visitor: &mut Vs) -> bool { - use mir::ProjectionElem::*; + use crate::mir::ProjectionElem::*; self.base.visit_with(visitor) || match self.elem { Field(_, ref ty) => ty.visit_with(visitor), diff --git a/src/librustc/mir/mono.rs b/src/librustc/mir/mono.rs index c96cbd40efaf8..c75f7d7d15946 100644 --- a/src/librustc/mir/mono.rs +++ b/src/librustc/mir/mono.rs @@ -1,22 +1,12 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; -use syntax::ast::NodeId; +use crate::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; +use crate::hir::HirId; use syntax::symbol::{Symbol, InternedString}; -use ty::{Instance, TyCtxt}; -use util::nodemap::FxHashMap; +use crate::ty::{Instance, TyCtxt}; +use crate::util::nodemap::FxHashMap; use rustc_data_structures::base_n; use rustc_data_structures::stable_hasher::{HashStable, StableHasherResult, StableHasher}; -use ich::{Fingerprint, StableHashingContext, NodeIdHashingMode}; +use crate::ich::{Fingerprint, StableHashingContext, NodeIdHashingMode}; use std::fmt; use std::hash::Hash; @@ -24,7 +14,7 @@ use std::hash::Hash; pub enum MonoItem<'tcx> { Fn(Instance<'tcx>), Static(DefId), - GlobalAsm(NodeId), + GlobalAsm(HirId), } impl<'tcx> MonoItem<'tcx> { @@ -67,7 +57,7 @@ impl<'a, 'tcx> HashStable> for MonoItem<'tcx> { pub struct CodegenUnit<'tcx> { /// A name for this CGU. Incremental compilation requires that - /// name be unique amongst **all** crates. Therefore, it should + /// name be unique amongst **all** crates. Therefore, it should /// contain something unique to this crate (e.g., a module path) /// as well as the crate name and disambiguator. 
name: InternedString, diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs index baa88dba45915..23be1bbf6c687 100644 --- a/src/librustc/mir/tcx.rs +++ b/src/librustc/mir/tcx.rs @@ -1,34 +1,20 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! * Methods for the various MIR types. These are intended for use after * building is complete. */ -use mir::*; -use ty::subst::{Subst, Substs}; -use ty::{self, AdtDef, Ty, TyCtxt}; -use ty::layout::VariantIdx; -use hir; -use ty::util::IntTypeExt; +use crate::mir::*; +use crate::ty::subst::Subst; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::layout::VariantIdx; +use crate::hir; +use crate::ty::util::IntTypeExt; #[derive(Copy, Clone, Debug)] -pub enum PlaceTy<'tcx> { - /// Normal type. - Ty { ty: Ty<'tcx> }, - - /// Downcast to a particular variant of an enum. - Downcast { adt_def: &'tcx AdtDef, - substs: &'tcx Substs<'tcx>, - variant_index: VariantIdx }, +pub struct PlaceTy<'tcx> { + pub ty: Ty<'tcx>, + /// Downcast to a particular variant of an enum, if included. + pub variant_index: Option, } static_assert!(PLACE_TY_IS_3_PTRS_LARGE: @@ -37,16 +23,7 @@ static_assert!(PLACE_TY_IS_3_PTRS_LARGE: impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> { - PlaceTy::Ty { ty } - } - - pub fn to_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { - match *self { - PlaceTy::Ty { ty } => - ty, - PlaceTy::Downcast { adt_def, substs, variant_index: _ } => - tcx.mk_adt(adt_def, substs), - } + PlaceTy { ty, variant_index: None } } /// `place_ty.field_ty(tcx, f)` computes the type at a given field @@ -58,21 +35,20 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { /// Note that the resulting type has not been normalized. pub fn field_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, f: &Field) -> Ty<'tcx> { - // Pass `0` here so it can be used as a "default" variant_index in first arm below - let answer = match (self, VariantIdx::new(0)) { - (PlaceTy::Ty { - ty: &ty::TyS { sty: ty::TyKind::Adt(adt_def, substs), .. } }, variant_index) | - (PlaceTy::Downcast { adt_def, substs, variant_index }, _) => { - let variant_def = &adt_def.variants[variant_index]; + let answer = match self.ty.sty { + ty::Adt(adt_def, substs) => { + let variant_def = match self.variant_index { + None => adt_def.non_enum_variant(), + Some(variant_index) => { + assert!(adt_def.is_enum()); + &adt_def.variants[variant_index] + } + }; let field_def = &variant_def.fields[f.index()]; field_def.ty(tcx, substs) } - (PlaceTy::Ty { ty }, _) => { - match ty.sty { - ty::Tuple(ref tys) => tys[f.index()], - _ => bug!("extracting field of non-tuple non-adt: {:?}", self), - } - } + ty::Tuple(ref tys) => tys[f.index()], + _ => bug!("extracting field of non-tuple non-adt: {:?}", self), }; debug!("field_ty self: {:?} f: {:?} yields: {:?}", self, f, answer); answer @@ -85,8 +61,7 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { elem: &PlaceElem<'tcx>) -> PlaceTy<'tcx> { - self.projection_ty_core(tcx, elem, |_, _, ty| -> Result, ()> { Ok(ty) }) - .unwrap() + self.projection_ty_core(tcx, elem, |_, _, ty| ty) } /// `place_ty.projection_ty_core(tcx, elem, |...| { ... })` @@ -94,73 +69,54 @@ impl<'a, 'gcx, 'tcx> PlaceTy<'tcx> { /// `Ty` or downcast variant corresponding to that projection. 
/// The `handle_field` callback must map a `Field` to its `Ty`, /// (which should be trivial when `T` = `Ty`). - pub fn projection_ty_core( + pub fn projection_ty_core( self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - elem: &ProjectionElem<'tcx, V, T>, - mut handle_field: impl FnMut(&Self, &Field, &T) -> Result, E>) - -> Result, E> + elem: &ProjectionElem, + mut handle_field: impl FnMut(&Self, &Field, &T) -> Ty<'tcx>) + -> PlaceTy<'tcx> where V: ::std::fmt::Debug, T: ::std::fmt::Debug { let answer = match *elem { ProjectionElem::Deref => { - let ty = self.to_ty(tcx) + let ty = self.ty .builtin_deref(true) .unwrap_or_else(|| { bug!("deref projection of non-dereferencable ty {:?}", self) }) .ty; - PlaceTy::Ty { - ty, - } + PlaceTy::from_ty(ty) } ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } => - PlaceTy::Ty { - ty: self.to_ty(tcx).builtin_index().unwrap() - }, + PlaceTy::from_ty(self.ty.builtin_index().unwrap()), ProjectionElem::Subslice { from, to } => { - let ty = self.to_ty(tcx); - PlaceTy::Ty { - ty: match ty.sty { - ty::Array(inner, size) => { - let size = size.unwrap_usize(tcx); - let len = size - (from as u64) - (to as u64); - tcx.mk_array(inner, len) - } - ty::Slice(..) => ty, - _ => { - bug!("cannot subslice non-array type: `{:?}`", self) - } - } - } - } - ProjectionElem::Downcast(adt_def1, index) => - match self.to_ty(tcx).sty { - ty::Adt(adt_def, substs) => { - assert!(adt_def.is_enum()); - assert!(index.as_usize() < adt_def.variants.len()); - assert_eq!(adt_def, adt_def1); - PlaceTy::Downcast { adt_def, - substs, - variant_index: index } + PlaceTy::from_ty(match self.ty.sty { + ty::Array(inner, size) => { + let size = size.unwrap_usize(tcx); + let len = size - (from as u64) - (to as u64); + tcx.mk_array(inner, len) } + ty::Slice(..) => self.ty, _ => { - bug!("cannot downcast non-ADT type: `{:?}`", self) + bug!("cannot subslice non-array type: `{:?}`", self) } - }, + }) + } + ProjectionElem::Downcast(_name, index) => + PlaceTy { ty: self.ty, variant_index: Some(index) }, ProjectionElem::Field(ref f, ref fty) => - PlaceTy::Ty { ty: handle_field(&self, f, fty)? }, + PlaceTy::from_ty(handle_field(&self, f, fty)), }; debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer); - Ok(answer) + answer } } -EnumTypeFoldableImpl! { +BraceStructTypeFoldableImpl! 
{ impl<'tcx> TypeFoldable<'tcx> for PlaceTy<'tcx> { - (PlaceTy::Ty) { ty }, - (PlaceTy::Downcast) { adt_def, substs, variant_index }, + ty, + variant_index, } } @@ -169,11 +125,10 @@ impl<'tcx> Place<'tcx> { where D: HasLocalDecls<'tcx> { match *self { - Place::Local(index) => - PlaceTy::Ty { ty: local_decls.local_decls()[index].ty }, - Place::Promoted(ref data) => PlaceTy::Ty { ty: data.1 }, - Place::Static(ref data) => - PlaceTy::Ty { ty: data.ty }, + Place::Base(PlaceBase::Local(index)) => + PlaceTy::from_ty(local_decls.local_decls()[index].ty), + Place::Base(PlaceBase::Static(ref data)) => + PlaceTy::from_ty(data.ty), Place::Projection(ref proj) => proj.base.ty(local_decls, tcx).projection_ty(tcx, &proj.elem), } @@ -198,7 +153,7 @@ impl<'tcx> Place<'tcx> { match place { Place::Projection(ref proj) => match proj.elem { ProjectionElem::Field(field, _ty) => { - let base_ty = proj.base.ty(mir, *tcx).to_ty(*tcx); + let base_ty = proj.base.ty(mir, *tcx).ty; if (base_ty.is_closure() || base_ty.is_generator()) && (!by_ref || mir.upvar_decls[field.index()].by_ref) @@ -230,7 +185,7 @@ impl<'tcx> Rvalue<'tcx> { tcx.mk_array(operand.ty(local_decls, tcx), count) } Rvalue::Ref(reg, bk, ref place) => { - let place_ty = place.ty(local_decls, tcx).to_ty(tcx); + let place_ty = place.ty(local_decls, tcx).ty; tcx.mk_ref(reg, ty::TypeAndMut { ty: place_ty, @@ -256,7 +211,7 @@ impl<'tcx> Rvalue<'tcx> { operand.ty(local_decls, tcx) } Rvalue::Discriminant(ref place) => { - let ty = place.ty(local_decls, tcx).to_ty(tcx); + let ty = place.ty(local_decls, tcx).ty; if let ty::Adt(adt_def, _) = ty.sty { adt_def.repr.discr_type().to_ty(tcx) } else { @@ -289,7 +244,7 @@ impl<'tcx> Rvalue<'tcx> { } #[inline] - /// Returns whether this rvalue is deeply initialized (most rvalues) or + /// Returns `true` if this rvalue is deeply initialized (most rvalues) or /// whether its only shallowly initialized (`Rvalue::Box`). pub fn initialization_state(&self) -> RvalueInitializationState { match *self { @@ -305,7 +260,7 @@ impl<'tcx> Operand<'tcx> { { match self { &Operand::Copy(ref l) | - &Operand::Move(ref l) => l.ty(local_decls, tcx).to_ty(tcx), + &Operand::Move(ref l) => l.ty(local_decls, tcx).ty, &Operand::Constant(ref c) => c.ty, } } diff --git a/src/librustc/mir/traversal.rs b/src/librustc/mir/traversal.rs index 4424ba0a4f7a1..f8398c27cc2da 100644 --- a/src/librustc/mir/traversal.rs +++ b/src/librustc/mir/traversal.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::bit_set::BitSet; use super::*; diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs index 237f6bc9c7b45..b04c28cde571c 100644 --- a/src/librustc/mir/visit.rs +++ b/src/librustc/mir/visit.rs @@ -1,17 +1,7 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
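The mir/tcx.rs hunk above folds the old two-variant PlaceTy enum into a single struct carrying an optional downcast index, and callers that previously went through `to_ty(tcx)` now read the `ty` field directly. A minimal, standalone sketch of that shape follows; `Ty`, `VariantIdx`, and `PlaceTy` here are simplified placeholders for illustration, not the rustc-internal types:

// Simplified stand-ins for the rustc-internal types; illustrative only.
type Ty = &'static str;

#[derive(Clone, Copy, Debug, PartialEq)]
struct VariantIdx(usize);

// One struct replaces the former `Ty`/`Downcast` enum variants:
// `variant_index` is `Some(_)` only after a `Downcast` projection.
#[derive(Clone, Copy, Debug)]
struct PlaceTy {
    ty: Ty,
    variant_index: Option<VariantIdx>,
}

impl PlaceTy {
    fn from_ty(ty: Ty) -> Self {
        PlaceTy { ty, variant_index: None }
    }

    // What a downcast projection now yields: the same type plus the chosen variant.
    fn downcast(self, index: VariantIdx) -> Self {
        PlaceTy { variant_index: Some(index), ..self }
    }
}

fn main() {
    let base = PlaceTy::from_ty("Option<u32>");
    let down = base.downcast(VariantIdx(1));
    // Callers that used to write `place_ty.to_ty(tcx)` now just read `.ty`.
    assert_eq!(down.ty, base.ty);
    println!("{:?}", down);
}

As the diff's call sites show, reading `.ty` no longer needs a `tcx` in hand, which is what lets `Place::ty`, `Rvalue::ty`, and `Operand::ty` drop their `to_ty(tcx)` round trips.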
- -use hir::def_id::DefId; -use ty::subst::Substs; -use ty::{ClosureSubsts, GeneratorSubsts, Region, Ty}; -use mir::*; +use crate::hir::def_id::DefId; +use crate::ty::subst::SubstsRef; +use crate::ty::{CanonicalUserTypeAnnotation, ClosureSubsts, GeneratorSubsts, Region, Ty}; +use crate::mir::*; use syntax_pos::Span; // # The MIR Visitor @@ -48,10 +38,10 @@ use syntax_pos::Span; // ```rust // fn super_basic_block_data(&mut self, // block: BasicBlock, -// data: & $($mutability)* BasicBlockData<'tcx>) { +// data: & $($mutability)? BasicBlockData<'tcx>) { // let BasicBlockData { -// ref $($mutability)* statements, -// ref $($mutability)* terminator, +// statements, +// terminator, // is_cleanup: _ // } = *data; // @@ -77,112 +67,104 @@ use syntax_pos::Span; // `is_cleanup` above. macro_rules! make_mir_visitor { - ($visitor_trait_name:ident, $($mutability:ident)*) => { + ($visitor_trait_name:ident, $($mutability:ident)?) => { pub trait $visitor_trait_name<'tcx> { // Override these, and call `self.super_xxx` to revert back to the // default behavior. - fn visit_mir(&mut self, mir: & $($mutability)* Mir<'tcx>) { + fn visit_mir(&mut self, mir: & $($mutability)? Mir<'tcx>) { self.super_mir(mir); } fn visit_basic_block_data(&mut self, block: BasicBlock, - data: & $($mutability)* BasicBlockData<'tcx>) { + data: & $($mutability)? BasicBlockData<'tcx>) { self.super_basic_block_data(block, data); } fn visit_source_scope_data(&mut self, - scope_data: & $($mutability)* SourceScopeData) { + scope_data: & $($mutability)? SourceScopeData) { self.super_source_scope_data(scope_data); } fn visit_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>, + statement: & $($mutability)? Statement<'tcx>, location: Location) { self.super_statement(block, statement, location); } fn visit_assign(&mut self, block: BasicBlock, - place: & $($mutability)* Place<'tcx>, - rvalue: & $($mutability)* Rvalue<'tcx>, + place: & $($mutability)? Place<'tcx>, + rvalue: & $($mutability)? Rvalue<'tcx>, location: Location) { self.super_assign(block, place, rvalue, location); } fn visit_terminator(&mut self, block: BasicBlock, - terminator: & $($mutability)* Terminator<'tcx>, + terminator: & $($mutability)? Terminator<'tcx>, location: Location) { self.super_terminator(block, terminator, location); } fn visit_terminator_kind(&mut self, block: BasicBlock, - kind: & $($mutability)* TerminatorKind<'tcx>, + kind: & $($mutability)? TerminatorKind<'tcx>, location: Location) { self.super_terminator_kind(block, kind, location); } fn visit_assert_message(&mut self, - msg: & $($mutability)* AssertMessage<'tcx>, + msg: & $($mutability)? AssertMessage<'tcx>, location: Location) { self.super_assert_message(msg, location); } fn visit_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>, + rvalue: & $($mutability)? Rvalue<'tcx>, location: Location) { self.super_rvalue(rvalue, location); } fn visit_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>, + operand: & $($mutability)? Operand<'tcx>, location: Location) { self.super_operand(operand, location); } fn visit_ascribe_user_ty(&mut self, - place: & $($mutability)* Place<'tcx>, - variance: & $($mutability)* ty::Variance, - user_ty: & $($mutability)* UserTypeProjection<'tcx>, + place: & $($mutability)? Place<'tcx>, + variance: & $($mutability)? ty::Variance, + user_ty: & $($mutability)? 
UserTypeProjection, location: Location) { self.super_ascribe_user_ty(place, variance, user_ty, location); } fn visit_retag(&mut self, - fn_entry: & $($mutability)* bool, - two_phase: & $($mutability)* bool, - place: & $($mutability)* Place<'tcx>, + kind: & $($mutability)? RetagKind, + place: & $($mutability)? Place<'tcx>, location: Location) { - self.super_retag(fn_entry, two_phase, place, location); + self.super_retag(kind, place, location); } fn visit_place(&mut self, - place: & $($mutability)* Place<'tcx>, + place: & $($mutability)? Place<'tcx>, context: PlaceContext<'tcx>, location: Location) { self.super_place(place, context, location); } - fn visit_static(&mut self, - static_: & $($mutability)* Static<'tcx>, - context: PlaceContext<'tcx>, - location: Location) { - self.super_static(static_, context, location); - } - fn visit_projection(&mut self, - place: & $($mutability)* PlaceProjection<'tcx>, + place: & $($mutability)? PlaceProjection<'tcx>, context: PlaceContext<'tcx>, location: Location) { self.super_projection(place, context, location); } fn visit_projection_elem(&mut self, - place: & $($mutability)* PlaceElem<'tcx>, + place: & $($mutability)? PlaceElem<'tcx>, location: Location) { self.super_projection_elem(place, location); } @@ -194,91 +176,92 @@ macro_rules! make_mir_visitor { } fn visit_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>, + constant: & $($mutability)? Constant<'tcx>, location: Location) { self.super_constant(constant, location); } fn visit_def_id(&mut self, - def_id: & $($mutability)* DefId, + def_id: & $($mutability)? DefId, _: Location) { self.super_def_id(def_id); } fn visit_span(&mut self, - span: & $($mutability)* Span) { + span: & $($mutability)? Span) { self.super_span(span); } fn visit_source_info(&mut self, - source_info: & $($mutability)* SourceInfo) { + source_info: & $($mutability)? SourceInfo) { self.super_source_info(source_info); } fn visit_ty(&mut self, - ty: & $($mutability)* Ty<'tcx>, + ty: & $($mutability)? Ty<'tcx>, _: TyContext) { self.super_ty(ty); } fn visit_user_type_projection( &mut self, - ty: & $($mutability)* UserTypeProjection<'tcx>, + ty: & $($mutability)? UserTypeProjection, ) { self.super_user_type_projection(ty); } fn visit_user_type_annotation( &mut self, - ty: & $($mutability)* UserTypeAnnotation<'tcx>, + index: UserTypeAnnotationIndex, + ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>, ) { - self.super_user_type_annotation(ty); + self.super_user_type_annotation(index, ty); } fn visit_region(&mut self, - region: & $($mutability)* ty::Region<'tcx>, + region: & $($mutability)? ty::Region<'tcx>, _: Location) { self.super_region(region); } fn visit_const(&mut self, - constant: & $($mutability)* &'tcx ty::Const<'tcx>, + constant: & $($mutability)? &'tcx ty::Const<'tcx>, _: Location) { self.super_const(constant); } fn visit_substs(&mut self, - substs: & $($mutability)* &'tcx Substs<'tcx>, + substs: & $($mutability)? SubstsRef<'tcx>, _: Location) { self.super_substs(substs); } fn visit_closure_substs(&mut self, - substs: & $($mutability)* ClosureSubsts<'tcx>, + substs: & $($mutability)? ClosureSubsts<'tcx>, _: Location) { self.super_closure_substs(substs); } fn visit_generator_substs(&mut self, - substs: & $($mutability)* GeneratorSubsts<'tcx>, + substs: & $($mutability)? GeneratorSubsts<'tcx>, _: Location) { self.super_generator_substs(substs); } fn visit_local_decl(&mut self, local: Local, - local_decl: & $($mutability)* LocalDecl<'tcx>) { + local_decl: & $($mutability)? 
LocalDecl<'tcx>) { self.super_local_decl(local, local_decl); } fn visit_local(&mut self, - _local: & $($mutability)* Local, + _local: & $($mutability)? Local, _context: PlaceContext<'tcx>, _location: Location) { } fn visit_source_scope(&mut self, - scope: & $($mutability)* SourceScope) { + scope: & $($mutability)? SourceScope) { self.super_source_scope(scope); } @@ -286,8 +269,8 @@ macro_rules! make_mir_visitor { // not meant to be overridden. fn super_mir(&mut self, - mir: & $($mutability)* Mir<'tcx>) { - if let Some(yield_ty) = &$($mutability)* mir.yield_ty { + mir: & $($mutability)? Mir<'tcx>) { + if let Some(yield_ty) = &$($mutability)? mir.yield_ty { self.visit_ty(yield_ty, TyContext::YieldTy(SourceInfo { span: mir.span, scope: OUTERMOST_SOURCE_SCOPE, @@ -301,34 +284,45 @@ macro_rules! make_mir_visitor { (mut) => (mir.basic_blocks_mut().iter_enumerated_mut()); () => (mir.basic_blocks().iter_enumerated()); }; - for (bb, data) in basic_blocks!($($mutability)*) { + for (bb, data) in basic_blocks!($($mutability)?) { self.visit_basic_block_data(bb, data); } - for scope in &$($mutability)* mir.source_scopes { + for scope in &$($mutability)? mir.source_scopes { self.visit_source_scope_data(scope); } - self.visit_ty(&$($mutability)* mir.return_ty(), TyContext::ReturnTy(SourceInfo { + self.visit_ty(&$($mutability)? mir.return_ty(), TyContext::ReturnTy(SourceInfo { span: mir.span, scope: OUTERMOST_SOURCE_SCOPE, })); for local in mir.local_decls.indices() { - self.visit_local_decl(local, & $($mutability)* mir.local_decls[local]); + self.visit_local_decl(local, & $($mutability)? mir.local_decls[local]); } - self.visit_span(&$($mutability)* mir.span); + macro_rules! type_annotations { + (mut) => (mir.user_type_annotations.iter_enumerated_mut()); + () => (mir.user_type_annotations.iter_enumerated()); + }; + + for (index, annotation) in type_annotations!($($mutability)?) { + self.visit_user_type_annotation( + index, annotation + ); + } + + self.visit_span(&$($mutability)? mir.span); } fn super_basic_block_data(&mut self, block: BasicBlock, - data: & $($mutability)* BasicBlockData<'tcx>) { + data: & $($mutability)? BasicBlockData<'tcx>) { let BasicBlockData { - ref $($mutability)* statements, - ref $($mutability)* terminator, + statements, + terminator, is_cleanup: _ - } = *data; + } = data; let mut index = 0; for statement in statements { @@ -337,96 +331,83 @@ macro_rules! make_mir_visitor { index += 1; } - if let Some(ref $($mutability)* terminator) = *terminator { + if let Some(terminator) = terminator { let location = Location { block: block, statement_index: index }; self.visit_terminator(block, terminator, location); } } - fn super_source_scope_data(&mut self, - scope_data: & $($mutability)* SourceScopeData) { + fn super_source_scope_data(&mut self, scope_data: & $($mutability)? SourceScopeData) { let SourceScopeData { - ref $($mutability)* span, - ref $($mutability)* parent_scope, - } = *scope_data; + span, + parent_scope, + } = scope_data; self.visit_span(span); - if let Some(ref $($mutability)* parent_scope) = *parent_scope { + if let Some(parent_scope) = parent_scope { self.visit_source_scope(parent_scope); } } fn super_statement(&mut self, block: BasicBlock, - statement: & $($mutability)* Statement<'tcx>, + statement: & $($mutability)? 
Statement<'tcx>, location: Location) { let Statement { - ref $($mutability)* source_info, - ref $($mutability)* kind, - } = *statement; + source_info, + kind, + } = statement; self.visit_source_info(source_info); - match *kind { - StatementKind::Assign(ref $($mutability)* place, - ref $($mutability)* rvalue) => { + match kind { + StatementKind::Assign(place, rvalue) => { self.visit_assign(block, place, rvalue, location); } - StatementKind::FakeRead(_, ref $($mutability)* place) => { + StatementKind::FakeRead(_, place) => { self.visit_place( place, PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect), location ); } - StatementKind::SetDiscriminant{ ref $($mutability)* place, .. } => { + StatementKind::SetDiscriminant { place, .. } => { self.visit_place( place, PlaceContext::MutatingUse(MutatingUseContext::Store), location ); } - StatementKind::EscapeToRaw(ref $($mutability)* op) => { - self.visit_operand(op, location); - } - StatementKind::StorageLive(ref $($mutability)* local) => { + StatementKind::StorageLive(local) => { self.visit_local( local, PlaceContext::NonUse(NonUseContext::StorageLive), location ); } - StatementKind::StorageDead(ref $($mutability)* local) => { + StatementKind::StorageDead(local) => { self.visit_local( local, PlaceContext::NonUse(NonUseContext::StorageDead), location ); } - StatementKind::InlineAsm { ref $($mutability)* outputs, - ref $($mutability)* inputs, - asm: _ } => { - for output in & $($mutability)* outputs[..] { + StatementKind::InlineAsm(asm) => { + for output in & $($mutability)? asm.outputs[..] { self.visit_place( output, PlaceContext::MutatingUse(MutatingUseContext::AsmOutput), location ); } - for (span, input) in & $($mutability)* inputs[..] { + for (span, input) in & $($mutability)? asm.inputs[..] { self.visit_span(span); self.visit_operand(input, location); } } - StatementKind::Retag { ref $($mutability)* fn_entry, - ref $($mutability)* two_phase, - ref $($mutability)* place } => { - self.visit_retag(fn_entry, two_phase, place, location); + StatementKind::Retag(kind, place) => { + self.visit_retag(kind, place, location); } - StatementKind::AscribeUserType( - ref $($mutability)* place, - ref $($mutability)* variance, - ref $($mutability)* user_ty, - ) => { + StatementKind::AscribeUserType(place, variance, user_ty) => { self.visit_ascribe_user_ty(place, variance, user_ty, location); } StatementKind::Nop => {} @@ -435,8 +416,8 @@ macro_rules! make_mir_visitor { fn super_assign(&mut self, _block: BasicBlock, - place: &$($mutability)* Place<'tcx>, - rvalue: &$($mutability)* Rvalue<'tcx>, + place: &$($mutability)? Place<'tcx>, + rvalue: &$($mutability)? Rvalue<'tcx>, location: Location) { self.visit_place( place, @@ -448,12 +429,9 @@ macro_rules! make_mir_visitor { fn super_terminator(&mut self, block: BasicBlock, - terminator: &$($mutability)* Terminator<'tcx>, + terminator: &$($mutability)? Terminator<'tcx>, location: Location) { - let Terminator { - ref $($mutability)* source_info, - ref $($mutability)* kind, - } = *terminator; + let Terminator { source_info, kind } = terminator; self.visit_source_info(source_info); self.visit_terminator_kind(block, kind, location); @@ -461,21 +439,23 @@ macro_rules! make_mir_visitor { fn super_terminator_kind(&mut self, block: BasicBlock, - kind: & $($mutability)* TerminatorKind<'tcx>, + kind: & $($mutability)? 
TerminatorKind<'tcx>, source_location: Location) { - match *kind { + match kind { TerminatorKind::Goto { target } => { - self.visit_branch(block, target); + self.visit_branch(block, *target); } - TerminatorKind::SwitchInt { ref $($mutability)* discr, - ref $($mutability)* switch_ty, - values: _, - ref targets } => { + TerminatorKind::SwitchInt { + discr, + switch_ty, + values: _, + targets + } => { self.visit_operand(discr, source_location); self.visit_ty(switch_ty, TyContext::Location(source_location)); - for &target in targets { - self.visit_branch(block, target); + for target in targets { + self.visit_branch(block, *target); } } @@ -486,113 +466,120 @@ macro_rules! make_mir_visitor { TerminatorKind::Unreachable => { } - TerminatorKind::Drop { ref $($mutability)* location, - target, - unwind } => { + TerminatorKind::Drop { + location, + target, + unwind, + } => { self.visit_place( location, PlaceContext::MutatingUse(MutatingUseContext::Drop), source_location ); - self.visit_branch(block, target); + self.visit_branch(block, *target); unwind.map(|t| self.visit_branch(block, t)); } - TerminatorKind::DropAndReplace { ref $($mutability)* location, - ref $($mutability)* value, - target, - unwind } => { + TerminatorKind::DropAndReplace { + location, + value, + target, + unwind, + } => { self.visit_place( location, PlaceContext::MutatingUse(MutatingUseContext::Drop), source_location ); self.visit_operand(value, source_location); - self.visit_branch(block, target); + self.visit_branch(block, *target); unwind.map(|t| self.visit_branch(block, t)); } - TerminatorKind::Call { ref $($mutability)* func, - ref $($mutability)* args, - ref $($mutability)* destination, - cleanup, - from_hir_call: _, } => { + TerminatorKind::Call { + func, + args, + destination, + cleanup, + from_hir_call: _, + } => { self.visit_operand(func, source_location); for arg in args { self.visit_operand(arg, source_location); } - if let Some((ref $($mutability)* destination, target)) = *destination { + if let Some((destination, target)) = destination { self.visit_place( destination, PlaceContext::MutatingUse(MutatingUseContext::Call), source_location ); - self.visit_branch(block, target); + self.visit_branch(block, *target); } cleanup.map(|t| self.visit_branch(block, t)); } - TerminatorKind::Assert { ref $($mutability)* cond, - expected: _, - ref $($mutability)* msg, - target, - cleanup } => { + TerminatorKind::Assert { + cond, + expected: _, + msg, + target, + cleanup, + } => { self.visit_operand(cond, source_location); self.visit_assert_message(msg, source_location); - self.visit_branch(block, target); + self.visit_branch(block, *target); cleanup.map(|t| self.visit_branch(block, t)); } - TerminatorKind::Yield { ref $($mutability)* value, - resume, - drop } => { + TerminatorKind::Yield { + value, + resume, + drop, + } => { self.visit_operand(value, source_location); - self.visit_branch(block, resume); + self.visit_branch(block, *resume); drop.map(|t| self.visit_branch(block, t)); } - TerminatorKind::FalseEdges { real_target, ref imaginary_targets} => { - self.visit_branch(block, real_target); + TerminatorKind::FalseEdges { real_target, imaginary_targets } => { + self.visit_branch(block, *real_target); for target in imaginary_targets { self.visit_branch(block, *target); } } TerminatorKind::FalseUnwind { real_target, unwind } => { - self.visit_branch(block, real_target); + self.visit_branch(block, *real_target); if let Some(unwind) = unwind { - self.visit_branch(block, unwind); + self.visit_branch(block, *unwind); } } } } fn 
super_assert_message(&mut self, - msg: & $($mutability)* AssertMessage<'tcx>, + msg: & $($mutability)? AssertMessage<'tcx>, location: Location) { - use mir::interpret::EvalErrorKind::*; - if let BoundsCheck { - ref $($mutability)* len, - ref $($mutability)* index - } = *msg { + use crate::mir::interpret::InterpError::*; + if let BoundsCheck { len, index } = msg { self.visit_operand(len, location); self.visit_operand(index, location); } } fn super_rvalue(&mut self, - rvalue: & $($mutability)* Rvalue<'tcx>, + rvalue: & $($mutability)? Rvalue<'tcx>, location: Location) { - match *rvalue { - Rvalue::Use(ref $($mutability)* operand) => { + match rvalue { + Rvalue::Use(operand) => { self.visit_operand(operand, location); } - Rvalue::Repeat(ref $($mutability)* value, _) => { + Rvalue::Repeat(value, _) => { self.visit_operand(value, location); } - Rvalue::Ref(ref $($mutability)* r, bk, ref $($mutability)* path) => { + Rvalue::Ref(r, bk, path) => { self.visit_region(r, location); let ctx = match bk { BorrowKind::Shared => PlaceContext::NonMutatingUse( @@ -610,7 +597,7 @@ macro_rules! make_mir_visitor { self.visit_place(path, ctx, location); } - Rvalue::Len(ref $($mutability)* path) => { + Rvalue::Len(path) => { self.visit_place( path, PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect), @@ -618,28 +605,22 @@ macro_rules! make_mir_visitor { ); } - Rvalue::Cast(_cast_kind, - ref $($mutability)* operand, - ref $($mutability)* ty) => { + Rvalue::Cast(_cast_kind, operand, ty) => { self.visit_operand(operand, location); self.visit_ty(ty, TyContext::Location(location)); } - Rvalue::BinaryOp(_bin_op, - ref $($mutability)* lhs, - ref $($mutability)* rhs) | - Rvalue::CheckedBinaryOp(_bin_op, - ref $($mutability)* lhs, - ref $($mutability)* rhs) => { + Rvalue::BinaryOp(_bin_op, lhs, rhs) + | Rvalue::CheckedBinaryOp(_bin_op, lhs, rhs) => { self.visit_operand(lhs, location); self.visit_operand(rhs, location); } - Rvalue::UnaryOp(_un_op, ref $($mutability)* op) => { + Rvalue::UnaryOp(_un_op, op) => { self.visit_operand(op, location); } - Rvalue::Discriminant(ref $($mutability)* place) => { + Rvalue::Discriminant(place) => { self.visit_place( place, PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect), @@ -647,34 +628,39 @@ macro_rules! make_mir_visitor { ); } - Rvalue::NullaryOp(_op, ref $($mutability)* ty) => { + Rvalue::NullaryOp(_op, ty) => { self.visit_ty(ty, TyContext::Location(location)); } - Rvalue::Aggregate(ref $($mutability)* kind, - ref $($mutability)* operands) => { - let kind = &$($mutability)* **kind; - match *kind { - AggregateKind::Array(ref $($mutability)* ty) => { + Rvalue::Aggregate(kind, operands) => { + let kind = &$($mutability)? 
**kind; + match kind { + AggregateKind::Array(ty) => { self.visit_ty(ty, TyContext::Location(location)); } AggregateKind::Tuple => { } - AggregateKind::Adt(_adt_def, - _variant_index, - ref $($mutability)* substs, - _user_substs, - _active_field_index) => { + AggregateKind::Adt( + _adt_def, + _variant_index, + substs, + _user_substs, + _active_field_index + ) => { self.visit_substs(substs, location); } - AggregateKind::Closure(ref $($mutability)* def_id, - ref $($mutability)* closure_substs) => { + AggregateKind::Closure( + def_id, + closure_substs + ) => { self.visit_def_id(def_id, location); self.visit_closure_substs(closure_substs, location); } - AggregateKind::Generator(ref $($mutability)* def_id, - ref $($mutability)* generator_substs, - _movability) => { + AggregateKind::Generator( + def_id, + generator_substs, + _movability, + ) => { self.visit_def_id(def_id, location); self.visit_generator_substs(generator_substs, location); } @@ -688,33 +674,33 @@ macro_rules! make_mir_visitor { } fn super_operand(&mut self, - operand: & $($mutability)* Operand<'tcx>, + operand: & $($mutability)? Operand<'tcx>, location: Location) { - match *operand { - Operand::Copy(ref $($mutability)* place) => { + match operand { + Operand::Copy(place) => { self.visit_place( place, PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy), location ); } - Operand::Move(ref $($mutability)* place) => { + Operand::Move(place) => { self.visit_place( place, PlaceContext::NonMutatingUse(NonMutatingUseContext::Move), location ); } - Operand::Constant(ref $($mutability)* constant) => { + Operand::Constant(constant) => { self.visit_constant(constant, location); } } } fn super_ascribe_user_ty(&mut self, - place: & $($mutability)* Place<'tcx>, - _variance: & $($mutability)* ty::Variance, - user_ty: & $($mutability)* UserTypeProjection<'tcx>, + place: & $($mutability)? Place<'tcx>, + _variance: & $($mutability)? ty::Variance, + user_ty: & $($mutability)? UserTypeProjection, location: Location) { self.visit_place( place, @@ -725,9 +711,8 @@ macro_rules! make_mir_visitor { } fn super_retag(&mut self, - _fn_entry: & $($mutability)* bool, - _two_phase: & $($mutability)* bool, - place: & $($mutability)* Place<'tcx>, + _kind: & $($mutability)? RetagKind, + place: & $($mutability)? Place<'tcx>, location: Location) { self.visit_place( place, @@ -737,45 +722,30 @@ macro_rules! make_mir_visitor { } fn super_place(&mut self, - place: & $($mutability)* Place<'tcx>, + place: & $($mutability)? Place<'tcx>, context: PlaceContext<'tcx>, location: Location) { - match *place { - Place::Local(ref $($mutability)* local) => { + match place { + Place::Base(PlaceBase::Local(local)) => { self.visit_local(local, context, location); } - Place::Static(ref $($mutability)* static_) => { - self.visit_static(static_, context, location); + Place::Base(PlaceBase::Static(box Static { kind, ty })) => { + if let StaticKind::Static(def_id) = kind { + self.visit_def_id(& $($mutability)? *def_id, location) + } + self.visit_ty(& $($mutability)? 
*ty, TyContext::Location(location)); } - Place::Promoted(ref $($mutability)* promoted) => { - self.visit_ty(& $($mutability)* promoted.1, TyContext::Location(location)); - }, - Place::Projection(ref $($mutability)* proj) => { + Place::Projection(proj) => { self.visit_projection(proj, context, location); } } } - fn super_static(&mut self, - static_: & $($mutability)* Static<'tcx>, - _context: PlaceContext<'tcx>, - location: Location) { - let Static { - ref $($mutability)* def_id, - ref $($mutability)* ty, - } = *static_; - self.visit_def_id(def_id, location); - self.visit_ty(ty, TyContext::Location(location)); - } - fn super_projection(&mut self, - proj: & $($mutability)* PlaceProjection<'tcx>, + proj: & $($mutability)? PlaceProjection<'tcx>, context: PlaceContext<'tcx>, location: Location) { - let Projection { - ref $($mutability)* base, - ref $($mutability)* elem, - } = *proj; + let Projection { base, elem } = proj; let context = if context.is_mutating_use() { PlaceContext::MutatingUse(MutatingUseContext::Projection) } else { @@ -786,17 +756,17 @@ macro_rules! make_mir_visitor { } fn super_projection_elem(&mut self, - proj: & $($mutability)* PlaceElem<'tcx>, + proj: & $($mutability)? PlaceElem<'tcx>, location: Location) { - match *proj { + match proj { ProjectionElem::Deref => { } ProjectionElem::Subslice { from: _, to: _ } => { } - ProjectionElem::Field(_field, ref $($mutability)* ty) => { + ProjectionElem::Field(_field, ty) => { self.visit_ty(ty, TyContext::Location(location)); } - ProjectionElem::Index(ref $($mutability)* local) => { + ProjectionElem::Index(local) => { self.visit_local( local, PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy), @@ -807,31 +777,31 @@ macro_rules! make_mir_visitor { min_length: _, from_end: _ } => { } - ProjectionElem::Downcast(_adt_def, _variant_index) => { + ProjectionElem::Downcast(_name, _variant_index) => { } } } fn super_local_decl(&mut self, local: Local, - local_decl: & $($mutability)* LocalDecl<'tcx>) { + local_decl: & $($mutability)? LocalDecl<'tcx>) { let LocalDecl { mutability: _, - ref $($mutability)* ty, - ref $($mutability)* user_ty, + ty, + user_ty, name: _, - ref $($mutability)* source_info, - ref $($mutability)* visibility_scope, + source_info, + visibility_scope, internal: _, is_user_variable: _, is_block_tail: _, - } = *local_decl; + } = local_decl; self.visit_ty(ty, TyContext::LocalDecl { local, source_info: *source_info, }); - for (user_ty, _) in & $($mutability)* user_ty.contents { + for (user_ty, _) in & $($mutability)? user_ty.contents { self.visit_user_type_projection(user_ty); } self.visit_source_info(source_info); @@ -839,7 +809,7 @@ macro_rules! make_mir_visitor { } fn super_source_scope(&mut self, - _scope: & $($mutability)* SourceScope) { + _scope: & $($mutability)? SourceScope) { } fn super_branch(&mut self, @@ -848,14 +818,14 @@ macro_rules! make_mir_visitor { } fn super_constant(&mut self, - constant: & $($mutability)* Constant<'tcx>, + constant: & $($mutability)? Constant<'tcx>, location: Location) { let Constant { - ref $($mutability)* span, - ref $($mutability)* ty, - ref $($mutability)* user_ty, - ref $($mutability)* literal, - } = *constant; + span, + ty, + user_ty, + literal, + } = constant; self.visit_span(span); self.visit_ty(ty, TyContext::Location(location)); @@ -863,17 +833,17 @@ macro_rules! make_mir_visitor { self.visit_const(literal, location); } - fn super_def_id(&mut self, _def_id: & $($mutability)* DefId) { + fn super_def_id(&mut self, _def_id: & $($mutability)? 
DefId) { } - fn super_span(&mut self, _span: & $($mutability)* Span) { + fn super_span(&mut self, _span: & $($mutability)? Span) { } - fn super_source_info(&mut self, source_info: & $($mutability)* SourceInfo) { + fn super_source_info(&mut self, source_info: & $($mutability)? SourceInfo) { let SourceInfo { - ref $($mutability)* span, - ref $($mutability)* scope, - } = *source_info; + span, + scope, + } = source_info; self.visit_span(span); self.visit_source_scope(scope); @@ -881,51 +851,49 @@ macro_rules! make_mir_visitor { fn super_user_type_projection( &mut self, - ty: & $($mutability)* UserTypeProjection<'tcx>, + _ty: & $($mutability)? UserTypeProjection, ) { - let UserTypeProjection { - ref $($mutability)* base, - projs: _, // Note: Does not visit projection elems! - } = *ty; - self.visit_user_type_annotation(base); } fn super_user_type_annotation( &mut self, - _ty: & $($mutability)* UserTypeAnnotation<'tcx>, + _index: UserTypeAnnotationIndex, + ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>, ) { + self.visit_span(& $($mutability)? ty.span); + self.visit_ty(& $($mutability)? ty.inferred_ty, TyContext::UserTy(ty.span)); } - fn super_ty(&mut self, _ty: & $($mutability)* Ty<'tcx>) { + fn super_ty(&mut self, _ty: & $($mutability)? Ty<'tcx>) { } - fn super_region(&mut self, _region: & $($mutability)* ty::Region<'tcx>) { + fn super_region(&mut self, _region: & $($mutability)? ty::Region<'tcx>) { } - fn super_const(&mut self, _const: & $($mutability)* &'tcx ty::Const<'tcx>) { + fn super_const(&mut self, _const: & $($mutability)? &'tcx ty::Const<'tcx>) { } - fn super_substs(&mut self, _substs: & $($mutability)* &'tcx Substs<'tcx>) { + fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) { } fn super_generator_substs(&mut self, - _substs: & $($mutability)* GeneratorSubsts<'tcx>) { + _substs: & $($mutability)? GeneratorSubsts<'tcx>) { } fn super_closure_substs(&mut self, - _substs: & $($mutability)* ClosureSubsts<'tcx>) { + _substs: & $($mutability)? ClosureSubsts<'tcx>) { } // Convenience methods - fn visit_location(&mut self, mir: & $($mutability)* Mir<'tcx>, location: Location) { - let basic_block = & $($mutability)* mir[location.block]; + fn visit_location(&mut self, mir: & $($mutability)? Mir<'tcx>, location: Location) { + let basic_block = & $($mutability)? mir[location.block]; if basic_block.statements.len() == location.statement_index { - if let Some(ref $($mutability)* terminator) = basic_block.terminator { + if let Some(ref $($mutability)? terminator) = basic_block.terminator { self.visit_terminator(location.block, terminator, location) } } else { - let statement = & $($mutability)* + let statement = & $($mutability)? basic_block.statements[location.statement_index]; self.visit_statement(location.block, statement, location) } @@ -974,6 +942,9 @@ pub enum TyContext { source_info: SourceInfo, }, + /// The inferred type of a user type annotation. + UserTy(Span), + /// The return type of the function. 
ReturnTy(SourceInfo), diff --git a/src/librustc/query/mod.rs b/src/librustc/query/mod.rs new file mode 100644 index 0000000000000..d0ad2c90668a5 --- /dev/null +++ b/src/librustc/query/mod.rs @@ -0,0 +1,1067 @@ +use crate::ty::query::QueryDescription; +use crate::ty::query::queries; +use crate::ty::{self, ParamEnvAnd, Ty, TyCtxt}; +use crate::ty::subst::SubstsRef; +use crate::dep_graph::SerializedDepNodeIndex; +use crate::hir::def_id::{CrateNum, DefId, DefIndex}; +use crate::mir::interpret::GlobalId; +use crate::traits; +use crate::traits::query::{ + CanonicalPredicateGoal, CanonicalProjectionGoal, + CanonicalTyGoal, CanonicalTypeOpAscribeUserTypeGoal, + CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, CanonicalTypeOpProvePredicateGoal, + CanonicalTypeOpNormalizeGoal, +}; + +use std::borrow::Cow; +use syntax_pos::symbol::InternedString; + + +// Each of these queries corresponds to a function pointer field in the +// `Providers` struct for requesting a value of that type, and a method +// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way +// which memoizes and does dep-graph tracking, wrapping around the actual +// `Providers` that the driver creates (using several `rustc_*` crates). +// +// The result type of each query must implement `Clone`, and additionally +// `ty::query::values::Value`, which produces an appropriate placeholder +// (error) value if the query resulted in a query cycle. +// Queries marked with `fatal_cycle` do not need the latter implementation, +// as they will raise an fatal error on query cycles instead. +rustc_queries! { + Other { + /// Records the type of every item. + query type_of(key: DefId) -> Ty<'tcx> { + cache { key.is_local() } + } + + /// Maps from the `DefId` of an item (trait/struct/enum/fn) to its + /// associated generics. + query generics_of(key: DefId) -> &'tcx ty::Generics { + cache { key.is_local() } + load_cached(tcx, id) { + let generics: Option = tcx.queries.on_disk_cache + .try_load_query_result(tcx, id); + generics.map(|x| tcx.alloc_generics(x)) + } + } + + /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the + /// predicates (where-clauses) that must be proven true in order + /// to reference it. This is almost always the "predicates query" + /// that you want. + /// + /// `predicates_of` builds on `predicates_defined_on` -- in fact, + /// it is almost always the same as that query, except for the + /// case of traits. For traits, `predicates_of` contains + /// an additional `Self: Trait<...>` predicate that users don't + /// actually write. This reflects the fact that to invoke the + /// trait (e.g., via `Default::default`) you must supply types + /// that actually implement the trait. (However, this extra + /// predicate gets in the way of some checks, which are intended + /// to operate over only the actual where-clauses written by the + /// user.) + query predicates_of(_: DefId) -> Lrc> {} + + query native_libraries(_: CrateNum) -> Lrc> { + desc { "looking up the native libraries of a linked crate" } + } + + query lint_levels(_: CrateNum) -> Lrc { + eval_always + desc { "computing the lint levels for items in this crate" } + } + } + + Codegen { + query is_panic_runtime(_: CrateNum) -> bool { + fatal_cycle + desc { "checking if the crate is_panic_runtime" } + } + } + + Codegen { + /// Set of all the `DefId`s in this crate that have MIR associated with + /// them. This includes all the body owners, but also things like struct + /// constructors. 
+ query mir_keys(_: CrateNum) -> &'tcx DefIdSet { + desc { "getting a list of all mir_keys" } + } + + /// Maps DefId's that have an associated Mir to the result + /// of the MIR qualify_consts pass. The actual meaning of + /// the value isn't known except to the pass itself. + query mir_const_qualif(key: DefId) -> (u8, Lrc>) { + cache { key.is_local() } + } + + /// Fetch the MIR for a given `DefId` right after it's built - this includes + /// unreachable code. + query mir_built(_: DefId) -> &'tcx Steal> {} + + /// Fetch the MIR for a given `DefId` up till the point where it is + /// ready for const evaluation. + /// + /// See the README for the `mir` module for details. + query mir_const(_: DefId) -> &'tcx Steal> { + no_hash + } + + query mir_validated(_: DefId) -> &'tcx Steal> { + no_hash + } + + /// MIR after our optimization passes have run. This is MIR that is ready + /// for codegen. This is also the only query that can fetch non-local MIR, at present. + query optimized_mir(key: DefId) -> &'tcx mir::Mir<'tcx> { + cache { key.is_local() } + load_cached(tcx, id) { + let mir: Option> = tcx.queries.on_disk_cache + .try_load_query_result(tcx, id); + mir.map(|x| tcx.alloc_mir(x)) + } + } + } + + TypeChecking { + // Erases regions from `ty` to yield a new type. + // Normally you would just use `tcx.erase_regions(&value)`, + // however, which uses this query as a kind of cache. + query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> { + // This query is not expected to have input -- as a result, it + // is not a good candidates for "replay" because it is essentially a + // pure function of its input (and hence the expectation is that + // no caller would be green **apart** from just these + // queries). Making it anonymous avoids hashing the result, which + // may save a bit of time. + anon + no_force + desc { "erasing regions from `{:?}`", ty } + } + + query program_clauses_for(_: DefId) -> Clauses<'tcx> { + desc { "generating chalk-style clauses" } + } + + query program_clauses_for_env(_: traits::Environment<'tcx>) -> Clauses<'tcx> { + no_force + desc { "generating chalk-style clauses for environment" } + } + + // Get the chalk-style environment of the given item. + query environment(_: DefId) -> traits::Environment<'tcx> { + desc { "return a chalk-style environment" } + } + } + + Linking { + query wasm_import_module_map(_: CrateNum) -> Lrc> { + desc { "wasm import module map" } + } + } + + Other { + /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the + /// predicates (where-clauses) directly defined on it. This is + /// equal to the `explicit_predicates_of` predicates plus the + /// `inferred_outlives_of` predicates. + query predicates_defined_on(_: DefId) + -> Lrc> {} + + /// Returns the predicates written explicit by the user. + query explicit_predicates_of(_: DefId) + -> Lrc> {} + + /// Returns the inferred outlives predicates (e.g., for `struct + /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`). + query inferred_outlives_of(_: DefId) -> Lrc>> {} + + /// Maps from the `DefId` of a trait to the list of + /// super-predicates. This is a subset of the full list of + /// predicates. We store these in a separate map because we must + /// evaluate them even during type conversion, often before the + /// full predicates are available (note that supertraits have + /// additional acyclicity requirements). 
+ query super_predicates_of(key: DefId) -> Lrc> { + desc { |tcx| "computing the supertraits of `{}`", tcx.def_path_str(key) } + } + + /// To avoid cycles within the predicates of a single item we compute + /// per-type-parameter predicates for resolving `T::AssocTy`. + query type_param_predicates(key: (DefId, DefId)) + -> Lrc> { + no_force + desc { |tcx| "computing the bounds for type parameter `{}`", { + let id = tcx.hir().as_local_hir_id(key.1).unwrap(); + tcx.hir().ty_param_name(id) + }} + } + + query trait_def(_: DefId) -> &'tcx ty::TraitDef {} + query adt_def(_: DefId) -> &'tcx ty::AdtDef {} + query adt_destructor(_: DefId) -> Option {} + + // The cycle error here should be reported as an error by `check_representable`. + // We consider the type as Sized in the meanwhile to avoid + // further errors (done in impl Value for AdtSizedConstraint). + // Use `cycle_delay_bug` to delay the cycle error here to be emitted later + // in case we accidentally otherwise don't emit an error. + query adt_sized_constraint( + _: DefId + ) -> AdtSizedConstraint<'tcx> { + cycle_delay_bug + } + + query adt_dtorck_constraint( + _: DefId + ) -> Result, NoSolution> {} + + /// True if this is a const fn, use the `is_const_fn` to know whether your crate actually + /// sees it as const fn (e.g., the const-fn-ness might be unstable and you might not have + /// the feature gate active) + /// + /// **Do not call this function manually.** It is only meant to cache the base data for the + /// `is_const_fn` function. + query is_const_fn_raw(key: DefId) -> bool { + desc { |tcx| "checking if item is const fn: `{}`", tcx.def_path_str(key) } + } + + /// Returns true if calls to the function may be promoted + /// + /// This is either because the function is e.g., a tuple-struct or tuple-variant + /// constructor, or because it has the `#[rustc_promotable]` attribute. The attribute should + /// be removed in the future in favour of some form of check which figures out whether the + /// function does not inspect the bits of any of its arguments (so is essentially just a + /// constructor function). + query is_promotable_const_fn(_: DefId) -> bool {} + + /// True if this is a foreign item (i.e., linked via `extern { ... }`). + query is_foreign_item(_: DefId) -> bool {} + + /// Get a map with the variance of every item; use `item_variance` + /// instead. + query crate_variances(_: CrateNum) -> Lrc { + desc { "computing the variances for items in this crate" } + } + + /// Maps from def-id of a type or region parameter to its + /// (inferred) variance. + query variances_of(_: DefId) -> Lrc> {} + } + + TypeChecking { + /// Maps from def-id of a type to its (inferred) outlives. + query inferred_outlives_crate(_: CrateNum) + -> Lrc> { + desc { "computing the inferred outlives predicates for items in this crate" } + } + } + + Other { + /// Maps from an impl/trait def-id to a list of the def-ids of its items + query associated_item_def_ids(_: DefId) -> Lrc> {} + + /// Maps from a trait item to the trait item "descriptor" + query associated_item(_: DefId) -> ty::AssociatedItem {} + + query impl_trait_ref(_: DefId) -> Option> {} + query impl_polarity(_: DefId) -> hir::ImplPolarity {} + + query issue33140_self_ty(_: DefId) -> Option> {} + } + + TypeChecking { + /// Maps a DefId of a type to a list of its inherent impls. + /// Contains implementations of methods that are inherent to a type. + /// Methods in these implementations don't need to be exported. 
+ query inherent_impls(_: DefId) -> Lrc> { + eval_always + } + } + + TypeChecking { + /// The result of unsafety-checking this `DefId`. + query unsafety_check_result(_: DefId) -> mir::UnsafetyCheckResult {} + + /// HACK: when evaluated, this reports a "unsafe derive on repr(packed)" error + query unsafe_derive_on_repr_packed(_: DefId) -> () {} + + /// The signature of functions and closures. + query fn_sig(_: DefId) -> ty::PolyFnSig<'tcx> {} + } + + Other { + query lint_mod(key: DefId) -> () { + desc { |tcx| "linting {}", key.describe_as_module(tcx) } + } + + /// Checks the attributes in the module + query check_mod_attrs(key: DefId) -> () { + desc { |tcx| "checking attributes in {}", key.describe_as_module(tcx) } + } + + query check_mod_unstable_api_usage(key: DefId) -> () { + desc { |tcx| "checking for unstable API usage in {}", key.describe_as_module(tcx) } + } + + /// Checks the loops in the module + query check_mod_loops(key: DefId) -> () { + desc { |tcx| "checking loops in {}", key.describe_as_module(tcx) } + } + + query check_mod_item_types(key: DefId) -> () { + desc { |tcx| "checking item types in {}", key.describe_as_module(tcx) } + } + + query check_mod_privacy(key: DefId) -> () { + desc { |tcx| "checking privacy in {}", key.describe_as_module(tcx) } + } + + query check_mod_intrinsics(key: DefId) -> () { + desc { |tcx| "checking intrinsics in {}", key.describe_as_module(tcx) } + } + + query check_mod_liveness(key: DefId) -> () { + desc { |tcx| "checking liveness of variables in {}", key.describe_as_module(tcx) } + } + + query check_mod_impl_wf(key: DefId) -> () { + desc { |tcx| "checking that impls are well-formed in {}", key.describe_as_module(tcx) } + } + + query collect_mod_item_types(key: DefId) -> () { + desc { |tcx| "collecting item types in {}", key.describe_as_module(tcx) } + } + + /// Caches CoerceUnsized kinds for impls on custom types. + query coerce_unsized_info(_: DefId) + -> ty::adjustment::CoerceUnsizedInfo {} + } + + TypeChecking { + query typeck_item_bodies(_: CrateNum) -> () { + desc { "type-checking all item bodies" } + } + + query typeck_tables_of(key: DefId) -> &'tcx ty::TypeckTables<'tcx> { + cache { key.is_local() } + load_cached(tcx, id) { + let typeck_tables: Option> = tcx + .queries.on_disk_cache + .try_load_query_result(tcx, id); + + typeck_tables.map(|tables| tcx.alloc_tables(tables)) + } + } + } + + Other { + query used_trait_imports(_: DefId) -> Lrc {} + } + + TypeChecking { + query has_typeck_tables(_: DefId) -> bool {} + + query coherent_trait(def_id: DefId) -> () { + desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) } + } + } + + BorrowChecking { + query borrowck(_: DefId) -> Lrc {} + + /// Borrow checks the function body. If this is a closure, returns + /// additional requirements that the closure's creator must verify. + query mir_borrowck(_: DefId) -> mir::BorrowCheckResult<'tcx> {} + } + + TypeChecking { + /// Gets a complete map from all types to their inherent impls. + /// Not meant to be used directly outside of coherence. + /// (Defined only for `LOCAL_CRATE`.) + query crate_inherent_impls(k: CrateNum) + -> Lrc { + eval_always + desc { "all inherent impls defined in crate `{:?}`", k } + } + + /// Checks all types in the crate for overlap in their inherent impls. Reports errors. + /// Not meant to be used directly outside of coherence. + /// (Defined only for `LOCAL_CRATE`.) 
+ query crate_inherent_impls_overlap_check(_: CrateNum) + -> () { + eval_always + desc { "check for overlap between inherent impls defined in this crate" } + } + } + + Other { + /// Evaluate a constant without running sanity checks + /// + /// **Do not use this** outside const eval. Const eval uses this to break query cycles + /// during validation. Please add a comment to every use site explaining why using + /// `const_eval` isn't sufficient + query const_eval_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> ConstEvalRawResult<'tcx> { + no_force + desc { |tcx| + "const-evaluating `{}`", + tcx.def_path_str(key.value.instance.def.def_id()) + } + cache { true } + load_cached(tcx, id) { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) + } + } + + /// Results of evaluating const items or constants embedded in + /// other items (such as enum variant explicit discriminants). + query const_eval(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) + -> ConstEvalResult<'tcx> { + no_force + desc { |tcx| + "const-evaluating + checking `{}`", + tcx.def_path_str(key.value.instance.def.def_id()) + } + cache { true } + load_cached(tcx, id) { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) + } + } + } + + TypeChecking { + query check_match(_: DefId) -> () {} + + /// Performs part of the privacy check and computes "access levels". + query privacy_access_levels(_: CrateNum) -> Lrc { + eval_always + desc { "privacy access levels" } + } + query check_private_in_public(_: CrateNum) -> () { + eval_always + desc { "checking for private elements in public interfaces" } + } + } + + Other { + query reachable_set(_: CrateNum) -> ReachableSet { + desc { "reachability" } + } + + /// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body; + /// in the case of closures, this will be redirected to the enclosing function. + query region_scope_tree(_: DefId) -> Lrc {} + + query mir_shims(key: ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx> { + no_force + desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) } + } + + query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName { + no_force + desc { "computing the symbol for `{}`", key } + cache { true } + } + + query describe_def(_: DefId) -> Option {} + query def_span(_: DefId) -> Span { + // FIXME(mw): DefSpans are not really inputs since they are derived from + // HIR. But at the moment HIR hashing still contains some hacks that allow + // to make type debuginfo to be source location independent. Declaring + // DefSpan an input makes sure that changes to these are always detected + // regardless of HIR hashing. + eval_always + } + query lookup_stability(_: DefId) -> Option<&'tcx attr::Stability> {} + query lookup_deprecation_entry(_: DefId) -> Option {} + query item_attrs(_: DefId) -> Lrc<[ast::Attribute]> {} + } + + Codegen { + query codegen_fn_attrs(_: DefId) -> CodegenFnAttrs {} + } + + Other { + query fn_arg_names(_: DefId) -> Vec {} + /// Gets the rendered value of the specified constant or associated constant. + /// Used by rustdoc. 
+ query rendered_const(_: DefId) -> String {} + query impl_parent(_: DefId) -> Option {} + } + + TypeChecking { + query trait_of_item(_: DefId) -> Option {} + query const_is_rvalue_promotable_to_static(key: DefId) -> bool { + desc { |tcx| + "const checking if rvalue is promotable to static `{}`", + tcx.def_path_str(key) + } + cache { true } + } + query rvalue_promotable_map(key: DefId) -> Lrc { + desc { |tcx| + "checking which parts of `{}` are promotable to static", + tcx.def_path_str(key) + } + } + } + + Codegen { + query is_mir_available(key: DefId) -> bool { + desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) } + } + } + + Other { + query vtable_methods(key: ty::PolyTraitRef<'tcx>) + -> &'tcx [Option<(DefId, SubstsRef<'tcx>)>] { + no_force + desc { |tcx| "finding all methods for trait {}", tcx.def_path_str(key.def_id()) } + } + } + + Codegen { + query codegen_fulfill_obligation( + key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) + ) -> Vtable<'tcx, ()> { + no_force + cache { true } + desc { |tcx| + "checking if `{}` fulfills its obligations", + tcx.def_path_str(key.1.def_id()) + } + } + } + + TypeChecking { + query trait_impls_of(key: DefId) -> Lrc { + desc { |tcx| "trait impls of `{}`", tcx.def_path_str(key) } + } + query specialization_graph_of(_: DefId) -> &'tcx specialization_graph::Graph {} + query is_object_safe(key: DefId) -> bool { + desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(key) } + } + + /// Gets the ParameterEnvironment for a given item; this environment + /// will be in "user-facing" mode, meaning that it is suitabe for + /// type-checking etc, and it does not normalize specializable + /// associated types. This is almost always what you want, + /// unless you are doing MIR optimizations, in which case you + /// might want to use `reveal_all()` method to change modes. + query param_env(_: DefId) -> ty::ParamEnv<'tcx> {} + + /// Trait selection queries. These are best used by invoking `ty.is_copy_modulo_regions()`, + /// `ty.is_copy()`, etc, since that will prune the environment where possible. + query is_copy_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool { + no_force + desc { "computing whether `{}` is `Copy`", env.value } + } + query is_sized_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool { + no_force + desc { "computing whether `{}` is `Sized`", env.value } + } + query is_freeze_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool { + no_force + desc { "computing whether `{}` is freeze", env.value } + } + + // The cycle error here should be reported as an error by `check_representable`. + // We consider the type as not needing drop in the meanwhile to avoid + // further errors (done in impl Value for NeedsDrop). + // Use `cycle_delay_bug` to delay the cycle error here to be emitted later + // in case we accidentally otherwise don't emit an error. 
+ query needs_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> NeedsDrop { + cycle_delay_bug + no_force + desc { "computing whether `{}` needs drop", env.value } + } + + query layout_raw( + env: ty::ParamEnvAnd<'tcx, Ty<'tcx>> + ) -> Result<&'tcx ty::layout::LayoutDetails, ty::layout::LayoutError<'tcx>> { + no_force + desc { "computing layout of `{}`", env.value } + } + } + + Other { + query dylib_dependency_formats(_: CrateNum) + -> Lrc> { + desc { "dylib dependency formats of crate" } + } + } + + Codegen { + query is_compiler_builtins(_: CrateNum) -> bool { + fatal_cycle + desc { "checking if the crate is_compiler_builtins" } + } + query has_global_allocator(_: CrateNum) -> bool { + fatal_cycle + desc { "checking if the crate has_global_allocator" } + } + query has_panic_handler(_: CrateNum) -> bool { + fatal_cycle + desc { "checking if the crate has_panic_handler" } + } + query is_sanitizer_runtime(_: CrateNum) -> bool { + fatal_cycle + desc { "query a crate is #![sanitizer_runtime]" } + } + query is_profiler_runtime(_: CrateNum) -> bool { + fatal_cycle + desc { "query a crate is #![profiler_runtime]" } + } + query panic_strategy(_: CrateNum) -> PanicStrategy { + fatal_cycle + desc { "query a crate's configured panic strategy" } + } + query is_no_builtins(_: CrateNum) -> bool { + fatal_cycle + desc { "test whether a crate has #![no_builtins]" } + } + + query extern_crate(_: DefId) -> Lrc> { + eval_always + desc { "getting crate's ExternCrateData" } + } + } + + TypeChecking { + query specializes(_: (DefId, DefId)) -> bool { + no_force + desc { "computing whether impls specialize one another" } + } + query in_scope_traits_map(_: DefIndex) + -> Option>>>> { + eval_always + desc { "traits in scope at a block" } + } + } + + Other { + query module_exports(_: DefId) -> Option>> { + eval_always + } + } + + TypeChecking { + query impl_defaultness(_: DefId) -> hir::Defaultness {} + + query check_item_well_formed(_: DefId) -> () {} + query check_trait_item_well_formed(_: DefId) -> () {} + query check_impl_item_well_formed(_: DefId) -> () {} + } + + Linking { + // The DefIds of all non-generic functions and statics in the given crate + // that can be reached from outside the crate. + // + // We expect this items to be available for being linked to. + // + // This query can also be called for LOCAL_CRATE. In this case it will + // compute which items will be reachable to other crates, taking into account + // the kind of crate that is currently compiled. Crates with only a + // C interface have fewer reachable things. + // + // Does not include external symbols that don't have a corresponding DefId, + // like the compiler-generated `main` function and so on. + query reachable_non_generics(_: CrateNum) + -> Lrc> { + desc { "looking up the exported symbols of a crate" } + } + query is_reachable_non_generic(_: DefId) -> bool {} + query is_unreachable_local_definition(_: DefId) -> bool {} + } + + Codegen { + query upstream_monomorphizations( + k: CrateNum + ) -> Lrc, CrateNum>>>> { + desc { "collecting available upstream monomorphizations `{:?}`", k } + } + query upstream_monomorphizations_for(_: DefId) + -> Option, CrateNum>>> {} + } + + Other { + query foreign_modules(_: CrateNum) -> Lrc> { + desc { "looking up the foreign modules of a linked crate" } + } + + /// Identifies the entry-point (e.g., the `main` function) for a given + /// crate, returning `None` if there is no entry point (such as for library crates). 
+ query entry_fn(_: CrateNum) -> Option<(DefId, EntryFnType)> { + desc { "looking up the entry function of a crate" } + } + query plugin_registrar_fn(_: CrateNum) -> Option { + desc { "looking up the plugin registrar for a crate" } + } + query proc_macro_decls_static(_: CrateNum) -> Option { + desc { "looking up the derive registrar for a crate" } + } + query crate_disambiguator(_: CrateNum) -> CrateDisambiguator { + eval_always + desc { "looking up the disambiguator a crate" } + } + query crate_hash(_: CrateNum) -> Svh { + eval_always + desc { "looking up the hash a crate" } + } + query original_crate_name(_: CrateNum) -> Symbol { + eval_always + desc { "looking up the original name a crate" } + } + query extra_filename(_: CrateNum) -> String { + eval_always + desc { "looking up the extra filename for a crate" } + } + } + + TypeChecking { + query implementations_of_trait(_: (CrateNum, DefId)) + -> Lrc> { + no_force + desc { "looking up implementations of a trait in a crate" } + } + query all_trait_implementations(_: CrateNum) + -> Lrc> { + desc { "looking up all (?) trait implementations" } + } + } + + Other { + query dllimport_foreign_items(_: CrateNum) + -> Lrc> { + desc { "dllimport_foreign_items" } + } + query is_dllimport_foreign_item(_: DefId) -> bool {} + query is_statically_included_foreign_item(_: DefId) -> bool {} + query native_library_kind(_: DefId) + -> Option {} + } + + Linking { + query link_args(_: CrateNum) -> Lrc> { + eval_always + desc { "looking up link arguments for a crate" } + } + } + + BorrowChecking { + // Lifetime resolution. See `middle::resolve_lifetimes`. + query resolve_lifetimes(_: CrateNum) -> Lrc { + desc { "resolving lifetimes" } + } + query named_region_map(_: DefIndex) -> + Option>> { + desc { "looking up a named region" } + } + query is_late_bound_map(_: DefIndex) -> + Option>> { + desc { "testing if a region is late bound" } + } + query object_lifetime_defaults_map(_: DefIndex) + -> Option>>>> { + desc { "looking up lifetime defaults for a region" } + } + } + + TypeChecking { + query visibility(_: DefId) -> ty::Visibility {} + } + + Other { + query dep_kind(_: CrateNum) -> DepKind { + eval_always + desc { "fetching what a dependency looks like" } + } + query crate_name(_: CrateNum) -> Symbol { + eval_always + desc { "fetching what a crate is named" } + } + query item_children(_: DefId) -> Lrc> {} + query extern_mod_stmt_cnum(_: DefId) -> Option {} + + query get_lib_features(_: CrateNum) -> Lrc { + eval_always + desc { "calculating the lib features map" } + } + query defined_lib_features(_: CrateNum) + -> Lrc)>> { + desc { "calculating the lib features defined in a crate" } + } + query get_lang_items(_: CrateNum) -> Lrc { + eval_always + desc { "calculating the lang items map" } + } + query defined_lang_items(_: CrateNum) -> Lrc> { + desc { "calculating the lang items defined in a crate" } + } + query missing_lang_items(_: CrateNum) -> Lrc> { + desc { "calculating the missing lang items in a crate" } + } + query visible_parent_map(_: CrateNum) + -> Lrc> { + desc { "calculating the visible parent map" } + } + query missing_extern_crate_item(_: CrateNum) -> bool { + eval_always + desc { "seeing if we're missing an `extern crate` item for this crate" } + } + query used_crate_source(_: CrateNum) -> Lrc { + eval_always + desc { "looking at the source for a crate" } + } + query postorder_cnums(_: CrateNum) -> Lrc> { + eval_always + desc { "generating a postorder list of CrateNums" } + } + + query freevars(_: DefId) -> Option>> { + eval_always + } + query 
maybe_unused_trait_import(_: DefId) -> bool { + eval_always + } + query maybe_unused_extern_crates(_: CrateNum) + -> Lrc> { + eval_always + desc { "looking up all possibly unused extern crates" } + } + query names_imported_by_glob_use(_: DefId) + -> Lrc> { + eval_always + } + + query stability_index(_: CrateNum) -> Lrc> { + eval_always + desc { "calculating the stability index for the local crate" } + } + query all_crate_nums(_: CrateNum) -> Lrc> { + eval_always + desc { "fetching all foreign CrateNum instances" } + } + + /// A vector of every trait accessible in the whole crate + /// (i.e., including those from subcrates). This is used only for + /// error reporting. + query all_traits(_: CrateNum) -> Lrc> { + desc { "fetching all foreign and local traits" } + } + } + + Linking { + query exported_symbols(_: CrateNum) + -> Arc, SymbolExportLevel)>> { + desc { "exported_symbols" } + } + } + + Codegen { + query collect_and_partition_mono_items(_: CrateNum) + -> (Arc, Arc>>>) { + eval_always + desc { "collect_and_partition_mono_items" } + } + query is_codegened_item(_: DefId) -> bool {} + query codegen_unit(_: InternedString) -> Arc> { + no_force + desc { "codegen_unit" } + } + query backend_optimization_level(_: CrateNum) -> OptLevel { + desc { "optimization level used by backend" } + } + } + + Other { + query output_filenames(_: CrateNum) -> Arc { + eval_always + desc { "output_filenames" } + } + } + + TypeChecking { + /// Do not call this query directly: invoke `normalize` instead. + query normalize_projection_ty( + goal: CanonicalProjectionGoal<'tcx> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "normalizing `{:?}`", goal } + } + + /// Do not call this query directly: invoke `normalize_erasing_regions` instead. + query normalize_ty_after_erasing_regions( + goal: ParamEnvAnd<'tcx, Ty<'tcx>> + ) -> Ty<'tcx> { + no_force + desc { "normalizing `{:?}`", goal } + } + + query implied_outlives_bounds( + goal: CanonicalTyGoal<'tcx> + ) -> Result< + Lrc>>>>, + NoSolution, + > { + no_force + desc { "computing implied outlives bounds for `{:?}`", goal } + } + + /// Do not call this query directly: invoke `infcx.at().dropck_outlives()` instead. + query dropck_outlives( + goal: CanonicalTyGoal<'tcx> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "computing dropck types for `{:?}`", goal } + } + + /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or + /// `infcx.predicate_must_hold()` instead. 
+ query evaluate_obligation( + goal: CanonicalPredicateGoal<'tcx> + ) -> Result { + no_force + desc { "evaluating trait selection obligation `{}`", goal.value.value } + } + + query evaluate_goal( + goal: traits::ChalkCanonicalGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution + > { + no_force + desc { "evaluating trait selection obligation `{}`", goal.value.goal } + } + + /// Do not call this query directly: part of the `Eq` type-op + query type_op_ascribe_user_type( + goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + > { + no_force + desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Eq` type-op + query type_op_eq( + goal: CanonicalTypeOpEqGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + > { + no_force + desc { "evaluating `type_op_eq` `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Subtype` type-op + query type_op_subtype( + goal: CanonicalTypeOpSubtypeGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + > { + no_force + desc { "evaluating `type_op_subtype` `{:?}`", goal } + } + + /// Do not call this query directly: part of the `ProvePredicate` type-op + query type_op_prove_predicate( + goal: CanonicalTypeOpProvePredicateGoal<'tcx> + ) -> Result< + Lrc>>, + NoSolution, + > { + no_force + desc { "evaluating `type_op_prove_predicate` `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Normalize` type-op + query type_op_normalize_ty( + goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "normalizing `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Normalize` type-op + query type_op_normalize_predicate( + goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "normalizing `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Normalize` type-op + query type_op_normalize_poly_fn_sig( + goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "normalizing `{:?}`", goal } + } + + /// Do not call this query directly: part of the `Normalize` type-op + query type_op_normalize_fn_sig( + goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>> + ) -> Result< + Lrc>>>, + NoSolution, + > { + no_force + desc { "normalizing `{:?}`", goal } + } + + query substitute_normalize_and_test_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool { + no_force + desc { |tcx| + "testing substituted normalized predicates:`{}`", + tcx.def_path_str(key.0) + } + } + + query method_autoderef_steps( + goal: CanonicalTyGoal<'tcx> + ) -> MethodAutoderefStepsResult<'tcx> { + no_force + desc { "computing autoderef types for `{:?}`", goal } + } + } + + Other { + query target_features_whitelist(_: CrateNum) -> Lrc>> { + eval_always + desc { "looking up the whitelist of target features" } + } + + // Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning. 
+ query instance_def_size_estimate(def: ty::InstanceDef<'tcx>) + -> usize { + no_force + desc { |tcx| "estimating size for `{}`", tcx.def_path_str(def.def_id()) } + } + + query features_query(_: CrateNum) -> Lrc { + eval_always + desc { "looking up enabled feature gates" } + } + } +} diff --git a/src/librustc/session/code_stats.rs b/src/librustc/session/code_stats.rs index b8f5ce3cdbc7c..730abacf6f691 100644 --- a/src/librustc/session/code_stats.rs +++ b/src/librustc/session/code_stats.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_target::abi::{Align, Size}; use rustc_data_structures::fx::{FxHashSet}; use std::cmp::{self, Ordering}; diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs index b58d80e24857d..6a7b3c39dedb8 100644 --- a/src/librustc/session/config.rs +++ b/src/librustc/session/config.rs @@ -1,26 +1,17 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Contains infrastructure for configuring the compiler, including parsing //! command line options. use std::str::FromStr; -use session::{early_error, early_warn, Session}; -use session::search_paths::SearchPath; +use crate::session::{early_error, early_warn, Session}; +use crate::session::search_paths::SearchPath; -use rustc_target::spec::{LinkerFlavor, PanicStrategy, RelroLevel}; +use rustc_target::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, RelroLevel}; use rustc_target::spec::{Target, TargetTriple}; -use lint; -use middle::cstore; +use crate::lint; +use crate::middle::cstore; +use syntax; use syntax::ast::{self, IntTy, UintTy, MetaItemKind}; use syntax::source_map::{FileName, FilePathMapping}; use syntax::edition::{Edition, EDITION_NAME_LIST, DEFAULT_EDITION}; @@ -68,6 +59,8 @@ pub enum OptLevel { SizeMin, // -Oz } +impl_stable_hash_via_hash!(OptLevel); + /// This is what the `LtoCli` values get mapped to after resolving defaults and /// and taking other command line options into account. 
#[derive(Clone, Copy, PartialEq, Hash, Debug)] @@ -104,18 +97,18 @@ pub enum LtoCli { } #[derive(Clone, PartialEq, Hash)] -pub enum CrossLangLto { +pub enum LinkerPluginLto { LinkerPlugin(PathBuf), LinkerPluginAuto, Disabled } -impl CrossLangLto { +impl LinkerPluginLto { pub fn enabled(&self) -> bool { match *self { - CrossLangLto::LinkerPlugin(_) | - CrossLangLto::LinkerPluginAuto => true, - CrossLangLto::Disabled => false, + LinkerPluginLto::LinkerPlugin(_) | + LinkerPluginLto::LinkerPluginAuto => true, + LinkerPluginLto::Disabled => false, } } } @@ -419,6 +412,10 @@ top_level_options!( remap_path_prefix: Vec<(PathBuf, PathBuf)> [UNTRACKED], edition: Edition [TRACKED], + + // The list of crates to consider private when + // checking leaked private dependency types in public interfaces + extern_private: Vec [TRACKED], } ); @@ -479,7 +476,7 @@ impl BorrowckMode { } pub enum Input { - /// Load source from file + /// Loads source from file File(PathBuf), Str { /// String that is shown in place of a filename @@ -503,6 +500,13 @@ impl Input { Input::Str { ref mut input, .. } => Some(input), } } + + pub fn source_name(&self) -> FileName { + match *self { + Input::File(ref ifile) => ifile.clone().into(), + Input::Str { ref name, .. } => name.clone(), + } + } } #[derive(Clone, Hash)] @@ -527,7 +531,7 @@ impl OutputFilenames { .unwrap_or_else(|| self.temp_path(flavor, None)) } - /// Get the path where a compilation artifact of the given type for the + /// Gets the path where a compilation artifact of the given type for the /// given codegen unit should be placed on disk. If codegen_unit_name is /// None, a path distinct from those of any codegen unit will be generated. pub fn temp_path(&self, flavor: OutputType, codegen_unit_name: Option<&str>) -> PathBuf { @@ -536,7 +540,7 @@ impl OutputFilenames { } /// Like temp_path, but also supports things where there is no corresponding - /// OutputType, like no-opt-bitcode or lto-bitcode. + /// OutputType, like noopt-bitcode or lto-bitcode. pub fn temp_path_ext(&self, ext: &str, codegen_unit_name: Option<&str>) -> PathBuf { let base = self.out_directory.join(&self.filestem()); @@ -614,12 +618,13 @@ impl Default for Options { cli_forced_thinlto_off: false, remap_path_prefix: Vec::new(), edition: DEFAULT_EDITION, + extern_private: Vec::new() } } } impl Options { - /// True if there is a reason to build the dep graph. + /// Returns `true` if there is a reason to build the dep graph. pub fn build_dep_graph(&self) -> bool { self.incremental.is_some() || self.debugging_opts.dump_dep_graph || self.debugging_opts.query_dep_graph @@ -635,7 +640,7 @@ impl Options { FilePathMapping::new(self.remap_path_prefix.clone()) } - /// True if there will be an output file generated + /// Returns `true` if there will be an output file generated pub fn will_create_output_file(&self) -> bool { !self.debugging_opts.parse_only && // The file is just being parsed !self.debugging_opts.ls // The file is just being queried @@ -659,15 +664,15 @@ impl Options { } } -// The type of entry function, so -// users can have their own entry -// functions -#[derive(Copy, Clone, PartialEq)] +// The type of entry function, so users can have their own entry functions +#[derive(Copy, Clone, PartialEq, Hash, Debug)] pub enum EntryFnType { Main, Start, } +impl_stable_hash_via_hash!(EntryFnType); + #[derive(Copy, PartialEq, PartialOrd, Clone, Ord, Eq, Hash, Debug)] pub enum CrateType { Executable, @@ -795,6 +800,7 @@ macro_rules! 
options { pub const parse_opt_pathbuf: Option<&str> = Some("a path"); pub const parse_list: Option<&str> = Some("a space-separated list of strings"); pub const parse_opt_list: Option<&str> = Some("a space-separated list of strings"); + pub const parse_opt_comma_list: Option<&str> = Some("a comma-separated list of strings"); pub const parse_uint: Option<&str> = Some("a number"); pub const parse_passes: Option<&str> = Some("a space-separated list of passes, or `all`"); @@ -812,19 +818,24 @@ macro_rules! options { Some("crate=integer"); pub const parse_unpretty: Option<&str> = Some("`string` or `string=string`"); + pub const parse_treat_err_as_bug: Option<&str> = + Some("either no value or a number bigger than 0"); pub const parse_lto: Option<&str> = Some("either a boolean (`yes`, `no`, `on`, `off`, etc), `thin`, \ `fat`, or omitted"); - pub const parse_cross_lang_lto: Option<&str> = + pub const parse_linker_plugin_lto: Option<&str> = Some("either a boolean (`yes`, `no`, `on`, `off`, etc), \ or the path to the linker plugin"); + pub const parse_merge_functions: Option<&str> = + Some("one of: `disabled`, `trampolines`, or `aliases`"); } #[allow(dead_code)] mod $mod_set { - use super::{$struct_name, Passes, Sanitizer, LtoCli, CrossLangLto}; - use rustc_target::spec::{LinkerFlavor, PanicStrategy, RelroLevel}; + use super::{$struct_name, Passes, Sanitizer, LtoCli, LinkerPluginLto}; + use rustc_target::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, RelroLevel}; use std::path::PathBuf; + use std::str::FromStr; $( pub fn $opt(cg: &mut $struct_name, v: Option<&str>) -> bool { @@ -916,6 +927,18 @@ macro_rules! options { } } + fn parse_opt_comma_list(slot: &mut Option>, v: Option<&str>) + -> bool { + match v { + Some(s) => { + let v = s.split(',').map(|s| s.to_string()).collect(); + *slot = Some(v); + true + }, + None => false, + } + } + fn parse_uint(slot: &mut usize, v: Option<&str>) -> bool { match v.and_then(|s| s.parse().ok()) { Some(i) => { *slot = i; true }, @@ -1015,6 +1038,13 @@ macro_rules! options { } } + fn parse_treat_err_as_bug(slot: &mut Option, v: Option<&str>) -> bool { + match v { + Some(s) => { *slot = s.parse().ok().filter(|&x| x != 0); slot.unwrap_or(0) != 0 } + None => { *slot = Some(1); true } + } + } + fn parse_lto(slot: &mut LtoCli, v: Option<&str>) -> bool { if v.is_some() { let mut bool_arg = None; @@ -1037,25 +1067,33 @@ macro_rules! options { true } - fn parse_cross_lang_lto(slot: &mut CrossLangLto, v: Option<&str>) -> bool { + fn parse_linker_plugin_lto(slot: &mut LinkerPluginLto, v: Option<&str>) -> bool { if v.is_some() { let mut bool_arg = None; if parse_opt_bool(&mut bool_arg, v) { *slot = if bool_arg.unwrap() { - CrossLangLto::LinkerPluginAuto + LinkerPluginLto::LinkerPluginAuto } else { - CrossLangLto::Disabled + LinkerPluginLto::Disabled }; return true } } *slot = match v { - None => CrossLangLto::LinkerPluginAuto, - Some(path) => CrossLangLto::LinkerPlugin(PathBuf::from(path)), + None => LinkerPluginLto::LinkerPluginAuto, + Some(path) => LinkerPluginLto::LinkerPlugin(PathBuf::from(path)), }; true } + + fn parse_merge_functions(slot: &mut Option, v: Option<&str>) -> bool { + match v.and_then(|s| MergeFunctions::from_str(s).ok()) { + Some(mergefunc) => *slot = Some(mergefunc), + _ => return false, + } + true + } } ) } @@ -1137,6 +1175,10 @@ options! 
{CodegenOptions, CodegenSetter, basic_codegen_options, "allow the linker to link its default libraries"), linker_flavor: Option = (None, parse_linker_flavor, [UNTRACKED], "Linker flavor"), + linker_plugin_lto: LinkerPluginLto = (LinkerPluginLto::Disabled, + parse_linker_plugin_lto, [TRACKED], + "generate build artifacts that are compatible with linker-based LTO."), + } options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, @@ -1158,6 +1200,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "when using two-phase-borrows, allow two phases even for non-autoref `&mut` borrows"), time_passes: bool = (false, parse_bool, [UNTRACKED], "measure time of each rustc pass"), + time: bool = (false, parse_bool, [UNTRACKED], + "measure time of rustc processes"), count_llvm_insns: bool = (false, parse_bool, [UNTRACKED_WITH_WARNING(true, "The output generated by `-Z count_llvm_insns` might not be reliable \ @@ -1191,8 +1235,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "prints the llvm optimization passes being run"), ast_json: bool = (false, parse_bool, [UNTRACKED], "print the AST as JSON and halt"), - query_threads: Option = (None, parse_opt_uint, [UNTRACKED], - "execute queries on a thread pool with N threads"), + threads: Option = (None, parse_opt_uint, [UNTRACKED], + "use a thread pool with N threads"), ast_json_noexpand: bool = (false, parse_bool, [UNTRACKED], "print the pre-expansion AST as JSON and halt"), ls: bool = (false, parse_bool, [UNTRACKED], @@ -1213,10 +1257,12 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, Use with RUST_REGION_GRAPH=help for more info"), parse_only: bool = (false, parse_bool, [UNTRACKED], "parse only; do not compile, assemble, or link"), + dual_proc_macros: bool = (false, parse_bool, [TRACKED], + "load proc macros for both target and host, but only link to the target"), no_codegen: bool = (false, parse_bool, [TRACKED], "run all passes except codegen; no output"), - treat_err_as_bug: bool = (false, parse_bool, [TRACKED], - "treat all errors that occur as bugs"), + treat_err_as_bug: Option = (None, parse_treat_err_as_bug, [TRACKED], + "treat error number `val` that occurs as bug"), report_delayed_bugs: bool = (false, parse_bool, [TRACKED], "immediately print bugs registered with `delay_span_bug`"), external_macro_backtrace: bool = (false, parse_bool, [UNTRACKED], @@ -1225,6 +1271,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "show extended diagnostic help"), continue_parse_after_error: bool = (false, parse_bool, [TRACKED], "attempt to recover from parse errors (experimental)"), + dep_tasks: bool = (false, parse_bool, [UNTRACKED], + "print tasks that execute and the color their dep node gets (requires debug build)"), incremental: Option = (None, parse_opt_string, [UNTRACKED], "enable incremental compilation (experimental)"), incremental_queries: bool = (true, parse_bool, [UNTRACKED], @@ -1237,6 +1285,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "verify incr. comp. 
hashes of green query instances"), incremental_ignore_spans: bool = (false, parse_bool, [UNTRACKED], "ignore spans during ICH computation -- used for testing"), + instrument_mcount: bool = (false, parse_bool, [TRACKED], + "insert function instrument code for mcount-based tracing"), dump_dep_graph: bool = (false, parse_bool, [UNTRACKED], "dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv)"), query_dep_graph: bool = (false, parse_bool, [UNTRACKED], @@ -1271,8 +1321,6 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "set the MIR optimization level (0-3, default: 1)"), mutable_noalias: Option = (None, parse_opt_bool, [TRACKED], "emit noalias metadata for mutable references (default: yes on LLVM >= 6)"), - arg_align_attributes: bool = (false, parse_bool, [TRACKED], - "emit align metadata for reference arguments"), dump_mir: Option = (None, parse_opt_string, [UNTRACKED], "dump MIR state to file. `val` is used to select which passes and functions to dump. For example: @@ -1291,10 +1339,14 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "emit Retagging MIR statements, interpreted e.g., by miri; implies -Zmir-opt-level=0"), perf_stats: bool = (false, parse_bool, [UNTRACKED], "print some performance-related statistics"), + query_stats: bool = (false, parse_bool, [UNTRACKED], + "print some statistics about the query system"), hir_stats: bool = (false, parse_bool, [UNTRACKED], "print some statistics about AST and HIR"), always_encode_mir: bool = (false, parse_bool, [TRACKED], "encode MIR of all functions into the crate metadata"), + unleash_the_miri_inside_of_you: bool = (false, parse_bool, [TRACKED], + "take the breaks off const evaluation. NOTE: this is unsound"), osx_rpath_install_name: bool = (false, parse_bool, [TRACKED], "pass `-install_name @rpath/...` to the macOS linker"), sanitizer: Option = (None, parse_sanitizer, [TRACKED], @@ -1319,12 +1371,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "Disable the instrumentation pre-inliner, useful for profiling / PGO."), relro_level: Option = (None, parse_relro_level, [TRACKED], "choose which RELRO level to use"), - nll_subminimal_causes: bool = (false, parse_bool, [UNTRACKED], - "when tracking region error causes, accept subminimal results for faster execution."), nll_facts: bool = (false, parse_bool, [UNTRACKED], "dump facts from NLL analysis into side files"), - disable_nll_user_type_assert: bool = (false, parse_bool, [UNTRACKED], - "disable user provided type assertion in NLL"), nll_dont_emit_read_for_match: bool = (false, parse_bool, [UNTRACKED], "in match codegen, do not include FakeRead statements (used by mir-borrowck)"), dont_buffer_diagnostics: bool = (false, parse_bool, [UNTRACKED], @@ -1354,10 +1402,15 @@ options! 
{DebuggingOptions, DebuggingSetter, basic_debugging_options, unpretty: Option = (None, parse_unpretty, [UNTRACKED], "Present the input source, unstable (and less-pretty) variants; valid types are any of the types for `--pretty`, as well as: + `expanded`, `expanded,identified`, + `expanded,hygiene` (with internal representations), `flowgraph=` (graphviz formatted flowgraph for node), + `flowgraph,unlabelled=` (unlabelled graphviz formatted flowgraph for node), `everybody_loops` (all function bodies replaced with `loop {}`), - `hir` (the HIR), `hir,identified`, or - `hir,typed` (HIR with types for each node)."), + `hir` (the HIR), `hir,identified`, + `hir,typed` (HIR with types for each node), + `hir-tree` (dump the raw HIR), + `mir` (the MIR), or `mir-cfg` (graphviz formatted MIR)"), run_dsymutil: Option = (None, parse_opt_bool, [TRACKED], "run `dsymutil` and delete intermediate object files"), ui_testing: bool = (false, parse_bool, [UNTRACKED], @@ -1370,24 +1423,27 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options, "make the current crate share its generic instantiations"), chalk: bool = (false, parse_bool, [TRACKED], "enable the experimental Chalk-based trait solving engine"), - cross_lang_lto: CrossLangLto = (CrossLangLto::Disabled, parse_cross_lang_lto, [TRACKED], - "generate build artifacts that are compatible with linker-based LTO."), no_parallel_llvm: bool = (false, parse_bool, [UNTRACKED], "don't run LLVM in parallel (while keeping codegen-units and ThinLTO)"), no_leak_check: bool = (false, parse_bool, [UNTRACKED], "disables the 'leak check' for subtyping; unsound, but useful for tests"), + no_interleave_lints: bool = (false, parse_bool, [UNTRACKED], + "don't interleave execution of lints; allows benchmarking individual lints"), crate_attr: Vec = (Vec::new(), parse_string_push, [TRACKED], "inject the given attribute in the crate"), self_profile: bool = (false, parse_bool, [UNTRACKED], - "run the self profiler"), - profile_json: bool = (false, parse_bool, [UNTRACKED], - "output a json file with profiler results"), + "run the self profiler and output the raw event data"), emit_stack_sizes: bool = (false, parse_bool, [UNTRACKED], "emits a section containing stack size metadata"), plt: Option = (None, parse_opt_bool, [TRACKED], "whether to use the PLT when calling into shared libraries; only has effect for PIC code on systems with ELF binaries (default: PLT is disabled if full relro is enabled)"), + merge_functions: Option = (None, parse_merge_functions, [TRACKED], + "control the operation of the MergeFunctions LLVM pass, taking + the same values as the target option of the same name"), + allow_features: Option> = (None, parse_opt_comma_list, [TRACKED], + "only allow the listed language features to be enabled in code (space separated)"), } pub fn default_lib_output() -> CrateType { @@ -1397,6 +1453,7 @@ pub fn default_lib_output() -> CrateType { pub fn default_configuration(sess: &Session) -> ast::CrateConfig { let end = &sess.target.target.target_endian; let arch = &sess.target.target.arch; + let cpu = &sess.target.target.options.cpu; let wordsz = &sess.target.target.target_pointer_width; let os = &sess.target.target.target_os; let env = &sess.target.target.target_env; @@ -1426,6 +1483,10 @@ pub fn default_configuration(sess: &Session) -> ast::CrateConfig { Symbol::intern("target_vendor"), Some(Symbol::intern(vendor)), )); + if sess.target.target.options.is_specific_cpu() { + ret.insert((Symbol::intern("target_cpu"), Some(Symbol::intern(cpu)))); + } + if 
sess.target.target.options.has_elf_tls { ret.insert((Symbol::intern("target_thread_local"), None)); } @@ -1456,6 +1517,15 @@ pub fn default_configuration(sess: &Session) -> ast::CrateConfig { ret } +/// Converts the crate cfg! configuration from String to Symbol. +/// `rustc_interface::interface::Config` accepts this in the compiler configuration, +/// but the symbol interner is not yet set up then, so we must convert it later. +pub fn to_crate_config(cfg: FxHashSet<(String, Option)>) -> ast::CrateConfig { + cfg.into_iter() + .map(|(a, b)| (Symbol::intern(&a), b.map(|b| Symbol::intern(&b)))) + .collect() +} + pub fn build_configuration(sess: &Session, mut user_cfg: ast::CrateConfig) -> ast::CrateConfig { // Combine the configuration requested by the session (command line) with // some default and generated configuration items @@ -1643,6 +1713,12 @@ pub fn rustc_short_optgroups() -> Vec { "Specify the name of the crate being built", "NAME", ), + opt::opt_s( + "", + "edition", + "Specify which edition of the compiler to use when compiling code.", + EDITION_NAME_LIST, + ), opt::multi_s( "", "emit", @@ -1713,6 +1789,12 @@ pub fn rustc_optgroups() -> Vec { "Specify where an external rust library is located", "NAME=PATH", ), + opt::multi_s( + "", + "extern-private", + "Specify where an extern rust library is located, marking it as a private dependency", + "NAME=PATH", + ), opt::opt_s("", "sysroot", "Override the system root", "PATH"), opt::multi("Z", "", "Set internal debugging options", "FLAG"), opt::opt_s( @@ -1739,12 +1821,6 @@ pub fn rustc_optgroups() -> Vec { `expanded,identified` (fully parenthesized, AST nodes with IDs).", "TYPE", ), - opt::opt_s( - "", - "edition", - "Specify which edition of the compiler to use when compiling code.", - EDITION_NAME_LIST, - ), opt::multi_s( "", "remap-path-prefix", @@ -1756,10 +1832,9 @@ pub fn rustc_optgroups() -> Vec { } // Convert strings provided as --cfg [cfgspec] into a crate_cfg -pub fn parse_cfgspecs(cfgspecs: Vec) -> ast::CrateConfig { - cfgspecs - .into_iter() - .map(|s| { +pub fn parse_cfgspecs(cfgspecs: Vec) -> FxHashSet<(String, Option)> { + syntax::with_globals(move || { + let cfg = cfgspecs.into_iter().map(|s| { let sess = parse::ParseSess::new(FilePathMapping::empty()); let filename = FileName::cfg_spec_source_code(&s); let mut parser = parse::new_parser_from_source_str(&sess, filename, s.to_string()); @@ -1771,7 +1846,7 @@ pub fn parse_cfgspecs(cfgspecs: Vec) -> ast::CrateConfig { match &mut parser.parse_meta_item() { Ok(meta_item) if parser.token == token::Eof => { - if meta_item.ident.segments.len() != 1 { + if meta_item.path.segments.len() != 1 { error!("argument key must be an identifier"); } match &meta_item.node { @@ -1782,7 +1857,8 @@ pub fn parse_cfgspecs(cfgspecs: Vec) -> ast::CrateConfig { error!("argument value must be a string"); } MetaItemKind::NameValue(..) 
| MetaItemKind::Word => { - return (meta_item.name(), meta_item.value_str()); + let ident = meta_item.ident().expect("multi-segment cfg key"); + return (ident.name, meta_item.value_str()); } } } @@ -1791,8 +1867,11 @@ pub fn parse_cfgspecs(cfgspecs: Vec) -> ast::CrateConfig { } error!(r#"expected `key` or `key="value"`"#); - }) - .collect::() + }).collect::(); + cfg.into_iter().map(|(a, b)| { + (a.to_string(), b.map(|b| b.to_string())) + }).collect() + }) } pub fn get_cmd_lint_options(matches: &getopts::Matches, @@ -1820,7 +1899,7 @@ pub fn get_cmd_lint_options(matches: &getopts::Matches, pub fn build_session_options_and_crate_config( matches: &getopts::Matches, -) -> (Options, ast::CrateConfig) { +) -> (Options, FxHashSet<(String, Option)>) { let color = match matches.opt_str("color").as_ref().map(|s| &s[..]) { Some("auto") => ColorConfig::Auto, Some("always") => ColorConfig::Always, @@ -1894,6 +1973,7 @@ pub fn build_session_options_and_crate_config( let crate_types = parse_crate_types_from_list(unparsed_crate_types) .unwrap_or_else(|e| early_error(error_format, &e[..])); + let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format); let mut debugging_opts = build_debugging_options(matches, error_format); @@ -1975,17 +2055,17 @@ pub fn build_session_options_and_crate_config( } } - if debugging_opts.query_threads == Some(0) { + if debugging_opts.threads == Some(0) { early_error( error_format, - "Value for query threads must be a positive nonzero integer", + "Value for threads must be a positive nonzero integer", ); } - if debugging_opts.query_threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() { + if debugging_opts.threads.unwrap_or(1) > 1 && debugging_opts.fuel.is_some() { early_error( error_format, - "Optimization fuel is incompatible with multiple query threads", + "Optimization fuel is incompatible with multiple threads", ); } @@ -2207,8 +2287,18 @@ pub fn build_session_options_and_crate_config( ); } + if matches.opt_present("extern-private") && !debugging_opts.unstable_options { + early_error( + ErrorOutputType::default(), + "'--extern-private' is unstable and only \ + available for nightly builds of rustc." + ) + } + + let extern_private = matches.opt_strs("extern-private"); + let mut externs: BTreeMap<_, BTreeSet<_>> = BTreeMap::new(); - for arg in &matches.opt_strs("extern") { + for arg in matches.opt_strs("extern").into_iter().chain(matches.opt_strs("extern-private")) { let mut parts = arg.splitn(2, '='); let name = parts.next().unwrap_or_else(|| early_error(error_format, "--extern value must not be empty")); @@ -2276,6 +2366,7 @@ pub fn build_session_options_and_crate_config( cli_forced_thinlto_off: disable_thinlto, remap_path_prefix, edition, + extern_private }, cfg, ) @@ -2308,7 +2399,7 @@ pub mod nightly_options { use getopts; use syntax::feature_gate::UnstableFeatures; use super::{ErrorOutputType, OptionStability, RustcOptGroup}; - use session::early_error; + use crate::session::early_error; pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool { is_nightly_build() @@ -2397,16 +2488,16 @@ impl fmt::Display for CrateType { /// we have an opt-in scheme here, so one is hopefully forced to think about /// how the hash should be calculated when adding a new command-line argument. 
mod dep_tracking { - use lint; - use middle::cstore; + use crate::lint; + use crate::middle::cstore; use std::collections::BTreeMap; use std::hash::Hash; use std::path::PathBuf; use std::collections::hash_map::DefaultHasher; use super::{CrateType, DebugInfo, ErrorOutputType, OptLevel, OutputTypes, - Passes, Sanitizer, LtoCli, CrossLangLto}; + Passes, Sanitizer, LtoCli, LinkerPluginLto}; use syntax::feature_gate::UnstableFeatures; - use rustc_target::spec::{PanicStrategy, RelroLevel, TargetTriple}; + use rustc_target::spec::{MergeFunctions, PanicStrategy, RelroLevel, TargetTriple}; use syntax::edition::Edition; pub trait DepTrackingHash { @@ -2449,12 +2540,15 @@ mod dep_tracking { impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option<(String, u64)>); + impl_dep_tracking_hash_via_hash!(Option>); + impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(CrateType); + impl_dep_tracking_hash_via_hash!(MergeFunctions); impl_dep_tracking_hash_via_hash!(PanicStrategy); impl_dep_tracking_hash_via_hash!(RelroLevel); impl_dep_tracking_hash_via_hash!(Passes); @@ -2468,7 +2562,7 @@ mod dep_tracking { impl_dep_tracking_hash_via_hash!(Option); impl_dep_tracking_hash_via_hash!(TargetTriple); impl_dep_tracking_hash_via_hash!(Edition); - impl_dep_tracking_hash_via_hash!(CrossLangLto); + impl_dep_tracking_hash_via_hash!(LinkerPluginLto); impl_dep_tracking_hash_for_sortable_vec_of!(String); impl_dep_tracking_hash_for_sortable_vec_of!(PathBuf); @@ -2528,19 +2622,22 @@ mod dep_tracking { #[cfg(test)] mod tests { - use errors; use getopts; - use lint; - use middle::cstore; - use session::config::{build_configuration, build_session_options_and_crate_config}; - use session::config::{LtoCli, CrossLangLto}; - use session::build_session; - use session::search_paths::SearchPath; + use crate::lint; + use crate::middle::cstore; + use crate::session::config::{ + build_configuration, + build_session_options_and_crate_config, + to_crate_config + }; + use crate::session::config::{LtoCli, LinkerPluginLto}; + use crate::session::build_session; + use crate::session::search_paths::SearchPath; use std::collections::{BTreeMap, BTreeSet}; use std::iter::FromIterator; use std::path::PathBuf; use super::{Externs, OutputType, OutputTypes}; - use rustc_target::spec::{PanicStrategy, RelroLevel}; + use rustc_target::spec::{MergeFunctions, PanicStrategy, RelroLevel}; use syntax::symbol::Symbol; use syntax::edition::{Edition, DEFAULT_EDITION}; use syntax; @@ -2573,7 +2670,7 @@ mod tests { let registry = errors::registry::Registry::new(&[]); let (sessopts, cfg) = build_session_options_and_crate_config(matches); let sess = build_session(sessopts, None, registry); - let cfg = build_configuration(&sess, cfg); + let cfg = build_configuration(&sess, to_crate_config(cfg)); assert!(cfg.contains(&(Symbol::intern("test"), None))); }); } @@ -2591,7 +2688,7 @@ mod tests { let registry = errors::registry::Registry::new(&[]); let (sessopts, cfg) = build_session_options_and_crate_config(matches); let sess = build_session(sessopts, None, registry); - let cfg = build_configuration(&sess, cfg); + let cfg = build_configuration(&sess, to_crate_config(cfg)); let mut test_items = cfg.iter().filter(|&&(name, _)| name == "test"); assert!(test_items.next().is_some()); 
assert!(test_items.next().is_none()); @@ -3066,6 +3163,10 @@ mod tests { opts = reference.clone(); opts.cg.panic = Some(PanicStrategy::Abort); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.cg.linker_plugin_lto = LinkerPluginLto::LinkerPluginAuto; + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); } #[test] @@ -3161,7 +3262,7 @@ mod tests { assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); - opts.debugging_opts.treat_err_as_bug = true; + opts.debugging_opts.treat_err_as_bug = Some(1); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); @@ -3193,7 +3294,11 @@ mod tests { assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); opts = reference.clone(); - opts.debugging_opts.cross_lang_lto = CrossLangLto::LinkerPluginAuto; + opts.debugging_opts.merge_functions = Some(MergeFunctions::Disabled); + assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); + + opts = reference.clone(); + opts.debugging_opts.allow_features = Some(vec![String::from("lang_items")]); assert!(reference.dep_tracking_hash() != opts.dep_tracking_hash()); } diff --git a/src/librustc/session/filesearch.rs b/src/librustc/session/filesearch.rs index c204556d517a8..cf09d45ca38fc 100644 --- a/src/librustc/session/filesearch.rs +++ b/src/librustc/session/filesearch.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_camel_case_types)] pub use self::FileMatch::*; @@ -17,7 +7,7 @@ use std::env; use std::fs; use std::path::{Path, PathBuf}; -use session::search_paths::{SearchPath, PathKind}; +use crate::session::search_paths::{SearchPath, PathKind}; use rustc_fs_util::fix_windows_verbatim_for_gcc; #[derive(Copy, Clone)] @@ -28,6 +18,7 @@ pub enum FileMatch { // A module for searching for libraries +#[derive(Clone)] pub struct FileSearch<'a> { sysroot: &'a Path, triple: &'a str, diff --git a/src/librustc/session/mod.rs b/src/librustc/session/mod.rs index 12b5646e7f1d1..9cd6c30e272f5 100644 --- a/src/librustc/session/mod.rs +++ b/src/librustc/session/mod.rs @@ -1,34 +1,27 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- pub use self::code_stats::{DataTypeKind, SizeKind, FieldInfo, VariantInfo}; use self::code_stats::CodeStats; -use dep_graph::cgu_reuse_tracker::CguReuseTracker; -use hir::def_id::CrateNum; +use crate::dep_graph::cgu_reuse_tracker::CguReuseTracker; +use crate::hir::def_id::CrateNum; use rustc_data_structures::fingerprint::Fingerprint; -use lint; -use lint::builtin::BuiltinLintDiagnostics; -use middle::allocator::AllocatorKind; -use middle::dependency_format; -use session::config::{OutputType, Lto}; -use session::search_paths::{PathKind, SearchPath}; -use util::nodemap::{FxHashMap, FxHashSet}; -use util::common::{duration_to_secs_str, ErrorReported}; -use util::common::ProfileQueriesMsg; +use crate::lint; +use crate::lint::builtin::BuiltinLintDiagnostics; +use crate::middle::allocator::AllocatorKind; +use crate::middle::dependency_format; +use crate::session::config::OutputType; +use crate::session::search_paths::{PathKind, SearchPath}; +use crate::util::nodemap::{FxHashMap, FxHashSet}; +use crate::util::common::{duration_to_secs_str, ErrorReported}; +use crate::util::common::ProfileQueriesMsg; use rustc_data_structures::base_n; -use rustc_data_structures::sync::{self, Lrc, Lock, LockCell, OneThread, Once, RwLock}; +use rustc_data_structures::sync::{ + self, Lrc, Lock, OneThread, Once, RwLock, AtomicU64, AtomicUsize, Ordering, + Ordering::SeqCst, +}; -use errors::{self, DiagnosticBuilder, DiagnosticId, Applicability}; +use errors::{DiagnosticBuilder, DiagnosticId, Applicability}; use errors::emitter::{Emitter, EmitterWriter}; use syntax::ast::{self, NodeId}; use syntax::edition::Edition; @@ -37,11 +30,12 @@ use syntax::json::JsonEmitter; use syntax::source_map; use syntax::parse::{self, ParseSess}; use syntax_pos::{MultiSpan, Span}; -use util::profiling::SelfProfiler; +use crate::util::profiling::SelfProfiler; use rustc_target::spec::{PanicStrategy, RelroLevel, Target, TargetTriple}; use rustc_data_structures::flock; -use jobserver::Client; +use rustc_data_structures::jobserver; +use ::jobserver::Client; use std; use std::cell::{self, Cell, RefCell}; @@ -50,14 +44,22 @@ use std::fmt; use std::io::Write; use std::path::PathBuf; use std::time::Duration; -use std::sync::mpsc; -use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Arc, mpsc}; + +use parking_lot::Mutex as PlMutex; mod code_stats; pub mod config; pub mod filesearch; pub mod search_paths; +pub struct OptimizationFuel { + /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`. + remaining: u64, + /// We're rejecting all further optimizations. + out_of_fuel: bool, +} + /// Represents the data associated with a compilation /// session for a single crate. pub struct Session { @@ -65,13 +67,9 @@ pub struct Session { pub host: Target, pub opts: config::Options, pub host_tlib_path: SearchPath, - /// This is `None` if the host and target are the same. + /// `None` if the host and target are the same. pub target_tlib_path: Option, pub parse_sess: ParseSess, - /// For a library crate, this is always none - pub entry_fn: Once>, - pub plugin_registrar_fn: Once>, - pub proc_macro_decls_static: Once>, pub sysroot: PathBuf, /// The name of the root source file of the crate, in the local file system. /// `None` means that there is no source file. @@ -90,7 +88,7 @@ pub struct Session { /// in order to avoid redundantly verbose output (Issue #24690, #44953). 
pub one_time_diagnostics: Lock, String)>>, pub plugin_llvm_passes: OneThread>>, - pub plugin_attributes: OneThread>>, + pub plugin_attributes: Lock>, pub crate_types: Once>, pub dependency_formats: Once, /// The crate_disambiguator is constructed out of all the `-C metadata` @@ -109,7 +107,7 @@ pub struct Session { /// The maximum length of types during monomorphization. pub type_length_limit: Once, - /// The maximum number of stackframes allowed in const eval + /// The maximum number of stackframes allowed in const eval. pub const_eval_stack_frame_limit: usize, /// The metadata::creader module may inject an allocator/panic_runtime @@ -128,11 +126,11 @@ pub struct Session { /// `-Zquery-dep-graph` is specified. pub cgu_reuse_tracker: CguReuseTracker, - /// Used by -Z profile-queries in util::common + /// Used by `-Z profile-queries` in `util::common`. pub profile_channel: Lock>>, /// Used by -Z self-profile - pub self_profiling: Lock, + pub self_profiling: Option>>, /// Some measurements that are being gathered during compilation. pub perf_stats: PerfStats, @@ -142,37 +140,39 @@ pub struct Session { next_node_id: OneThread>, - /// If -zfuel=crate=n is specified, Some(crate). + /// If `-zfuel=crate=n` is specified, `Some(crate)`. optimization_fuel_crate: Option, - /// If -zfuel=crate=n is specified, initially set to n. Otherwise 0. - optimization_fuel_limit: LockCell, - /// We're rejecting all further optimizations. - out_of_fuel: LockCell, + + /// Tracks fuel info if `-zfuel=crate=n` is specified. + optimization_fuel: Lock, // The next two are public because the driver needs to read them. - /// If -zprint-fuel=crate, Some(crate). + /// If `-zprint-fuel=crate`, `Some(crate)`. pub print_fuel_crate: Option, /// Always set to zero and incremented so that we can print fuel expended by a crate. - pub print_fuel: LockCell, + pub print_fuel: AtomicU64, /// Loaded up early on in the initialization of this `Session` to avoid /// false positives about a job server in our environment. pub jobserver: Client, - /// Metadata about the allocators for the current crate being compiled + /// Metadata about the allocators for the current crate being compiled. pub has_global_allocator: Once, - /// Metadata about the panic handlers for the current crate being compiled + /// Metadata about the panic handlers for the current crate being compiled. pub has_panic_handler: Once, /// Cap lint level specified by a driver specifically. pub driver_lint_caps: FxHashMap, + + /// `Span`s of trait methods that weren't found to avoid emitting object safety errors + pub trait_methods_not_found: Lock>, } pub struct PerfStats { - /// The accumulated time spent on computing symbol hashes + /// The accumulated time spent on computing symbol hashes. pub symbol_hash_time: Lock, - /// The accumulated time spent decoding def path tables from metadata + /// The accumulated time spent decoding def path tables from metadata. pub decode_def_path_tables_time: Lock, /// Total number of values canonicalized queries constructed. 
pub queries_canonicalized: AtomicUsize, @@ -314,7 +314,7 @@ impl Session { pub fn abort_if_errors(&self) { self.diagnostic().abort_if_errors(); } - pub fn compile_status(&self) -> Result<(), CompileIncomplete> { + pub fn compile_status(&self) -> Result<(), ErrorReported> { compile_result_from_err_count(self.err_count()) } pub fn track_errors(&self, f: F) -> Result @@ -406,6 +406,9 @@ impl Session { pub fn next_node_id(&self) -> NodeId { self.reserve_node_ids(1) } + pub(crate) fn current_node_id_count(&self) -> usize { + self.next_node_id.get().as_u32() as usize + } pub fn diagnostic<'a>(&'a self) -> &'a errors::Handler { &self.parse_sess.span_diagnostic } @@ -435,7 +438,7 @@ impl Session { } DiagnosticBuilderMethod::SpanSuggestion(suggestion) => { let span = span_maybe.expect("span_suggestion_* needs a span"); - diag_builder.span_suggestion_with_applicability( + diag_builder.span_suggestion( span, message, suggestion, @@ -501,6 +504,9 @@ impl Session { self.opts.debugging_opts.verbose } pub fn time_passes(&self) -> bool { + self.opts.debugging_opts.time_passes || self.opts.debugging_opts.time + } + pub fn time_extended(&self) -> bool { self.opts.debugging_opts.time_passes } pub fn profile_queries(&self) -> bool { @@ -510,6 +516,9 @@ impl Session { pub fn profile_queries_and_keys(&self) -> bool { self.opts.debugging_opts.profile_queries_and_keys } + pub fn instrument_mcount(&self) -> bool { + self.opts.debugging_opts.instrument_mcount + } pub fn count_llvm_insns(&self) -> bool { self.opts.debugging_opts.count_llvm_insns } @@ -536,7 +545,7 @@ impl Session { self.opts.debugging_opts.print_llvm_passes } - /// Get the features enabled for the current compilation session. + /// Gets the features enabled for the current compilation session. /// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents /// dependency tracking. Use tcx.features() instead. #[inline] @@ -674,7 +683,11 @@ impl Session { } pub fn must_not_eliminate_frame_pointers(&self) -> bool { - if let Some(x) = self.opts.cg.force_frame_pointers { + // "mcount" function relies on stack pointer. 
+ // See https://sourceware.org/binutils/docs/gprof/Implementation.html + if self.instrument_mcount() { + true + } else if let Some(x) = self.opts.cg.force_frame_pointers { x } else { !self.target.target.options.eliminate_frame_pointer @@ -823,21 +836,24 @@ impl Session { } } - pub fn profiler ()>(&self, f: F) { - if self.opts.debugging_opts.self_profile || self.opts.debugging_opts.profile_json { - let mut profiler = self.self_profiling.borrow_mut(); - f(&mut profiler); - } - } + #[inline(never)] + #[cold] + fn profiler_active ()>(&self, f: F) { + match &self.self_profiling { + None => bug!("profiler_active() called but there was no profiler active"), + Some(profiler) => { + let mut p = profiler.lock(); - pub fn print_profiler_results(&self) { - let mut profiler = self.self_profiling.borrow_mut(); - profiler.print_results(&self.opts); + f(&mut p); + } + } } - pub fn save_json_results(&self) { - let profiler = self.self_profiling.borrow(); - profiler.save_results(&self.opts); + #[inline(always)] + pub fn profiler ()>(&self, f: F) { + if unlikely!(self.self_profiling.is_some()) { + self.profiler_active(f) + } } pub fn print_perf_stats(&self) { @@ -863,21 +879,21 @@ impl Session { let mut ret = true; if let Some(ref c) = self.optimization_fuel_crate { if c == crate_name { - assert_eq!(self.query_threads(), 1); - let fuel = self.optimization_fuel_limit.get(); - ret = fuel != 0; - if fuel == 0 && !self.out_of_fuel.get() { + assert_eq!(self.threads(), 1); + let mut fuel = self.optimization_fuel.lock(); + ret = fuel.remaining != 0; + if fuel.remaining == 0 && !fuel.out_of_fuel { eprintln!("optimization-fuel-exhausted: {}", msg()); - self.out_of_fuel.set(true); - } else if fuel > 0 { - self.optimization_fuel_limit.set(fuel - 1); + fuel.out_of_fuel = true; + } else if fuel.remaining > 0 { + fuel.remaining -= 1; } } } if let Some(ref c) = self.print_fuel_crate { if c == crate_name { - assert_eq!(self.query_threads(), 1); - self.print_fuel.set(self.print_fuel.get() + 1); + assert_eq!(self.threads(), 1); + self.print_fuel.fetch_add(1, SeqCst); } } ret @@ -885,14 +901,14 @@ impl Session { /// Returns the number of query threads that should be used for this /// compilation - pub fn query_threads_from_opts(opts: &config::Options) -> usize { - opts.debugging_opts.query_threads.unwrap_or(1) + pub fn threads_from_count(query_threads: Option) -> usize { + query_threads.unwrap_or(::num_cpus::get()) } /// Returns the number of query threads that should be used for this /// compilation - pub fn query_threads(&self) -> usize { - Self::query_threads_from_opts(&self.opts) + pub fn threads(&self) -> usize { + Self::threads_from_count(self.opts.debugging_opts.threads) } /// Returns the number of codegen units that should be used for this @@ -975,7 +991,7 @@ impl Session { self.opts.edition } - /// True if we cannot skip the PLT for shared library calls. + /// Returns `true` if we cannot skip the PLT for shared library calls. pub fn needs_plt(&self) -> bool { // Check if the current target usually needs PLT to be enabled. // The user can use the command line flag to override it. 
@@ -1009,16 +1025,67 @@ pub fn build_session( local_crate_source_file, registry, Lrc::new(source_map::SourceMap::new(file_path_mapping)), - None, + DiagnosticOutput::Default, + Default::default(), ) } +fn default_emitter( + sopts: &config::Options, + registry: errors::registry::Registry, + source_map: &Lrc, + emitter_dest: Option>, +) -> Box { + match (sopts.error_format, emitter_dest) { + (config::ErrorOutputType::HumanReadable(color_config), None) => Box::new( + EmitterWriter::stderr( + color_config, + Some(source_map.clone()), + false, + sopts.debugging_opts.teach, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::HumanReadable(_), Some(dst)) => Box::new( + EmitterWriter::new(dst, Some(source_map.clone()), false, false) + .ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Json(pretty), None) => Box::new( + JsonEmitter::stderr( + Some(registry), + source_map.clone(), + pretty, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Json(pretty), Some(dst)) => Box::new( + JsonEmitter::new( + dst, + Some(registry), + source_map.clone(), + pretty, + ).ui_testing(sopts.debugging_opts.ui_testing), + ), + (config::ErrorOutputType::Short(color_config), None) => Box::new( + EmitterWriter::stderr(color_config, Some(source_map.clone()), true, false), + ), + (config::ErrorOutputType::Short(_), Some(dst)) => { + Box::new(EmitterWriter::new(dst, Some(source_map.clone()), true, false)) + } + } +} + +pub enum DiagnosticOutput { + Default, + Raw(Box), + Emitter(Box) +} + pub fn build_session_with_source_map( sopts: config::Options, local_crate_source_file: Option, registry: errors::registry::Registry, source_map: Lrc, - emitter_dest: Option>, + diagnostics_output: DiagnosticOutput, + lint_caps: FxHashMap, ) -> Session { // FIXME: This is not general enough to make the warning lint completely override // normal diagnostic warnings, since the warning lint can also be denied and changed @@ -1040,42 +1107,13 @@ pub fn build_session_with_source_map( let external_macro_backtrace = sopts.debugging_opts.external_macro_backtrace; - let emitter: Box = - match (sopts.error_format, emitter_dest) { - (config::ErrorOutputType::HumanReadable(color_config), None) => Box::new( - EmitterWriter::stderr( - color_config, - Some(source_map.clone()), - false, - sopts.debugging_opts.teach, - ).ui_testing(sopts.debugging_opts.ui_testing), - ), - (config::ErrorOutputType::HumanReadable(_), Some(dst)) => Box::new( - EmitterWriter::new(dst, Some(source_map.clone()), false, false) - .ui_testing(sopts.debugging_opts.ui_testing), - ), - (config::ErrorOutputType::Json(pretty), None) => Box::new( - JsonEmitter::stderr( - Some(registry), - source_map.clone(), - pretty, - ).ui_testing(sopts.debugging_opts.ui_testing), - ), - (config::ErrorOutputType::Json(pretty), Some(dst)) => Box::new( - JsonEmitter::new( - dst, - Some(registry), - source_map.clone(), - pretty, - ).ui_testing(sopts.debugging_opts.ui_testing), - ), - (config::ErrorOutputType::Short(color_config), None) => Box::new( - EmitterWriter::stderr(color_config, Some(source_map.clone()), true, false), - ), - (config::ErrorOutputType::Short(_), Some(dst)) => { - Box::new(EmitterWriter::new(dst, Some(source_map.clone()), true, false)) - } - }; + let emitter = match diagnostics_output { + DiagnosticOutput::Default => default_emitter(&sopts, registry, &source_map, None), + DiagnosticOutput::Raw(write) => { + default_emitter(&sopts, registry, &source_map, Some(write)) + } + 
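A sketch of the call pattern the new `DiagnosticOutput` plumbing is meant to support; the caller below is hypothetical, and the `dyn Write + Send` bound on `Raw` is assumed from context.

    // `sopts`, `registry`, and `source_map` are assumed to be in scope; `Vec<u8>`
    // implements `Write`, so rendered diagnostics accumulate in memory instead of stderr.
    let sink: Box<dyn std::io::Write + Send> = Box::new(Vec::<u8>::new());
    let sess = build_session_with_source_map(
        sopts,
        None,                         // local_crate_source_file
        registry,
        source_map,
        DiagnosticOutput::Raw(sink),
        Default::default(),           // driver lint caps
    );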
DiagnosticOutput::Emitter(emitter) => emitter, + }; let diagnostic_handler = errors::Handler::with_emitter_and_flags( emitter, @@ -1089,15 +1127,20 @@ pub fn build_session_with_source_map( }, ); - build_session_(sopts, local_crate_source_file, diagnostic_handler, source_map) + build_session_(sopts, local_crate_source_file, diagnostic_handler, source_map, lint_caps) } -pub fn build_session_( +fn build_session_( sopts: config::Options, local_crate_source_file: Option, span_diagnostic: errors::Handler, source_map: Lrc, + driver_lint_caps: FxHashMap, ) -> Session { + let self_profiler = + if sopts.debugging_opts.self_profile { Some(Arc::new(PlMutex::new(SelfProfiler::new()))) } + else { None }; + let host_triple = TargetTriple::from_triple(config::host_triple()); let host = Target::search(&host_triple).unwrap_or_else(|e| span_diagnostic @@ -1127,10 +1170,12 @@ pub fn build_session_( local_crate_source_file.map(|path| file_path_mapping.map_prefix(path).0); let optimization_fuel_crate = sopts.debugging_opts.fuel.as_ref().map(|i| i.0.clone()); - let optimization_fuel_limit = - LockCell::new(sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0)); + let optimization_fuel = Lock::new(OptimizationFuel { + remaining: sopts.debugging_opts.fuel.as_ref().map(|i| i.1).unwrap_or(0), + out_of_fuel: false, + }); let print_fuel_crate = sopts.debugging_opts.print_fuel.clone(); - let print_fuel = LockCell::new(0); + let print_fuel = AtomicU64::new(0); let working_dir = env::current_dir().unwrap_or_else(|e| p_s.span_diagnostic @@ -1152,10 +1197,6 @@ pub fn build_session_( host_tlib_path, target_tlib_path, parse_sess: p_s, - // For a library crate, this is always none - entry_fn: Once::new(), - plugin_registrar_fn: Once::new(), - proc_macro_decls_static: Once::new(), sysroot, local_crate_source_file, working_dir, @@ -1163,7 +1204,7 @@ pub fn build_session_( buffered_lints: Lock::new(Some(Default::default())), one_time_diagnostics: Default::default(), plugin_llvm_passes: OneThread::new(RefCell::new(Vec::new())), - plugin_attributes: OneThread::new(RefCell::new(Vec::new())), + plugin_attributes: Lock::new(Vec::new()), crate_types: Once::new(), dependency_formats: Once::new(), crate_disambiguator: Once::new(), @@ -1177,7 +1218,7 @@ pub fn build_session_( imported_macro_spans: OneThread::new(RefCell::new(FxHashMap::default())), incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)), cgu_reuse_tracker, - self_profiling: Lock::new(SelfProfiler::new()), + self_profiling: self_profiler, profile_channel: Lock::new(None), perf_stats: PerfStats { symbol_hash_time: Lock::new(Duration::from_secs(0)), @@ -1188,39 +1229,14 @@ pub fn build_session_( }, code_stats: Default::default(), optimization_fuel_crate, - optimization_fuel_limit, + optimization_fuel, print_fuel_crate, print_fuel, - out_of_fuel: LockCell::new(false), - // Note that this is unsafe because it may misinterpret file descriptors - // on Unix as jobserver file descriptors. We hopefully execute this near - // the beginning of the process though to ensure we don't get false - // positives, or in other words we try to execute this before we open - // any file descriptors ourselves. - // - // Pick a "reasonable maximum" if we don't otherwise have - // a jobserver in our environment, capping out at 32 so we - // don't take everything down by hogging the process run queue. - // The fixed number is used to have deterministic compilation - // across machines. 
- // - // Also note that we stick this in a global because there could be - // multiple `Session` instances in this process, and the jobserver is - // per-process. - jobserver: unsafe { - static mut GLOBAL_JOBSERVER: *mut Client = 0 as *mut _; - static INIT: std::sync::Once = std::sync::ONCE_INIT; - INIT.call_once(|| { - let client = Client::from_env().unwrap_or_else(|| { - Client::new(32).expect("failed to create jobserver") - }); - GLOBAL_JOBSERVER = Box::into_raw(Box::new(client)); - }); - (*GLOBAL_JOBSERVER).clone() - }, + jobserver: jobserver::client(), has_global_allocator: Once::new(), has_panic_handler: Once::new(), - driver_lint_caps: Default::default(), + driver_lint_caps, + trait_methods_not_found: Lock::new(Default::default()), }; validate_commandline_args_with_session_available(&sess); @@ -1231,20 +1247,6 @@ pub fn build_session_( // If it is useful to have a Session available already for validating a // commandline argument, you can do so here. fn validate_commandline_args_with_session_available(sess: &Session) { - - if sess.opts.incremental.is_some() { - match sess.lto() { - Lto::Thin | - Lto::Fat => { - sess.err("can't perform LTO when compiling incrementally"); - } - Lto::ThinLocal | - Lto::No => { - // This is fine - } - } - } - // Since we don't know if code in an rlib will be linked to statically or // dynamically downstream, rustc generates `__imp_` symbols that help the // MSVC linker deal with this lack of knowledge (#27438). Unfortunately, @@ -1252,7 +1254,7 @@ fn validate_commandline_args_with_session_available(sess: &Session) { // bitcode during ThinLTO. Therefore we disallow dynamic linking on MSVC // when compiling for LLD ThinLTO. This way we can validly just not generate // the `dllimport` attributes and `__imp_` symbols in that case. - if sess.opts.debugging_opts.cross_lang_lto.enabled() && + if sess.opts.cg.linker_plugin_lto.enabled() && sess.opts.cg.prefer_dynamic && sess.target.target.options.is_like_msvc { sess.err("Linker plugin based LTO is not supported together with \ @@ -1320,7 +1322,7 @@ pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! 
{ Box::new(EmitterWriter::stderr(color_config, None, true, false)) } }; - let handler = errors::Handler::with_emitter(true, false, emitter); + let handler = errors::Handler::with_emitter(true, None, emitter); handler.emit(&MultiSpan::new(), msg, errors::Level::Fatal); errors::FatalError.raise(); } @@ -1335,26 +1337,16 @@ pub fn early_warn(output: config::ErrorOutputType, msg: &str) { Box::new(EmitterWriter::stderr(color_config, None, true, false)) } }; - let handler = errors::Handler::with_emitter(true, false, emitter); + let handler = errors::Handler::with_emitter(true, None, emitter); handler.emit(&MultiSpan::new(), msg, errors::Level::Warning); } -#[derive(Copy, Clone, Debug)] -pub enum CompileIncomplete { - Stopped, - Errored(ErrorReported), -} -impl From for CompileIncomplete { - fn from(err: ErrorReported) -> CompileIncomplete { - CompileIncomplete::Errored(err) - } -} -pub type CompileResult = Result<(), CompileIncomplete>; +pub type CompileResult = Result<(), ErrorReported>; pub fn compile_result_from_err_count(err_count: usize) -> CompileResult { if err_count == 0 { Ok(()) } else { - Err(CompileIncomplete::Errored(ErrorReported)) + Err(ErrorReported) } } diff --git a/src/librustc/session/search_paths.rs b/src/librustc/session/search_paths.rs index 5c44a07f84341..1b6a1739b0248 100644 --- a/src/librustc/session/search_paths.rs +++ b/src/librustc/session/search_paths.rs @@ -1,16 +1,7 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::path::{Path, PathBuf}; -use session::{early_error, config}; -use session::filesearch::make_target_lib_path; +use rustc_macros::HashStable; +use crate::session::{early_error, config}; +use crate::session::filesearch::make_target_lib_path; #[derive(Clone, Debug)] pub struct SearchPath { @@ -19,7 +10,7 @@ pub struct SearchPath { pub files: Vec, } -#[derive(Eq, PartialEq, Clone, Copy, Debug, PartialOrd, Ord, Hash)] +#[derive(Eq, PartialEq, Clone, Copy, Debug, PartialOrd, Ord, Hash, HashStable)] pub enum PathKind { Native, Crate, diff --git a/src/librustc/traits/auto_trait.rs b/src/librustc/traits/auto_trait.rs index fff77816e7535..e93351197fe47 100644 --- a/src/librustc/traits/auto_trait.rs +++ b/src/librustc/traits/auto_trait.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Support code for rustdoc and external tools . You really don't //! want to be using this unless you need to. 
@@ -16,12 +6,12 @@ use super::*; use std::collections::hash_map::Entry; use std::collections::VecDeque; -use infer::region_constraints::{Constraint, RegionConstraintData}; -use infer::InferCtxt; +use crate::infer::region_constraints::{Constraint, RegionConstraintData}; +use crate::infer::InferCtxt; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use ty::fold::TypeFolder; -use ty::{Region, RegionVid}; +use crate::ty::fold::TypeFolder; +use crate::ty::{Region, RegionVid}; // FIXME(twk): this is obviously not nice to duplicate like that #[derive(Eq, PartialEq, Hash, Copy, Clone, Debug)] @@ -67,7 +57,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { AutoTraitFinder { tcx } } - /// Make a best effort to determine whether and under which conditions an auto trait is + /// Makes a best effort to determine whether and under which conditions an auto trait is /// implemented for a type. For example, if you have /// /// ``` @@ -212,7 +202,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { full_env, ty, trait_did, - ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID), + ObligationCause::misc(DUMMY_SP, hir::DUMMY_HIR_ID), ); fulfill.select_all_or_error(&infcx).unwrap_or_else(|e| { panic!( @@ -325,7 +315,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { user_env.caller_bounds.iter().cloned().collect(); let mut new_env = param_env.clone(); - let dummy_cause = ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID); + let dummy_cause = ObligationCause::misc(DUMMY_SP, hir::DUMMY_HIR_ID); while let Some(pred) = predicates.pop_front() { infcx.clear_caches(); @@ -398,12 +388,17 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { computed_preds.extend(user_computed_preds.iter().cloned()); let normalized_preds = elaborate_predicates(tcx, computed_preds.clone().into_iter().collect()); - new_env = ty::ParamEnv::new(tcx.mk_predicates(normalized_preds), param_env.reveal); + new_env = ty::ParamEnv::new( + tcx.mk_predicates(normalized_preds), + param_env.reveal, + None + ); } let final_user_env = ty::ParamEnv::new( tcx.mk_predicates(user_computed_preds.into_iter()), user_env.reveal, + None ); debug!( "evaluate_nested_obligations(ty_did={:?}, trait_did={:?}): succeeded with '{:?}' \ @@ -636,7 +631,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { finished_map } - fn is_param_no_infer(&self, substs: &Substs<'_>) -> bool { + fn is_param_no_infer(&self, substs: SubstsRef<'_>) -> bool { return self.is_of_param(substs.type_at(0)) && !substs.types().any(|t| t.has_infer_types()); } @@ -674,7 +669,7 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { select: &mut SelectionContext<'c, 'd, 'cx>, only_projections: bool, ) -> bool { - let dummy_cause = ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID); + let dummy_cause = ObligationCause::misc(DUMMY_SP, hir::DUMMY_HIR_ID); for (obligation, mut predicate) in nested .map(|o| (o.clone(), o.predicate.clone())) @@ -742,9 +737,9 @@ impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { } // We can only call poly_project_and_unify_type when our predicate's - // Ty is an inference variable - otherwise, there won't be anything to + // Ty contains an inference variable - otherwise, there won't be anything to // unify - if p.ty().skip_binder().is_ty_infer() { + if p.ty().skip_binder().has_infer_types() { debug!("Projecting and unifying projection predicate {:?}", predicate); match poly_project_and_unify_type(select, &obligation.with(p.clone())) { diff --git a/src/librustc/traits/chalk_fulfill.rs b/src/librustc/traits/chalk_fulfill.rs new file mode 100644 index 0000000000000..d9eb6d8157dfb --- /dev/null +++ 
b/src/librustc/traits/chalk_fulfill.rs @@ -0,0 +1,165 @@ +use crate::traits::{ + Environment, + InEnvironment, + TraitEngine, + ObligationCause, + PredicateObligation, + FulfillmentError, + FulfillmentErrorCode, + SelectionError, +}; +use crate::traits::query::NoSolution; +use crate::infer::InferCtxt; +use crate::infer::canonical::{Canonical, OriginalQueryValues}; +use crate::ty::{self, Ty}; +use rustc_data_structures::fx::FxHashSet; + +pub type CanonicalGoal<'tcx> = Canonical<'tcx, InEnvironment<'tcx, ty::Predicate<'tcx>>>; + +pub struct FulfillmentContext<'tcx> { + obligations: FxHashSet>>, +} + +impl FulfillmentContext<'tcx> { + crate fn new() -> Self { + FulfillmentContext { + obligations: FxHashSet::default(), + } + } +} + +fn in_environment( + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + obligation: PredicateObligation<'tcx> +) -> InEnvironment<'tcx, PredicateObligation<'tcx>> { + assert!(!infcx.is_in_snapshot()); + let obligation = infcx.resolve_type_vars_if_possible(&obligation); + + let environment = match obligation.param_env.def_id { + Some(def_id) => infcx.tcx.environment(def_id), + None if obligation.param_env.caller_bounds.is_empty() => Environment { + clauses: ty::List::empty(), + }, + _ => bug!("non-empty `ParamEnv` with no def-id"), + }; + + InEnvironment { + environment, + goal: obligation, + } +} + +impl TraitEngine<'tcx> for FulfillmentContext<'tcx> { + fn normalize_projection_type( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + _param_env: ty::ParamEnv<'tcx>, + projection_ty: ty::ProjectionTy<'tcx>, + _cause: ObligationCause<'tcx>, + ) -> Ty<'tcx> { + infcx.tcx.mk_ty(ty::Projection(projection_ty)) + } + + fn register_predicate_obligation( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + obligation: PredicateObligation<'tcx>, + ) { + self.obligations.insert(in_environment(infcx, obligation)); + } + + fn select_all_or_error( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Result<(), Vec>> { + self.select_where_possible(infcx)?; + + if self.obligations.is_empty() { + Ok(()) + } else { + let errors = self.obligations.iter() + .map(|obligation| FulfillmentError { + obligation: obligation.goal.clone(), + code: FulfillmentErrorCode::CodeAmbiguity, + }) + .collect(); + Err(errors) + } + } + + fn select_where_possible( + &mut self, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + ) -> Result<(), Vec>> { + let mut errors = Vec::new(); + let mut next_round = FxHashSet::default(); + let mut making_progress; + + loop { + making_progress = false; + + // We iterate over all obligations, and record if we are able + // to unambiguously prove at least one obligation. + for obligation in self.obligations.drain() { + let mut orig_values = OriginalQueryValues::default(); + let canonical_goal = infcx.canonicalize_query(&InEnvironment { + environment: obligation.environment, + goal: obligation.goal.predicate, + }, &mut orig_values); + + match infcx.tcx.global_tcx().evaluate_goal(canonical_goal) { + Ok(response) => { + if response.is_proven() { + making_progress = true; + + match infcx.instantiate_query_response_and_region_obligations( + &obligation.goal.cause, + obligation.goal.param_env, + &orig_values, + &response + ) { + Ok(infer_ok) => next_round.extend( + infer_ok.obligations + .into_iter() + .map(|obligation| in_environment(infcx, obligation)) + ), + + Err(_err) => errors.push(FulfillmentError { + obligation: obligation.goal, + code: FulfillmentErrorCode::CodeSelectionError( + SelectionError::Unimplemented + ), + }), + } + } else { + // Ambiguous: retry at next round. 
+ next_round.insert(obligation); + } + } + + Err(NoSolution) => errors.push(FulfillmentError { + obligation: obligation.goal, + code: FulfillmentErrorCode::CodeSelectionError( + SelectionError::Unimplemented + ), + }) + } + } + next_round = std::mem::replace(&mut self.obligations, next_round); + + if !making_progress { + break; + } + } + + if errors.is_empty() { + Ok(()) + } else { + Err(errors) + } + } + + fn pending_obligations(&self) -> Vec> { + self.obligations.iter().map(|obligation| obligation.goal.clone()).collect() + } +} diff --git a/src/librustc/traits/codegen/mod.rs b/src/librustc/traits/codegen/mod.rs index 4e88150a18acc..9b0a3820c859c 100644 --- a/src/librustc/traits/codegen/mod.rs +++ b/src/librustc/traits/codegen/mod.rs @@ -1,31 +1,21 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // This file contains various trait resolution methods used by codegen. // They all assume regions can be erased and monomorphic types. It // seems likely that they should eventually be merged into more // general routines. -use dep_graph::{DepKind, DepTrackingMapConfig}; +use crate::dep_graph::{DepKind, DepTrackingMapConfig}; use std::marker::PhantomData; use syntax_pos::DUMMY_SP; -use infer::InferCtxt; +use crate::infer::InferCtxt; use syntax_pos::Span; -use traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, +use crate::traits::{FulfillmentContext, Obligation, ObligationCause, SelectionContext, TraitEngine, Vtable}; -use ty::{self, Ty, TyCtxt}; -use ty::subst::{Subst, Substs}; -use ty::fold::TypeFoldable; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::subst::{Subst, SubstsRef}; +use crate::ty::fold::TypeFoldable; -/// Attempts to resolve an obligation to a vtable.. The result is -/// a shallow vtable resolution -- meaning that we do not +/// Attempts to resolve an obligation to a vtable. The result is +/// a shallow vtable resolution, meaning that we do not /// (necessarily) resolve all nested obligations on the impl. Note /// that type check should guarantee to us that all nested /// obligations *could be* resolved if we wanted to. @@ -92,7 +82,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// types. pub fn subst_and_normalize_erasing_regions( self, - param_substs: &Substs<'tcx>, + param_substs: SubstsRef<'tcx>, param_env: ty::ParamEnv<'tcx>, value: &T ) -> T diff --git a/src/librustc/traits/coherence.rs b/src/librustc/traits/coherence.rs index af338cd3868fa..35d8e2beef557 100644 --- a/src/librustc/traits/coherence.rs +++ b/src/librustc/traits/coherence.rs @@ -1,30 +1,20 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! See rustc guide chapters on [trait-resolution] and [trait-specialization] for more info on how +//! See Rustc Guide chapters on [trait-resolution] and [trait-specialization] for more info on how //! this works. //! //! [trait-resolution]: https://rust-lang.github.io/rustc-guide/traits/resolution.html //! 
[trait-specialization]: https://rust-lang.github.io/rustc-guide/traits/specialization.html -use hir::def_id::{DefId, LOCAL_CRATE}; +use crate::infer::CombinedSnapshot; +use crate::hir::def_id::{DefId, LOCAL_CRATE}; use syntax_pos::DUMMY_SP; -use traits::{self, Normalized, SelectionContext, Obligation, ObligationCause}; -use traits::IntercrateMode; -use traits::select::IntercrateAmbiguityCause; -use ty::{self, Ty, TyCtxt}; -use ty::relate::TraitObjectMode; -use ty::fold::TypeFoldable; -use ty::subst::Subst; +use crate::traits::{self, Normalized, SelectionContext, Obligation, ObligationCause}; +use crate::traits::IntercrateMode; +use crate::traits::select::IntercrateAmbiguityCause; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::fold::TypeFoldable; +use crate::ty::subst::Subst; -use infer::{InferOk}; +use crate::infer::{InferOk}; /// Whether we do the orphan check relative to this crate or /// to some remote crate. @@ -43,6 +33,17 @@ pub enum Conflict { pub struct OverlapResult<'tcx> { pub impl_header: ty::ImplHeader<'tcx>, pub intercrate_ambiguity_causes: Vec, + + /// `true` if the overlap might've been permitted before the shift + /// to universes. + pub involves_placeholder: bool, +} + +pub fn add_placeholder_note(err: &mut errors::DiagnosticBuilder<'_>) { + err.note(&format!( + "this behavior recently changed as a result of a bug fix; \ + see rust-lang/rust#56105 for details" + )); } /// If there are types that satisfy both impls, invokes `on_overlap` @@ -53,7 +54,6 @@ pub fn overlapping_impls<'gcx, F1, F2, R>( impl1_def_id: DefId, impl2_def_id: DefId, intercrate_mode: IntercrateMode, - trait_object_mode: TraitObjectMode, on_overlap: F1, no_overlap: F2, ) -> R @@ -64,14 +64,12 @@ where debug!("overlapping_impls(\ impl1_def_id={:?}, \ impl2_def_id={:?}, - intercrate_mode={:?}, - trait_object_mode={:?})", + intercrate_mode={:?})", impl1_def_id, impl2_def_id, - intercrate_mode, - trait_object_mode); + intercrate_mode); - let overlaps = tcx.infer_ctxt().with_trait_object_mode(trait_object_mode).enter(|infcx| { + let overlaps = tcx.infer_ctxt().enter(|infcx| { let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode); overlap(selcx, impl1_def_id, impl2_def_id).is_some() }); @@ -83,7 +81,7 @@ where // In the case where we detect an error, run the check again, but // this time tracking intercrate ambuiguity causes for better // diagnostics. (These take time and can lead to false errors.) - tcx.infer_ctxt().with_trait_object_mode(trait_object_mode).enter(|infcx| { + tcx.infer_ctxt().enter(|infcx| { let selcx = &mut SelectionContext::intercrate(&infcx, intercrate_mode); selcx.enable_tracking_intercrate_ambiguity_causes(); on_overlap(overlap(selcx, impl1_def_id, impl2_def_id).unwrap()) @@ -113,14 +111,23 @@ fn with_fresh_ty_vars<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, ' } /// Can both impl `a` and impl `b` be satisfied by a common type (including -/// `where` clauses)? If so, returns an `ImplHeader` that unifies the two impls. -fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, - a_def_id: DefId, - b_def_id: DefId) - -> Option> -{ +/// where-clauses)? If so, returns an `ImplHeader` that unifies the two impls. 
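For readers new to the coherence check, a minimal self-contained illustration (assumed, not from this patch) of the situation `overlap` detects:

    // Both impls can be satisfied by `Vec<i32>`, so coherence must reject the pair.
    trait Greet { fn hi(&self) {} }
    impl<T> Greet for Vec<T> {}
    impl Greet for Vec<i32> {}   // error[E0119]: conflicting implementations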
+fn overlap<'cx, 'gcx, 'tcx>( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + a_def_id: DefId, + b_def_id: DefId, +) -> Option> { debug!("overlap(a_def_id={:?}, b_def_id={:?})", a_def_id, b_def_id); + selcx.infcx().probe(|snapshot| overlap_within_probe(selcx, a_def_id, b_def_id, snapshot)) +} + +fn overlap_within_probe( + selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, + a_def_id: DefId, + b_def_id: DefId, + snapshot: &CombinedSnapshot<'_, 'tcx>, +) -> Option> { // For the purposes of this check, we don't bring any placeholder // types into scope; instead, we replace the generic types with // fresh type variables, and hence we do our evaluations in an @@ -168,7 +175,13 @@ fn overlap<'cx, 'gcx, 'tcx>(selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, let impl_header = selcx.infcx().resolve_type_vars_if_possible(&a_impl_header); let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes(); debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes); - Some(OverlapResult { impl_header, intercrate_ambiguity_causes }) + + let involves_placeholder = match selcx.infcx().region_constraints_added_in_snapshot(snapshot) { + Some(true) => true, + _ => false, + }; + + Some(OverlapResult { impl_header, intercrate_ambiguity_causes, involves_placeholder }) } pub fn trait_ref_is_knowable<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -229,7 +242,7 @@ pub enum OrphanCheckErr<'tcx> { } /// Checks the coherence orphan rules. `impl_def_id` should be the -/// def-id of a trait impl. To pass, either the trait must be local, or else +/// `DefId` of a trait impl. To pass, either the trait must be local, or else /// two conditions must be satisfied: /// /// 1. All type parameters in `Self` must be "covered" by some local type constructor. @@ -255,7 +268,7 @@ pub fn orphan_check<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, orphan_check_trait_ref(tcx, trait_ref, InCrate::Local) } -/// Check whether a trait-ref is potentially implementable by a crate. +/// Checks whether a trait-ref is potentially implementable by a crate. /// /// The current rule is that a trait-ref orphan checks in a crate C: /// @@ -354,50 +367,75 @@ fn orphan_check_trait_ref<'tcx>(tcx: TyCtxt<'_, '_, '_>, trait_ref); } - // First, create an ordered iterator over all the type parameters to the trait, with the self - // type appearing first. - // Find the first input type that either references a type parameter OR - // some local type. - for input_ty in trait_ref.input_types() { - if ty_is_local(tcx, input_ty, in_crate) { - debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); - - // First local input type. Check that there are no - // uncovered type parameters. - let uncovered_tys = uncovered_tys(tcx, input_ty, in_crate); - for uncovered_ty in uncovered_tys { - if let Some(param) = uncovered_ty.walk() - .find(|t| is_possibly_remote_type(t, in_crate)) - { - debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); - return Err(OrphanCheckErr::UncoveredTy(param)); - } + if tcx.features().re_rebalance_coherence { + // Given impl Trait for T0, an impl is valid only + // if at least one of the following is true: + // + // - Trait is a local trait + // (already checked in orphan_check prior to calling this function) + // - All of + // - At least one of the types T0..=Tn must be a local type. + // Let Ti be the first such type. 
+ // - No uncovered type parameters P1..=Pn may appear in T0..Ti (excluding Ti) + // + for input_ty in trait_ref.input_types() { + debug!("orphan_check_trait_ref: check ty `{:?}`", input_ty); + if ty_is_local(tcx, input_ty, in_crate) { + debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); + return Ok(()); + } else if let ty::Param(_) = input_ty.sty { + debug!("orphan_check_trait_ref: uncovered ty: `{:?}`", input_ty); + return Err(OrphanCheckErr::UncoveredTy(input_ty)) } - - // OK, found local type, all prior types upheld invariant. - return Ok(()); } + // If we exit above loop, never found a local type. + debug!("orphan_check_trait_ref: no local type"); + Err(OrphanCheckErr::NoLocalInputType) + } else { + // First, create an ordered iterator over all the type + // parameters to the trait, with the self type appearing + // first. Find the first input type that either references a + // type parameter OR some local type. + for input_ty in trait_ref.input_types() { + if ty_is_local(tcx, input_ty, in_crate) { + debug!("orphan_check_trait_ref: ty_is_local `{:?}`", input_ty); + + // First local input type. Check that there are no + // uncovered type parameters. + let uncovered_tys = uncovered_tys(tcx, input_ty, in_crate); + for uncovered_ty in uncovered_tys { + if let Some(param) = uncovered_ty.walk() + .find(|t| is_possibly_remote_type(t, in_crate)) + { + debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); + return Err(OrphanCheckErr::UncoveredTy(param)); + } + } + + // OK, found local type, all prior types upheld invariant. + return Ok(()); + } - // Otherwise, enforce invariant that there are no type - // parameters reachable. - if let Some(param) = input_ty.walk() - .find(|t| is_possibly_remote_type(t, in_crate)) - { - debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); - return Err(OrphanCheckErr::UncoveredTy(param)); + // Otherwise, enforce invariant that there are no type + // parameters reachable. + if let Some(param) = input_ty.walk() + .find(|t| is_possibly_remote_type(t, in_crate)) + { + debug!("orphan_check_trait_ref: uncovered type `{:?}`", param); + return Err(OrphanCheckErr::UncoveredTy(param)); + } } + // If we exit above loop, never found a local type. + debug!("orphan_check_trait_ref: no local type"); + Err(OrphanCheckErr::NoLocalInputType) } - - // If we exit above loop, never found a local type. - debug!("orphan_check_trait_ref: no local type"); - return Err(OrphanCheckErr::NoLocalInputType); } fn uncovered_tys<'tcx>(tcx: TyCtxt<'_, '_, '_>, ty: Ty<'tcx>, in_crate: InCrate) -> Vec> { if ty_is_local_constructor(ty, in_crate) { vec![] - } else if fundamental_ty(tcx, ty) { + } else if fundamental_ty(ty) { ty.walk_shallow() .flat_map(|t| uncovered_tys(tcx, t, in_crate)) .collect() @@ -415,14 +453,13 @@ fn is_possibly_remote_type(ty: Ty<'_>, _in_crate: InCrate) -> bool { fn ty_is_local(tcx: TyCtxt<'_, '_, '_>, ty: Ty<'_>, in_crate: InCrate) -> bool { ty_is_local_constructor(ty, in_crate) || - fundamental_ty(tcx, ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, in_crate)) + fundamental_ty(ty) && ty.walk_shallow().any(|t| ty_is_local(tcx, t, in_crate)) } -fn fundamental_ty(tcx: TyCtxt<'_, '_, '_>, ty: Ty<'_>) -> bool { +fn fundamental_ty(ty: Ty<'_>) -> bool { match ty.sty { ty::Ref(..) => true, ty::Adt(def, _) => def.is_fundamental(), - ty::Dynamic(ref data, ..) 
=> tcx.has_attr(data.principal().def_id(), "fundamental"), _ => false } } @@ -469,7 +506,13 @@ fn ty_is_local_constructor(ty: Ty<'_>, in_crate: InCrate) -> bool { ty::Adt(def, _) => def_id_is_local(def.did, in_crate), ty::Foreign(did) => def_id_is_local(did, in_crate), - ty::Dynamic(ref tt, ..) => def_id_is_local(tt.principal().def_id(), in_crate), + ty::Dynamic(ref tt, ..) => { + if let Some(principal) = tt.principal() { + def_id_is_local(principal.def_id(), in_crate) + } else { + false + } + } ty::Error => true, diff --git a/src/librustc/traits/engine.rs b/src/librustc/traits/engine.rs index acbf5392cf54c..2f019d823ff5d 100644 --- a/src/librustc/traits/engine.rs +++ b/src/librustc/traits/engine.rs @@ -1,18 +1,9 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +use crate::infer::InferCtxt; +use crate::ty::{self, Ty, TyCtxt, ToPredicate}; +use crate::traits::Obligation; +use crate::hir::def_id::DefId; -use infer::InferCtxt; -use ty::{self, Ty, TyCtxt}; -use hir::def_id::DefId; - -use super::{FulfillmentContext, FulfillmentError}; +use super::{ChalkFulfillmentContext, FulfillmentContext, FulfillmentError}; use super::{ObligationCause, PredicateObligation}; pub trait TraitEngine<'tcx>: 'tcx { @@ -24,6 +15,9 @@ pub trait TraitEngine<'tcx>: 'tcx { cause: ObligationCause<'tcx>, ) -> Ty<'tcx>; + /// Requires that `ty` must implement the trait with `def_id` in + /// the given environment. This trait must not have any type + /// parameters (except for `Self`). fn register_bound( &mut self, infcx: &InferCtxt<'_, 'gcx, 'tcx>, @@ -31,7 +25,18 @@ pub trait TraitEngine<'tcx>: 'tcx { ty: Ty<'tcx>, def_id: DefId, cause: ObligationCause<'tcx>, - ); + ) { + let trait_ref = ty::TraitRef { + def_id, + substs: infcx.tcx.mk_substs_trait(ty, &[]), + }; + self.register_predicate_obligation(infcx, Obligation { + cause, + recursion_depth: 0, + param_env, + predicate: trait_ref.to_predicate() + }); + } fn register_predicate_obligation( &mut self, @@ -73,7 +78,11 @@ impl> TraitEngineExt<'tcx> for T { } impl dyn TraitEngine<'tcx> { - pub fn new(_tcx: TyCtxt<'_, '_, 'tcx>) -> Box { - Box::new(FulfillmentContext::new()) + pub fn new(tcx: TyCtxt<'_, '_, 'tcx>) -> Box { + if tcx.sess.opts.debugging_opts.chalk { + Box::new(ChalkFulfillmentContext::new()) + } else { + Box::new(FulfillmentContext::new()) + } } } diff --git a/src/librustc/traits/error_reporting.rs b/src/librustc/traits/error_reporting.rs index 373d6652b9e6a..5b5a7cc9ed85b 100644 --- a/src/librustc/traits/error_reporting.rs +++ b/src/librustc/traits/error_reporting.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
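Returning to the re-balanced orphan rule spelled out in the coherence.rs comments above, an assumed illustration (not part of this patch) of what it accepts and rejects:

    use std::fmt;
    struct Local<T>(T);
    // Accepted: the first input type, `Local<T>`, is a local constructor and covers `T`.
    impl<T> fmt::Display for Local<T> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "local") }
    }
    // Rejected with E0210 under either branch: the uncovered parameter `T` appears as
    // the first input type, before any local type.
    // impl<T> fmt::Display for T { ... }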
- use super::{ FulfillmentError, FulfillmentErrorCode, @@ -27,24 +17,24 @@ use super::{ Overflow, }; +use crate::hir; +use crate::hir::Node; +use crate::hir::def_id::DefId; +use crate::infer::{self, InferCtxt}; +use crate::infer::type_variable::TypeVariableOrigin; +use crate::session::DiagnosticMessageId; +use crate::ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; +use crate::ty::GenericParamDefKind; +use crate::ty::error::ExpectedFound; +use crate::ty::fast_reject; +use crate::ty::fold::TypeFolder; +use crate::ty::subst::Subst; +use crate::ty::SubtypePredicate; +use crate::util::nodemap::{FxHashMap, FxHashSet}; + use errors::{Applicability, DiagnosticBuilder}; -use hir; -use hir::Node; -use hir::def_id::DefId; -use infer::{self, InferCtxt}; -use infer::type_variable::TypeVariableOrigin; use std::fmt; use syntax::ast; -use session::DiagnosticMessageId; -use ty::{self, AdtKind, ToPredicate, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable}; -use ty::GenericParamDefKind; -use ty::error::ExpectedFound; -use ty::fast_reject; -use ty::fold::TypeFolder; -use ty::subst::Subst; -use ty::SubtypePredicate; -use util::nodemap::{FxHashMap, FxHashSet}; - use syntax_pos::{DUMMY_SP, Span, ExpnInfo, ExpnFormat}; impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { @@ -399,7 +389,8 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { for param in generics.params.iter() { let value = match param.kind { - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } | + GenericParamDefKind::Const => { trait_ref.substs[param.index as usize].to_string() }, GenericParamDefKind::Lifetime => continue, @@ -428,9 +419,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { Some(format!("[{}]", self.tcx.type_of(def.did).to_string())), )); let tcx = self.tcx; - if let Some(len) = len.val.try_to_scalar().and_then(|scalar| { - scalar.to_usize(&tcx).ok() - }) { + if let Some(len) = len.assert_usize(tcx) { flags.push(( "_Self".to_owned(), Some(format!("[{}; {}]", self.tcx.type_of(def.did).to_string(), len)), @@ -459,7 +448,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { { let simp = fast_reject::simplify_type(self.tcx, trait_ref.skip_binder().self_ty(), - true,); + true); let all_impls = self.tcx.all_impls(trait_ref.def_id()); match simp { @@ -483,7 +472,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } fn report_similar_impl_candidates(&self, - mut impl_candidates: Vec>, + impl_candidates: Vec>, err: &mut DiagnosticBuilder<'_>) { if impl_candidates.is_empty() { @@ -509,14 +498,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { }); // Sort impl candidates so that ordering is consistent for UI tests. 
- let normalized_impl_candidates = &mut impl_candidates[0..end] + let mut normalized_impl_candidates = impl_candidates .iter() .map(normalize) .collect::>(); + + // Sort before taking the `..end` range, + // because the ordering of `impl_candidates` may not be deterministic: + // https://github.com/rust-lang/rust/pull/57475#issuecomment-455519507 normalized_impl_candidates.sort(); err.help(&format!("the following implementations were found:{}{}", - normalized_impl_candidates.join(""), + normalized_impl_candidates[..end].join(""), if len > 5 { format!("\nand {} others", len - 4) } else { @@ -591,7 +584,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } - /// Get the parent trait chain start + /// Gets the parent trait chain start fn get_parent_trait_ref(&self, code: &ObligationCauseCode<'tcx>) -> Option { match code { &ObligationCauseCode::BuiltinDerivedObligation(ref data) => { @@ -606,11 +599,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - pub fn report_selection_error(&self, - obligation: &PredicateObligation<'tcx>, - error: &SelectionError<'tcx>, - fallback_has_occurred: bool) - { + pub fn report_selection_error( + &self, + obligation: &PredicateObligation<'tcx>, + error: &SelectionError<'tcx>, + fallback_has_occurred: bool, + ) { let span = obligation.cause.span; let mut err = match *error { @@ -680,6 +674,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { self.suggest_borrow_on_unsized_slice(&obligation.cause.code, &mut err); self.suggest_remove_reference(&obligation, &mut err, &trait_ref); + self.suggest_semicolon_removal(&obligation, &mut err, span, &trait_ref); // Try to report a help message if !trait_ref.has_infer_types() && @@ -741,9 +736,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let predicate = self.resolve_type_vars_if_possible(predicate); let err = self.region_outlives_predicate(&obligation.cause, &predicate).err().unwrap(); - struct_span_err!(self.tcx.sess, span, E0279, + struct_span_err!( + self.tcx.sess, span, E0279, "the requirement `{}` is not satisfied (`{}`)", - predicate, err) + predicate, err, + ) } ty::Predicate::Projection(..) | ty::Predicate::TypeOutlives(..) => { @@ -757,16 +754,22 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { ty::Predicate::ObjectSafe(trait_def_id) => { let violations = self.tcx.global_tcx() .object_safety_violations(trait_def_id); - self.tcx.report_object_safety_error(span, - trait_def_id, - violations) + if let Some(err) = self.tcx.report_object_safety_error( + span, + trait_def_id, + violations, + ) { + err + } else { + return; + } } ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { let found_kind = self.closure_kind(closure_def_id, closure_substs).unwrap(); let closure_span = self.tcx.sess.source_map() .def_span(self.tcx.hir().span_if_local(closure_def_id).unwrap()); - let node_id = self.tcx.hir().as_local_node_id(closure_def_id).unwrap(); + let hir_id = self.tcx.hir().as_local_hir_id(closure_def_id).unwrap(); let mut err = struct_span_err!( self.tcx.sess, closure_span, E0525, "expected a closure that implements the `{}` trait, \ @@ -785,8 +788,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // a particular trait. 
if let Some(tables) = self.in_progress_tables { let tables = tables.borrow(); - let closure_hir_id = self.tcx.hir().node_to_hir_id(node_id); - match (found_kind, tables.closure_kind_origins().get(closure_hir_id)) { + match (found_kind, tables.closure_kind_origins().get(hir_id)) { (ty::ClosureKind::FnOnce, Some((span, name))) => { err.span_label(*span, format!( "closure is `FnOnce` because it moves the \ @@ -806,12 +808,21 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } ty::Predicate::WellFormed(ty) => { - // WF predicates cannot themselves make - // errors. They can only block due to - // ambiguity; otherwise, they always - // degenerate into other obligations - // (which may fail). - span_bug!(span, "WF predicate not satisfied for {:?}", ty); + if !self.tcx.sess.opts.debugging_opts.chalk { + // WF predicates cannot themselves make + // errors. They can only block due to + // ambiguity; otherwise, they always + // degenerate into other obligations + // (which may fail). + span_bug!(span, "WF predicate not satisfied for {:?}", ty); + } else { + // FIXME: we'll need a better message which takes into account + // which bounds actually failed to hold. + self.tcx.sess.struct_span_err( + span, + &format!("the type `{}` is not well-formed (chalk)", ty) + ) + } } ty::Predicate::ConstEvaluatable(..) => { @@ -849,10 +860,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { _ => vec![ArgKind::empty()], }; - let expected = match expected_trait_ref.skip_binder().substs.type_at(1).sty { + let expected_ty = expected_trait_ref.skip_binder().substs.type_at(1); + let expected = match expected_ty.sty { ty::Tuple(ref tys) => tys.iter() .map(|t| ArgKind::from_expected_ty(t, Some(span))).collect(), - ref sty => vec![ArgKind::Arg("_".to_owned(), sty.to_string())], + _ => vec![ArgKind::Arg("_".to_owned(), expected_ty.to_string())], }; if found.len() == expected.len() { @@ -878,7 +890,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { TraitNotObjectSafe(did) => { let violations = self.tcx.global_tcx().object_safety_violations(did); - self.tcx.report_object_safety_error(span, did, violations) + if let Some(err) = self.tcx.report_object_safety_error(span, did, violations) { + err + } else { + return; + } } // already reported in the query @@ -897,16 +913,18 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// When encountering an assignment of an unsized trait, like `let x = ""[..];`, provide a /// suggestion to borrow the initializer in order to use have a slice instead. - fn suggest_borrow_on_unsized_slice(&self, - code: &ObligationCauseCode<'tcx>, - err: &mut DiagnosticBuilder<'tcx>) { + fn suggest_borrow_on_unsized_slice( + &self, + code: &ObligationCauseCode<'tcx>, + err: &mut DiagnosticBuilder<'tcx>, + ) { if let &ObligationCauseCode::VariableType(node_id) = code { let parent_node = self.tcx.hir().get_parent_node(node_id); if let Some(Node::Local(ref local)) = self.tcx.hir().find(parent_node) { if let Some(ref expr) = local.init { if let hir::ExprKind::Index(_, _) = expr.node { if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(expr.span) { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, "consider borrowing here", format!("&{}", snippet), @@ -921,10 +939,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { /// Whenever references are used by mistake, like `for (i, e) in &vec.iter().enumerate()`, /// suggest removing these references until we reach a type that implements the trait. 
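An assumed end-to-end example (not from this patch) of the code this suggestion targets:

    fn main() {
        let vec = vec![1, 2, 3];
        // `&vec.iter().enumerate()` is a reference, and `&Enumerate<..>` is not an
        // iterator, so the commented loop fails with E0277 and the help suggests
        // removing the leading `&`.
        // for (i, e) in &vec.iter().enumerate() { ... }
        for (i, e) in vec.iter().enumerate() {
            println!("{}: {}", i, e);
        }
    }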
- fn suggest_remove_reference(&self, - obligation: &PredicateObligation<'tcx>, - err: &mut DiagnosticBuilder<'tcx>, - trait_ref: &ty::Binder>) { + fn suggest_remove_reference( + &self, + obligation: &PredicateObligation<'tcx>, + err: &mut DiagnosticBuilder<'tcx>, + trait_ref: &ty::Binder>, + ) { let trait_ref = trait_ref.skip_binder(); let span = obligation.cause.span; @@ -954,7 +974,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { let format_str = format!("consider removing {} leading `&`-references", remove_refs); - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( sp, &format_str, String::new(), Applicability::MachineApplicable ); break; @@ -966,6 +986,40 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } + fn suggest_semicolon_removal( + &self, + obligation: &PredicateObligation<'tcx>, + err: &mut DiagnosticBuilder<'tcx>, + span: Span, + trait_ref: &ty::Binder>, + ) { + let hir = self.tcx.hir(); + let parent_node = hir.get_parent_node( + hir.hir_to_node_id(obligation.cause.body_id), + ); + let node = hir.find(parent_node); + if let Some(hir::Node::Item(hir::Item { + node: hir::ItemKind::Fn(decl, _, _, body_id), + .. + })) = node { + let body = hir.body(*body_id); + if let hir::ExprKind::Block(blk, _) = &body.value.node { + if decl.output.span().overlaps(span) && blk.expr.is_none() && + "()" == &trait_ref.self_ty().to_string() + { + // FIXME(estebank): When encountering a method with a trait + // bound not satisfied in the return type with a body that has + // no return, suggest removal of semicolon on last statement. + // Once that is added, close #54771. + if let Some(ref stmt) = blk.stmts.last() { + let sp = self.tcx.sess.source_map().end_point(stmt.span); + err.span_label(sp, "consider removing this semicolon"); + } + } + } + } + } + /// Given some node representing a fn-like thing in the HIR map, /// returns a span and `ArgKind` information that describes the /// arguments it expects. This can be supplied to @@ -1023,22 +1077,13 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { _ => ArgKind::empty() }).collect::>()) } - Node::Variant(&hir::Variant { - span, - node: hir::VariantKind { - data: hir::VariantData::Tuple(ref fields, _), - .. - }, - .. - }) => { - (self.tcx.sess.source_map().def_span(span), - fields.iter().map(|field| - ArgKind::Arg(field.ident.to_string(), "_".to_string()) - ).collect::>()) - } - Node::StructCtor(ref variant_data) => { - (self.tcx.sess.source_map().def_span(self.tcx.hir().span(variant_data.id())), - vec![ArgKind::empty(); variant_data.fields().len()]) + Node::Ctor(ref variant_data) => { + let span = variant_data.ctor_hir_id() + .map(|hir_id| self.tcx.hir().span_by_hir_id(hir_id)) + .unwrap_or(DUMMY_SP); + let span = self.tcx.sess.source_map().def_span(span); + + (span, vec![ArgKind::empty(); variant_data.fields().len()]) } _ => panic!("non-FnLike node found: {:?}", node), } @@ -1111,7 +1156,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { // For example, if `expected_args_length` is 2, suggest `|_, _|`. 
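An assumed example (not from this patch) of the closure-arity mismatch these suggestions address:

    fn main() {
        // `map` expects a one-argument closure; `|| 0` takes none, so the commented
        // line fails with E0593 plus the "take and ignore the expected argument" hint.
        // let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|| 0).collect();
        let v: Vec<i32> = vec![1, 2, 3].into_iter().map(|_| 0).collect();
        println!("{:?}", v);
    }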
if found_args.is_empty() && is_closure { let underscores = vec!["_"; expected_args.len()].join(", "); - err.span_suggestion_with_applicability( + err.span_suggestion( pipe_span, &format!( "consider changing the closure to take and ignore the expected argument{}", @@ -1132,11 +1177,12 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { .map(|(name, _)| name.to_owned()) .collect::>() .join(", "); - err.span_suggestion_with_applicability(found_span, - "change the closure to take multiple \ - arguments instead of a single tuple", - format!("|{}|", sugg), - Applicability::MachineApplicable); + err.span_suggestion( + found_span, + "change the closure to take multiple arguments instead of a single tuple", + format!("|{}|", sugg), + Applicability::MachineApplicable, + ); } } if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..] { @@ -1164,12 +1210,11 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { String::new() }, ); - err.span_suggestion_with_applicability( + err.span_suggestion( found_span, - "change the closure to accept a tuple instead of \ - individual arguments", + "change the closure to accept a tuple instead of individual arguments", sugg, - Applicability::MachineApplicable + Applicability::MachineApplicable, ); } } @@ -1240,21 +1285,25 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let span = self.sess.source_map().def_span(span); let mut err = struct_span_err!(self.sess, span, E0072, "recursive type `{}` has infinite size", - self.item_path_str(type_def_id)); + self.def_path_str(type_def_id)); err.span_label(span, "recursive type has infinite size"); err.help(&format!("insert indirection (e.g., a `Box`, `Rc`, or `&`) \ at some point to make `{}` representable", - self.item_path_str(type_def_id))); + self.def_path_str(type_def_id))); err } - pub fn report_object_safety_error(self, - span: Span, - trait_def_id: DefId, - violations: Vec) - -> DiagnosticBuilder<'tcx> - { - let trait_str = self.item_path_str(trait_def_id); + pub fn report_object_safety_error( + self, + span: Span, + trait_def_id: DefId, + violations: Vec, + ) -> Option> { + if self.sess.trait_methods_not_found.borrow().contains(&span) { + // Avoid emitting error caused by non-existing method (#58734) + return None; + } + let trait_str = self.def_path_str(trait_def_id); let span = self.sess.source_map().def_span(span); let mut err = struct_span_err!( self.sess, span, E0038, @@ -1268,7 +1317,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { err.note(&violation.error_msg()); } } - err + Some(err) } } @@ -1377,7 +1426,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { } } - /// Returns whether the trait predicate may apply for *some* assignment + /// Returns `true` if the trait predicate may apply for *some* assignment /// to the type parameters. fn predicate_can_apply(&self, param_env: ty::ParamEnv<'tcx>, @@ -1450,15 +1499,15 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { match *cause_code { ObligationCauseCode::ExprAssignable | ObligationCauseCode::MatchExpressionArm { .. } | - ObligationCauseCode::IfExpression | + ObligationCauseCode::MatchExpressionArmPattern { .. } | + ObligationCauseCode::IfExpression { .. 
} | ObligationCauseCode::IfExpressionWithNoElse | ObligationCauseCode::MainFunctionType | ObligationCauseCode::StartFunctionType | ObligationCauseCode::IntrinsicType | ObligationCauseCode::MethodReceiver | ObligationCauseCode::ReturnNoExpression | - ObligationCauseCode::MiscObligation => { - } + ObligationCauseCode::MiscObligation => {} ObligationCauseCode::SliceOrArrayElem => { err.note("slice and array elements must have `Sized` type"); } @@ -1479,7 +1528,7 @@ impl<'a, 'gcx, 'tcx> InferCtxt<'a, 'gcx, 'tcx> { region, object_ty)); } ObligationCauseCode::ItemObligation(item_def_id) => { - let item_name = tcx.item_path_str(item_def_id); + let item_name = tcx.def_path_str(item_def_id); let msg = format!("required by `{}`", item_name); if let Some(sp) = tcx.hir().span_if_local(item_def_id) { @@ -1642,10 +1691,10 @@ impl ArgKind { ty::Tuple(ref tys) => ArgKind::Tuple( span, tys.iter() - .map(|ty| ("_".to_owned(), ty.sty.to_string())) + .map(|ty| ("_".to_owned(), ty.to_string())) .collect::>() ), - _ => ArgKind::Arg("_".to_owned(), t.sty.to_string()), + _ => ArgKind::Arg("_".to_owned(), t.to_string()), } } } diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index bc091a4e7e084..8c684c0775ee3 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -1,29 +1,18 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::InferCtxt; -use mir::interpret::{GlobalId, ErrorHandled}; -use ty::{self, Ty, TypeFoldable, ToPolyTraitRef, ToPredicate}; -use ty::error::ExpectedFound; +use crate::infer::InferCtxt; +use crate::mir::interpret::{GlobalId, ErrorHandled}; +use crate::ty::{self, Ty, TypeFoldable, ToPolyTraitRef}; +use crate::ty::error::ExpectedFound; use rustc_data_structures::obligation_forest::{DoCompleted, Error, ForestObligation}; use rustc_data_structures::obligation_forest::{ObligationForest, ObligationProcessor}; use rustc_data_structures::obligation_forest::{ProcessResult}; use std::marker::PhantomData; -use hir::def_id::DefId; use super::CodeAmbiguity; use super::CodeProjectionError; use super::CodeSelectionError; use super::engine::{TraitEngine, TraitEngineExt}; use super::{FulfillmentError, FulfillmentErrorCode}; -use super::{ObligationCause, PredicateObligation, Obligation}; +use super::{ObligationCause, PredicateObligation}; use super::project; use super::select::SelectionContext; use super::{Unimplemented, ConstEvalFailure}; @@ -34,7 +23,7 @@ impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> { fn as_predicate(&self) -> &Self::Predicate { &self.obligation.predicate } } -/// The fulfillment context is used to drive trait resolution. It +/// The fulfillment context is used to drive trait resolution. It /// consists of a list of obligations that must be (eventually) /// satisfied. The job is to track which are satisfied, which yielded /// errors, and which are still pending. At any point, users can call @@ -61,6 +50,16 @@ pub struct FulfillmentContext<'tcx> { // type-lives-for-region constraints, and because the type // is well-formed, the constraints should hold. register_region_obligations: bool, + // Is it OK to register obligations into this infcx inside + // an infcx snapshot? 
+ // + // The "primary fulfillment" in many cases in typeck lives + // outside of any snapshot, so any use of it inside a snapshot + // will lead to trouble and therefore is checked against, but + // other fulfillment contexts sometimes do live inside of + // a snapshot (they don't *straddle* a snapshot, so there + // is no trouble there). + usable_in_snapshot: bool } #[derive(Clone, Debug)] @@ -74,14 +73,24 @@ impl<'a, 'gcx, 'tcx> FulfillmentContext<'tcx> { pub fn new() -> FulfillmentContext<'tcx> { FulfillmentContext { predicates: ObligationForest::new(), - register_region_obligations: true + register_region_obligations: true, + usable_in_snapshot: false, + } + } + + pub fn new_in_snapshot() -> FulfillmentContext<'tcx> { + FulfillmentContext { + predicates: ObligationForest::new(), + register_region_obligations: true, + usable_in_snapshot: true, } } pub fn new_ignoring_regions() -> FulfillmentContext<'tcx> { FulfillmentContext { predicates: ObligationForest::new(), - register_region_obligations: false + register_region_obligations: false, + usable_in_snapshot: false } } @@ -131,7 +140,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { /// creating a fresh type variable `$0` as well as a projection /// predicate `::X == $0`. When the /// inference engine runs, it will attempt to find an impl of - /// `SomeTrait` or a where clause that lets us unify `$0` with + /// `SomeTrait` or a where-clause that lets us unify `$0` with /// something concrete. If this fails, we'll unify `$0` with /// `projection_ty` again. fn normalize_projection_type<'a, 'gcx>(&mut self, @@ -163,28 +172,6 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { normalized_ty } - /// Requires that `ty` must implement the trait with `def_id` in - /// the given environment. This trait must not have any type - /// parameters (except for `Self`). 
- fn register_bound<'a, 'gcx>(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>, - def_id: DefId, - cause: ObligationCause<'tcx>) - { - let trait_ref = ty::TraitRef { - def_id, - substs: infcx.tcx.mk_substs_trait(ty, &[]), - }; - self.register_predicate_obligation(infcx, Obligation { - cause, - recursion_depth: 0, - param_env, - predicate: trait_ref.to_predicate() - }); - } - fn register_predicate_obligation<'a, 'gcx>(&mut self, infcx: &InferCtxt<'a, 'gcx, 'tcx>, obligation: PredicateObligation<'tcx>) @@ -195,7 +182,7 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { debug!("register_predicate_obligation(obligation={:?})", obligation); - assert!(!infcx.is_in_snapshot()); + assert!(!infcx.is_in_snapshot() || self.usable_in_snapshot); self.predicates.register_obligation(PendingPredicateObligation { obligation, @@ -203,9 +190,10 @@ impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> { }); } - fn select_all_or_error<'a, 'gcx>(&mut self, - infcx: &InferCtxt<'a, 'gcx, 'tcx>) - -> Result<(),Vec>> + fn select_all_or_error<'a, 'gcx>( + &mut self, + infcx: &InferCtxt<'a, 'gcx, 'tcx> + ) -> Result<(),Vec>> { self.select_where_possible(infcx)?; @@ -287,6 +275,8 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, self.selcx.infcx().resolve_type_vars_if_possible(&obligation.predicate); } + debug!("process_obligation: obligation = {:?}", obligation); + match obligation.predicate { ty::Predicate::Trait(ref data) => { let trait_obligation = obligation.with(data.clone()); @@ -294,7 +284,7 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, if data.is_global() { // no type variables present, can use evaluation for better caching. // FIXME: consider caching errors too. - if self.selcx.infcx().predicate_must_hold(&obligation) { + if self.selcx.infcx().predicate_must_hold_considering_regions(&obligation) { debug!("selecting trait `{:?}` at depth {} evaluated to holds", data, obligation.recursion_depth); return ProcessResult::Changed(vec![]) @@ -523,7 +513,7 @@ impl<'a, 'b, 'gcx, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'gcx, } } -/// Return the set of type variables contained in a trait ref +/// Returns the set of type variables contained in a trait ref fn trait_ref_type_vars<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, t: ty::PolyTraitRef<'tcx>) -> Vec> { diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index cf37c3fceba8f..b875bfdfa9fa4 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -1,19 +1,10 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Trait Resolution. See the [rustc guide] for more information on how this works. //! //! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html #[allow(dead_code)] pub mod auto_trait; +mod chalk_fulfill; mod coherence; pub mod error_reporting; mod engine; @@ -29,20 +20,20 @@ mod util; pub mod query; use chalk_engine; -use hir; -use hir::def_id::DefId; -use infer::{InferCtxt, SuppressRegionErrors}; -use infer::outlives::env::OutlivesEnvironment; -use middle::region; -use mir::interpret::ErrorHandled; -use rustc_data_structures::sync::Lrc; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::infer::{InferCtxt, SuppressRegionErrors}; +use crate::infer::outlives::env::OutlivesEnvironment; +use crate::middle::region; +use crate::mir::interpret::ErrorHandled; +use rustc_macros::HashStable; use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; -use ty::subst::Substs; -use ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate}; -use ty::error::{ExpectedFound, TypeError}; -use ty::fold::{TypeFolder, TypeFoldable, TypeVisitor}; -use util::common::ErrorReported; +use crate::ty::subst::{InternalSubsts, SubstsRef}; +use crate::ty::{self, AdtKind, List, Ty, TyCtxt, GenericParamDefKind, ToPredicate}; +use crate::ty::error::{ExpectedFound, TypeError}; +use crate::ty::fold::{TypeFolder, TypeFoldable, TypeVisitor}; +use crate::util::common::ErrorReported; use std::fmt::Debug; use std::rc::Rc; @@ -52,7 +43,8 @@ pub use self::FulfillmentErrorCode::*; pub use self::Vtable::*; pub use self::ObligationCauseCode::*; -pub use self::coherence::{orphan_check, overlapping_impls, OrphanCheckErr, OverlapResult}; +pub use self::coherence::{add_placeholder_note, orphan_check, overlapping_impls}; +pub use self::coherence::{OrphanCheckErr, OverlapResult}; pub use self::fulfill::{FulfillmentContext, PendingPredicateObligation}; pub use self::project::MismatchedProjectionTypes; pub use self::project::{normalize, normalize_projection_type, poly_project_and_unify_type}; @@ -71,19 +63,24 @@ pub use self::util::{elaborate_predicates, elaborate_trait_ref, elaborate_trait_ pub use self::util::{supertraits, supertrait_def_ids, transitive_bounds, Supertraits, SupertraitDefIds}; +pub use self::chalk_fulfill::{ + CanonicalGoal as ChalkCanonicalGoal, + FulfillmentContext as ChalkFulfillmentContext +}; + pub use self::ObligationCauseCode::*; pub use self::FulfillmentErrorCode::*; pub use self::SelectionError::*; pub use self::Vtable::*; -// Whether to enable bug compatibility with issue #43355 +/// Whether to enable bug compatibility with issue #43355. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum IntercrateMode { Issue43355, Fixed } -// The mode that trait queries run in +/// The mode that trait queries run in. #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum TraitQueryMode { // Standard/un-canonicalized queries get accurate @@ -96,46 +93,46 @@ pub enum TraitQueryMode { Canonical, } -/// An `Obligation` represents some trait reference (e.g., `int:Eq`) for -/// which the vtable must be found. The process of finding a vtable is +/// An `Obligation` represents some trait reference (e.g., `int: Eq`) for +/// which the vtable must be found. The process of finding a vtable is /// called "resolving" the `Obligation`. This process consists of /// either identifying an `impl` (e.g., `impl Eq for int`) that /// provides the required vtable, or else finding a bound that is in /// scope. The eventual result is usually a `Selection` (defined below). #[derive(Clone, PartialEq, Eq, Hash)] pub struct Obligation<'tcx, T> { - /// Why do we have to prove this thing? 
+ /// The reason we have to prove this thing. pub cause: ObligationCause<'tcx>, - /// In which environment should we prove this thing? + /// The environment in which we should prove this thing. pub param_env: ty::ParamEnv<'tcx>, - /// What are we trying to prove? + /// The thing we are trying to prove. pub predicate: T, /// If we started proving this as a result of trying to prove /// something else, track the total depth to ensure termination. /// If this goes over a certain threshold, we abort compilation -- /// in such cases, we can not say whether or not the predicate - /// holds for certain. Stupid halting problem. Such a drag. + /// holds for certain. Stupid halting problem; such a drag. pub recursion_depth: usize, } pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>; pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>; -/// Why did we incur this obligation? Used for error reporting. +/// The reason why we incurred this obligation; used for error reporting. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ObligationCause<'tcx> { pub span: Span, - // The id of the fn body that triggered this obligation. This is - // used for region obligations to determine the precise - // environment in which the region obligation should be evaluated - // (in particular, closures can add new assumptions). See the - // field `region_obligations` of the `FulfillmentContext` for more - // information. - pub body_id: ast::NodeId, + /// The ID of the fn body that triggered this obligation. This is + /// used for region obligations to determine the precise + /// environment in which the region obligation should be evaluated + /// (in particular, closures can add new assumptions). See the + /// field `region_obligations` of the `FulfillmentContext` for more + /// information. + pub body_id: hir::HirId, pub code: ObligationCauseCode<'tcx> } @@ -148,6 +145,7 @@ impl<'tcx> ObligationCause<'tcx> { ObligationCauseCode::StartFunctionType => { tcx.sess.source_map().def_span(self.span) } + ObligationCauseCode::MatchExpressionArm { arm_span, .. } => arm_span, _ => self.span, } } @@ -155,20 +153,20 @@ impl<'tcx> ObligationCause<'tcx> { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum ObligationCauseCode<'tcx> { - /// Not well classified or should be obvious from span. + /// Not well classified or should be obvious from the span. MiscObligation, - /// A slice or array is WF only if `T: Sized` + /// A slice or array is WF only if `T: Sized`. SliceOrArrayElem, - /// A tuple is WF only if its middle elements are Sized + /// A tuple is WF only if its middle elements are `Sized`. TupleElem, - /// This is the trait reference from the given projection + /// This is the trait reference from the given projection. ProjectionWf(ty::ProjectionTy<'tcx>), - /// In an impl of trait X for type Y, type Y must - /// also implement all supertraits of X. + /// In an impl of trait `X` for type `Y`, type `Y` must + /// also implement all supertraits of `X`. ItemObligation(DefId), /// A type like `&'a T` is WF only if `T: 'a`. 
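To make the `Obligation`/`ObligationCause` terminology above concrete, here is a user-level sketch (ordinary stable Rust, not compiler internals) of code that gives rise to the kind of obligations these comments describe:

use std::fmt::Display;

// Calling `print_all(vec![...])` obliges the compiler to prove
// `Vec<String>: IntoIterator` (resolved by an impl) plus the nested
// bound `String: Display`; each proof obligation carries a cause
// pointing back at the call site for diagnostics.
fn print_all<I>(items: I)
where
    I: IntoIterator,
    I::Item: Display,
{
    for item in items {
        println!("{}", item);
    }
}

fn main() {
    print_all(vec![String::from("a"), String::from("b")]);
}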
@@ -223,11 +221,23 @@ pub enum ObligationCauseCode<'tcx> { ExprAssignable, /// Computing common supertype in the arms of a match expression - MatchExpressionArm { arm_span: Span, - source: hir::MatchSource }, + MatchExpressionArm { + arm_span: Span, + source: hir::MatchSource, + prior_arms: Vec, + last_ty: Ty<'tcx>, + discrim_hir_id: hir::HirId, + }, + + /// Computing common supertype in the pattern guard for the arms of a match expression + MatchExpressionArmPattern { span: Span, ty: Ty<'tcx> }, /// Computing common supertype in an if expression - IfExpression, + IfExpression { + then: Span, + outer: Option, + semicolon: Option, + }, /// Computing common supertype of an if expression with no else counter-part IfExpressionWithNoElse, @@ -248,10 +258,10 @@ pub enum ObligationCauseCode<'tcx> { ReturnNoExpression, /// `return` with an expression - ReturnType(ast::NodeId), + ReturnType(hir::HirId), /// Block implicit return - BlockTailExpression(ast::NodeId), + BlockTailExpression(hir::HirId), /// #[feature(trivial_bounds)] is not enabled TrivialBound, @@ -265,7 +275,7 @@ pub struct DerivedObligationCause<'tcx> { /// directly. parent_trait_ref: ty::PolyTraitRef<'tcx>, - /// The parent trait had this cause + /// The parent trait had this cause. parent_code: Rc> } @@ -274,18 +284,18 @@ pub type PredicateObligations<'tcx> = Vec>; pub type TraitObligations<'tcx> = Vec>; /// The following types: -/// * `WhereClause` -/// * `WellFormed` -/// * `FromEnv` -/// * `DomainGoal` -/// * `Goal` -/// * `Clause` -/// * `Environment` -/// * `InEnvironment` +/// * `WhereClause`, +/// * `WellFormed`, +/// * `FromEnv`, +/// * `DomainGoal`, +/// * `Goal`, +/// * `Clause`, +/// * `Environment`, +/// * `InEnvironment`, /// are used for representing the trait system in the form of /// logic programming clauses. They are part of the interface /// for the chalk SLG solver. 
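Returning to the `MatchExpressionArm` and `IfExpression` cause codes above: they correspond to ordinary user code such as the following sketch, and the extra fields (`prior_arms`, `last_ty`, `then`, and so on) exist so that a mismatch in a later arm or branch can be reported with the earlier ones as context.

fn describe(n: u32) -> String {
    // All arms must coerce to one common type (`String` here). If the
    // last arm returned, say, `&str`, the spans of the earlier arms
    // (`prior_arms`) and their common type (`last_ty`) would feed the
    // mismatch diagnostic.
    match n {
        0 => String::from("zero"),
        1 => String::from("one"),
        _ => format!("{} (many)", n),
    }
}

fn main() {
    // Both branches of the `if` must likewise agree on a common supertype.
    let label = if describe(2).len() > 3 { "long" } else { "short" };
    println!("{} -> {}", describe(2), label);
}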
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable)] pub enum WhereClause<'tcx> { Implemented(ty::TraitPredicate<'tcx>), ProjectionEq(ty::ProjectionPredicate<'tcx>), @@ -293,19 +303,19 @@ pub enum WhereClause<'tcx> { TypeOutlives(ty::TypeOutlivesPredicate<'tcx>), } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable)] pub enum WellFormed<'tcx> { Trait(ty::TraitPredicate<'tcx>), Ty(Ty<'tcx>), } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable)] pub enum FromEnv<'tcx> { Trait(ty::TraitPredicate<'tcx>), Ty(Ty<'tcx>), } -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable)] pub enum DomainGoal<'tcx> { Holds(WhereClause<'tcx>), WellFormed(WellFormed<'tcx>), @@ -315,19 +325,20 @@ pub enum DomainGoal<'tcx> { pub type PolyDomainGoal<'tcx> = ty::Binder>; -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub enum QuantifierKind { Universal, Existential, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub enum GoalKind<'tcx> { Implies(Clauses<'tcx>, Goal<'tcx>), And(Goal<'tcx>, Goal<'tcx>), Not(Goal<'tcx>), DomainGoal(DomainGoal<'tcx>), Quantified(QuantifierKind, ty::Binder>), + Subtype(Ty<'tcx>, Ty<'tcx>), CannotProve, } @@ -350,9 +361,9 @@ impl<'tcx> DomainGoal<'tcx> { } impl<'tcx> GoalKind<'tcx> { - pub fn from_poly_domain_goal<'a>( + pub fn from_poly_domain_goal<'a, 'gcx>( domain_goal: PolyDomainGoal<'tcx>, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'a, 'gcx, 'tcx>, ) -> GoalKind<'tcx> { match domain_goal.no_bound_vars() { Some(p) => p.into_goal(), @@ -366,7 +377,7 @@ impl<'tcx> GoalKind<'tcx> { /// This matches the definition from Page 7 of "A Proof Procedure for the Logic of Hereditary /// Harrop Formulas". -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub enum Clause<'tcx> { Implies(ProgramClause<'tcx>), ForAll(ty::Binder>), @@ -390,19 +401,19 @@ pub type Clauses<'tcx> = &'tcx List>; /// it with the reverse implication operator `:-` to emphasize the way /// that programs are actually solved (via backchaining, which starts /// with the goal to solve and proceeds from there). -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub struct ProgramClause<'tcx> { - /// This goal will be considered true... + /// This goal will be considered true ... pub goal: DomainGoal<'tcx>, - /// ...if we can prove these hypotheses (there may be no hypotheses at all): + /// ... if we can prove these hypotheses (there may be no hypotheses at all): pub hypotheses: Goals<'tcx>, /// Useful for filtering clauses. pub category: ProgramClauseCategory, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub enum ProgramClauseCategory { ImpliedBound, WellFormed, @@ -410,7 +421,7 @@ pub enum ProgramClauseCategory { } /// A set of clauses that we assume to be true. -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub struct Environment<'tcx> { pub clauses: Clauses<'tcx>, } @@ -425,7 +436,7 @@ impl Environment<'tcx> { } /// Something (usually a goal), along with an environment. 
-#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable)] pub struct InEnvironment<'tcx, G> { pub environment: Environment<'tcx>, pub goal: G, @@ -478,7 +489,6 @@ pub type SelectionResult<'tcx, T> = Result, SelectionError<'tcx>>; /// For example, the vtable may be tied to a specific impl (case A), /// or it may be relative to some bound that is in scope (case B). /// -/// /// ``` /// impl Clone for Option { ... } // Impl_1 /// impl Clone for Box { ... } // Impl_2 @@ -505,12 +515,12 @@ pub type SelectionResult<'tcx, T> = Result, SelectionError<'tcx>>; /// ### The type parameter `N` /// /// See explanation on `VtableImplData`. -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub enum Vtable<'tcx, N> { /// Vtable identifying a particular impl. VtableImpl(VtableImplData<'tcx, N>), - /// Vtable for auto trait implementations + /// Vtable for auto trait implementations. /// This carries the information and nested obligations with regards /// to an auto implementation for a trait `Trait`. The nested obligations /// ensure the trait implementation holds for all the constituent types. @@ -522,18 +532,18 @@ pub enum Vtable<'tcx, N> { /// any). VtableParam(Vec), - /// Virtual calls through an object + /// Virtual calls through an object. VtableObject(VtableObjectData<'tcx, N>), /// Successful resolution for a builtin trait. VtableBuiltin(VtableBuiltinData), - /// Vtable automatically generated for a closure. The def ID is the ID + /// Vtable automatically generated for a closure. The `DefId` is the ID /// of the closure expression. This is a `VtableImpl` in spirit, but the /// impl is generated by the compiler and does not appear in the source. VtableClosure(VtableClosureData<'tcx, N>), - /// Same as above, but for a fn pointer type with the given signature. + /// Same as above, but for a function pointer type with the given signature. VtableFnPointer(VtableFnPointerData<'tcx, N>), /// Vtable automatically generated for a generator. @@ -553,14 +563,14 @@ pub enum Vtable<'tcx, N> { /// is `Obligation`, as one might expect. During codegen, however, this /// is `()`, because codegen only requires a shallow resolution of an /// impl, and nested obligations are satisfied later. 
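The `Impl_1`/`Impl_2` example in the comment above lost its generic parameters in extraction; the following user-level sketch (assuming the usual standard-library `impl<T: Clone> Clone for Option<T>` and the derived/concrete impls it mentions) shows the two ways a `Clone` obligation can be resolved:

fn foo<T: Clone>(param: T, mixed: Option<T>) -> (T, Option<T>) {
    // `T: Clone` is proved from the in-scope bound (a `VtableParam`);
    // `Option<T>: Clone` is proved by the blanket impl (a `VtableImpl`),
    // whose nested obligation `T: Clone` again comes from the bound.
    (param.clone(), mixed.clone())
}

fn main() {
    // `Option<i32>: Clone` is proved by the same impl, but its nested
    // obligation `i32: Clone` is satisfied by a concrete impl instead.
    let concrete = Some(1_i32).clone();
    let (p, m) = foo(String::from("x"), Some(String::from("y")));
    println!("{:?} {:?} {:?}", concrete, p, m);
}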
-#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableImplData<'tcx, N> { pub impl_def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, pub nested: Vec } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableGeneratorData<'tcx, N> { pub generator_def_id: DefId, pub substs: ty::GeneratorSubsts<'tcx>, @@ -569,7 +579,7 @@ pub struct VtableGeneratorData<'tcx, N> { pub nested: Vec } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableClosureData<'tcx, N> { pub closure_def_id: DefId, pub substs: ty::ClosureSubsts<'tcx>, @@ -578,20 +588,20 @@ pub struct VtableClosureData<'tcx, N> { pub nested: Vec } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableAutoImplData { pub trait_def_id: DefId, pub nested: Vec } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableBuiltinData { pub nested: Vec } /// A vtable for some object-safe trait `Foo` automatically derived /// for the object type `Foo`. -#[derive(PartialEq, Eq, Clone, RustcEncodable, RustcDecodable)] +#[derive(PartialEq, Eq, Clone, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableObjectData<'tcx, N> { /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`. pub upcast_trait_ref: ty::PolyTraitRef<'tcx>, @@ -604,16 +614,16 @@ pub struct VtableObjectData<'tcx, N> { pub nested: Vec, } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableFnPointerData<'tcx, N> { pub fn_ty: Ty<'tcx>, pub nested: Vec } -#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub struct VtableTraitAliasData<'tcx, N> { pub alias_def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, pub nested: Vec, } @@ -631,16 +641,16 @@ pub fn predicates_for_generics<'tcx>(cause: ObligationCause<'tcx>, /// `bound` or is not known to meet bound (note that this is /// conservative towards *no impl*, which is the opposite of the /// `evaluate` methods). 
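One user-visible place where a "known to meet bound" question of this sort surfaces is the move/copy distinction. The following is only a user-level sketch of that behaviour, not the compiler-internal call:

fn main() {
    let a = 5_i32;
    let b = a; // `i32` is known to meet the `Copy` bound, so `a` stays usable.
    println!("{} {}", a, b);

    let s = String::from("hi");
    let t = s; // `String` is not known to meet `Copy`, so this is a move.
    println!("{}", t);
}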
-pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>, - def_id: DefId, - span: Span) --> bool -{ - debug!("type_known_to_meet_bound(ty={:?}, bound={:?})", +pub fn type_known_to_meet_bound_modulo_regions<'a, 'gcx, 'tcx>( + infcx: &InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>, + def_id: DefId, + span: Span, +) -> bool { + debug!("type_known_to_meet_bound_modulo_regions(ty={:?}, bound={:?})", ty, - infcx.tcx.item_path_str(def_id)); + infcx.tcx.def_path_str(def_id)); let trait_ref = ty::TraitRef { def_id, @@ -648,14 +658,14 @@ pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx }; let obligation = Obligation { param_env, - cause: ObligationCause::misc(span, ast::DUMMY_NODE_ID), + cause: ObligationCause::misc(span, hir::DUMMY_HIR_ID), recursion_depth: 0, predicate: trait_ref.to_predicate(), }; - let result = infcx.predicate_must_hold(&obligation); + let result = infcx.predicate_must_hold_modulo_regions(&obligation); debug!("type_known_to_meet_ty={:?} bound={} => {:?}", - ty, infcx.tcx.item_path_str(def_id), result); + ty, infcx.tcx.def_path_str(def_id), result); if result && (ty.has_infer_types() || ty.has_closure_types()) { // Because of inference "guessing", selection can sometimes claim @@ -671,7 +681,7 @@ pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx // We can use a dummy node-id here because we won't pay any mind // to region obligations that arise (there shouldn't really be any // anyhow). - let cause = ObligationCause::misc(span, ast::DUMMY_NODE_ID); + let cause = ObligationCause::misc(span, hir::DUMMY_HIR_ID); fulfill_cx.register_bound(infcx, param_env, ty, def_id, cause); @@ -680,15 +690,15 @@ pub fn type_known_to_meet_bound<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx // assume it is move; linear is always ok. match fulfill_cx.select_all_or_error(infcx) { Ok(()) => { - debug!("type_known_to_meet_bound: ty={:?} bound={} success", + debug!("type_known_to_meet_bound_modulo_regions: ty={:?} bound={} success", ty, - infcx.tcx.item_path_str(def_id)); + infcx.tcx.def_path_str(def_id)); true } Err(e) => { - debug!("type_known_to_meet_bound: ty={:?} bound={} errors={:?}", + debug!("type_known_to_meet_bound_modulo_regions: ty={:?} bound={} errors={:?}", ty, - infcx.tcx.item_path_str(def_id), + infcx.tcx.def_path_str(def_id), e); false } @@ -814,8 +824,11 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates); - let elaborated_env = ty::ParamEnv::new(tcx.intern_predicates(&predicates), - unnormalized_env.reveal); + let elaborated_env = ty::ParamEnv::new( + tcx.intern_predicates(&predicates), + unnormalized_env.reveal, + unnormalized_env.def_id + ); // HACK: we are trying to normalize the param-env inside *itself*. The problem is that // normalization expects its param-env to be already normalized, which means we have @@ -862,8 +875,11 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // predicates here anyway. Keeping them here anyway because it seems safer. 
let outlives_env: Vec<_> = non_outlives_predicates.iter().chain(&outlives_predicates).cloned().collect(); - let outlives_env = ty::ParamEnv::new(tcx.intern_predicates(&outlives_env), - unnormalized_env.reveal); + let outlives_env = ty::ParamEnv::new( + tcx.intern_predicates(&outlives_env), + unnormalized_env.reveal, + None + ); let outlives_predicates = match do_normalize_predicates(tcx, region_context, cause, outlives_env, outlives_predicates) { @@ -879,7 +895,11 @@ pub fn normalize_param_env_or_error<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut predicates = non_outlives_predicates; predicates.extend(outlives_predicates); debug!("normalize_param_env_or_error: final predicates={:?}", predicates); - ty::ParamEnv::new(tcx.intern_predicates(&predicates), unnormalized_env.reveal) + ty::ParamEnv::new( + tcx.intern_predicates(&predicates), + unnormalized_env.reveal, + unnormalized_env.def_id + ) } pub fn fully_normalize<'a, 'gcx, 'tcx, T>( @@ -944,7 +964,7 @@ fn normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn substitute_normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - key: (DefId, &'tcx Substs<'tcx>)) + key: (DefId, SubstsRef<'tcx>)) -> bool { debug!("substitute_normalize_and_test_predicates(key={:?})", @@ -964,16 +984,16 @@ fn substitute_normalize_and_test_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx fn vtable_methods<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_ref: ty::PolyTraitRef<'tcx>) - -> Lrc)>>> + -> &'tcx [Option<(DefId, SubstsRef<'tcx>)>] { debug!("vtable_methods({:?})", trait_ref); - Lrc::new( + tcx.arena.alloc_from_iter( supertraits(tcx, trait_ref).flat_map(move |trait_ref| { let trait_methods = tcx.associated_items(trait_ref.def_id()) .filter(|item| item.kind == ty::AssociatedKind::Method); - // Now list each method's DefId and Substs (for within its trait). + // Now list each method's DefId and InternalSubsts (for within its trait). // If the method can never be called from this object, produce None. trait_methods.map(move |trait_method| { debug!("vtable_methods: trait_method={:?}", trait_method); @@ -988,10 +1008,11 @@ fn vtable_methods<'a, 'tcx>( // the method may have some early-bound lifetimes, add // regions for those let substs = trait_ref.map_bound(|trait_ref| - Substs::for_item(tcx, def_id, |param, _| + InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind { GenericParamDefKind::Lifetime => tcx.types.re_erased.into(), - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. 
} | + GenericParamDefKind::Const => { trait_ref.substs[param.index as usize] } } @@ -1018,7 +1039,7 @@ fn vtable_methods<'a, 'tcx>( Some((def_id, substs)) }) - }).collect() + }) ) } @@ -1041,7 +1062,7 @@ impl<'tcx,O> Obligation<'tcx,O> { } pub fn misc(span: Span, - body_id: ast::NodeId, + body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, trait_ref: O) -> Obligation<'tcx, O> { @@ -1059,18 +1080,18 @@ impl<'tcx,O> Obligation<'tcx,O> { impl<'tcx> ObligationCause<'tcx> { #[inline] pub fn new(span: Span, - body_id: ast::NodeId, + body_id: hir::HirId, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> { - ObligationCause { span: span, body_id: body_id, code: code } + ObligationCause { span, body_id, code } } - pub fn misc(span: Span, body_id: ast::NodeId) -> ObligationCause<'tcx> { - ObligationCause { span: span, body_id: body_id, code: MiscObligation } + pub fn misc(span: Span, body_id: hir::HirId) -> ObligationCause<'tcx> { + ObligationCause { span, body_id, code: MiscObligation } } pub fn dummy() -> ObligationCause<'tcx> { - ObligationCause { span: DUMMY_SP, body_id: ast::CRATE_NODE_ID, code: MiscObligation } + ObligationCause { span: DUMMY_SP, body_id: hir::CRATE_HIR_ID, code: MiscObligation } } } @@ -1174,14 +1195,26 @@ where ) -> bool; } -pub trait ExClauseLift<'tcx> +pub trait ChalkContextLift<'tcx> where Self: chalk_engine::context::Context + Clone, { type LiftedExClause: Debug + 'tcx; + type LiftedDelayedLiteral: Debug + 'tcx; + type LiftedLiteral: Debug + 'tcx; fn lift_ex_clause_to_tcx<'a, 'gcx>( ex_clause: &chalk_engine::ExClause, tcx: TyCtxt<'a, 'gcx, 'tcx>, ) -> Option; + + fn lift_delayed_literal_to_tcx<'a, 'gcx>( + ex_clause: &chalk_engine::DelayedLiteral, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> Option; + + fn lift_literal_to_tcx<'a, 'gcx>( + ex_clause: &chalk_engine::Literal, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ) -> Option; } diff --git a/src/librustc/traits/object_safety.rs b/src/librustc/traits/object_safety.rs index 4b2f817cfa91a..1c8ea5c7b9c5b 100644 --- a/src/librustc/traits/object_safety.rs +++ b/src/librustc/traits/object_safety.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! "Object safety" refers to the ability for a trait to be converted //! to an object. In general, traits may only be converted to an //! object if all of their methods meet certain criteria. In particular, @@ -16,15 +6,16 @@ //! - have a suitable receiver from which we can extract a vtable and coerce to a "thin" version //! that doesn't contain the vtable; //! - not reference the erased type `Self` except for in this receiver; -//! - not have generic type parameters +//! - not have generic type parameters. 
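As a user-level sketch of the criteria listed above (a suitable receiver, no stray `Self`, no generic type parameters), compare an object-safe trait with one that is not:

#![allow(dead_code)]

use std::rc::Rc;

trait ObjectSafe {
    // Receivers from which a vtable can be extracted.
    fn describe(&self) -> String;
    fn describe_rc(self: Rc<Self>) -> String;
    // A `Self: Sized` bound exempts a method from the object-safety rules.
    fn make() -> Self where Self: Sized;
}

trait NotObjectSafe {
    // A generic type parameter on a method makes the trait not object safe.
    fn pick<T>(&self, a: T, b: T) -> T;
}

struct Unit;

impl ObjectSafe for Unit {
    fn describe(&self) -> String { "unit".into() }
    fn describe_rc(self: Rc<Self>) -> String { self.describe() }
    fn make() -> Self { Unit }
}

fn main() {
    // Allowed because `ObjectSafe` passes the checks in this module;
    // `Rc<dyn NotObjectSafe>` would be rejected with an object-safety error.
    let obj: Rc<dyn ObjectSafe> = Rc::new(Unit);
    println!("{}", obj.clone().describe_rc());
}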
use super::elaborate_predicates; -use hir::def_id::DefId; -use lint; -use traits::{self, Obligation, ObligationCause}; -use ty::{self, Ty, TyCtxt, TypeFoldable, Predicate, ToPredicate}; -use ty::subst::{Subst, Substs}; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::lint; +use crate::traits::{self, Obligation, ObligationCause}; +use crate::ty::{self, Ty, TyCtxt, TypeFoldable, Predicate, ToPredicate}; +use crate::ty::subst::{Subst, InternalSubsts}; use std::borrow::Cow; use std::iter::{self}; use syntax::ast::{self, Name}; @@ -32,17 +23,17 @@ use syntax_pos::Span; #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum ObjectSafetyViolation { - /// Self : Sized declared on the trait + /// `Self: Sized` declared on the trait. SizedSelf, /// Supertrait reference references `Self` an in illegal location - /// (e.g., `trait Foo : Bar`) + /// (e.g., `trait Foo : Bar`). SupertraitSelf, - /// Method has something illegal + /// Method has something illegal. Method(ast::Name, MethodViolationCode), - /// Associated const + /// Associated const. AssociatedConst(ast::Name), } @@ -94,7 +85,7 @@ pub enum MethodViolationCode { impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { /// Returns the object safety violations that affect - /// astconv - currently, Self in supertraits. This is needed + /// astconv -- currently, `Self` in supertraits. This is needed /// because `object_safety_violations` can't be used during /// type collection. pub fn astconv_object_safety_violations(self, trait_def_id: DefId) @@ -139,10 +130,10 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { // It's also hard to get a use site span, so we use the method definition span. self.lint_node_note( lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY, - ast::CRATE_NODE_ID, + hir::CRATE_HIR_ID, *span, &format!("the trait `{}` cannot be made into an object", - self.item_path_str(trait_def_id)), + self.def_path_str(trait_def_id)), &violation.error_msg()); false } else { @@ -190,7 +181,26 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { // In the case of a trait predicate, we can skip the "self" type. data.skip_binder().input_types().skip(1).any(|t| t.has_self_ty()) } - ty::Predicate::Projection(..) | + ty::Predicate::Projection(ref data) => { + // And similarly for projections. This should be redundant with + // the previous check because any projection should have a + // matching `Trait` predicate with the same inputs, but we do + // the check to be safe. + // + // Note that we *do* allow projection *outputs* to contain + // `self` (i.e., `trait Foo: Bar { type Result; }`), + // we just require the user to specify *both* outputs + // in the object type (i.e., `dyn Foo`). + // + // This is ALT2 in issue #56288, see that for discussion of the + // possible alternatives. + data.skip_binder() + .projection_ty + .trait_ref(self) + .input_types() + .skip(1) + .any(|t| t.has_self_ty()) + } ty::Predicate::WellFormed(..) | ty::Predicate::ObjectSafe(..) | ty::Predicate::TypeOutlives(..) | @@ -243,6 +253,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { method: &ty::AssociatedItem) -> Option { + debug!("object_safety_violation_for_method({:?}, {:?})", trait_def_id, method); // Any method that has a `Self : Sized` requisite is otherwise // exempt from the regulations. if self.generics_require_sized_self(method.def_id) { @@ -261,6 +272,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { method: &ty::AssociatedItem) -> bool { + debug!("is_vtable_safe_method({:?}, {:?})", trait_def_id, method); // Any method that has a `Self : Sized` requisite can't be called. 
if self.generics_require_sized_self(method.def_id) { return false; @@ -298,7 +310,8 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } // We can't monomorphize things like `fn foo(...)`. - if self.generics_of(method.def_id).own_counts().types != 0 { + let own_counts = self.generics_of(method.def_id).own_counts(); + if own_counts.types + own_counts.consts != 0 { return Some(MethodViolationCode::Generic); } @@ -330,7 +343,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } else { // sanity check to make sure the receiver actually has the layout of a pointer - use ty::layout::Abi; + use crate::ty::layout::Abi; let param_env = self.param_env(method.def_id); @@ -350,7 +363,15 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { match abi_of_ty(unit_receiver_ty) { &Abi::Scalar(..) => (), - abi => bug!("Receiver when Self = () should have a Scalar ABI, found {:?}", abi) + abi => { + self.sess.delay_span_bug( + self.def_span(method.def_id), + &format!( + "Receiver when Self = () should have a Scalar ABI, found {:?}", + abi + ), + ); + } } let trait_object_ty = self.object_ty_for_trait( @@ -364,10 +385,15 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { match abi_of_ty(trait_object_receiver) { &Abi::ScalarPair(..) => (), - abi => bug!( - "Receiver when Self = {} should have a ScalarPair ABI, found {:?}", - trait_object_ty, abi - ) + abi => { + self.sess.delay_span_bug( + self.def_span(method.def_id), + &format!( + "Receiver when Self = {} should have a ScalarPair ABI, found {:?}", + trait_object_ty, abi + ), + ); + } } } } @@ -375,12 +401,13 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { None } - /// performs a type substitution to produce the version of receiver_ty when `Self = self_ty` - /// e.g., for receiver_ty = `Rc` and self_ty = `Foo`, returns `Rc` + /// Performs a type substitution to produce the version of receiver_ty when `Self = self_ty` + /// e.g., for receiver_ty = `Rc` and self_ty = `Foo`, returns `Rc`. fn receiver_for_self_ty( self, receiver_ty: Ty<'tcx>, self_ty: Ty<'tcx>, method_def_id: DefId ) -> Ty<'tcx> { - let substs = Substs::for_item(self, method_def_id, |param, _| { + debug!("receiver_for_self_ty({:?}, {:?}, {:?})", receiver_ty, self_ty, method_def_id); + let substs = InternalSubsts::for_item(self, method_def_id, |param, _| { if param.index == 0 { self_ty.into() } else { @@ -388,12 +415,15 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } }); - receiver_ty.subst(self, substs) + let result = receiver_ty.subst(self, substs); + debug!("receiver_for_self_ty({:?}, {:?}, {:?}) = {:?}", + receiver_ty, self_ty, method_def_id, result); + result } - /// creates the object type for the current trait. For example, + /// Creates the object type for the current trait. For example, /// if the current trait is `Deref`, then this will be - /// `dyn Deref + 'static` + /// `dyn Deref + 'static`. 
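The object type assembled here is the internal counterpart of what a user writes when naming a trait object: every associated type (including those from supertraits) must be specified, which is what the existential projection predicates below record. A user-level sketch, assuming the standard `Iterator` trait:

// `dyn Iterator<Item = u32>` is the user-facing form of such an object
// type: the `Item = u32` part corresponds to an existential projection
// predicate.
fn sum_all(it: &mut dyn Iterator<Item = u32>) -> u32 {
    it.sum()
}

fn main() {
    let mut nums = vec![1_u32, 2, 3].into_iter();
    println!("{}", sum_all(&mut nums));
}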
fn object_ty_for_trait(self, trait_def_id: DefId, lifetime: ty::Region<'tcx>) -> Ty<'tcx> { debug!("object_ty_for_trait: trait_def_id={:?}", trait_def_id); @@ -404,18 +434,26 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { ); let mut associated_types = traits::supertraits(self, ty::Binder::dummy(trait_ref)) - .flat_map(|trait_ref| self.associated_items(trait_ref.def_id())) - .filter(|item| item.kind == ty::AssociatedKind::Type) + .flat_map(|super_trait_ref| { + self.associated_items(super_trait_ref.def_id()) + .map(move |item| (super_trait_ref, item)) + }) + .filter(|(_, item)| item.kind == ty::AssociatedKind::Type) .collect::>(); // existential predicates need to be in a specific order - associated_types.sort_by_cached_key(|item| self.def_path_hash(item.def_id)); - - let projection_predicates = associated_types.into_iter().map(|item| { + associated_types.sort_by_cached_key(|(_, item)| self.def_path_hash(item.def_id)); + + let projection_predicates = associated_types.into_iter().map(|(super_trait_ref, item)| { + // We *can* get bound lifetimes here in cases like + // `trait MyTrait: for<'s> OtherTrait<&'s T, Output=bool>`. + // + // binder moved to (*)... + let super_trait_ref = super_trait_ref.skip_binder(); ty::ExistentialPredicate::Projection(ty::ExistentialProjection { - ty: self.mk_projection(item.def_id, trait_ref.substs), + ty: self.mk_projection(item.def_id, super_trait_ref.substs), item_def_id: item.def_id, - substs: trait_ref.substs, + substs: super_trait_ref.substs, }) }); @@ -424,7 +462,8 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { ); let object_ty = self.mk_dynamic( - ty::Binder::dummy(existential_predicates), + // (*) ... binder re-introduced here + ty::Binder::bind(existential_predicates), lifetime, ); @@ -433,25 +472,27 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { object_ty } - /// checks the method's receiver (the `self` argument) can be dispatched on when `Self` is a + /// Checks the method's receiver (the `self` argument) can be dispatched on when `Self` is a /// trait object. We require that `DispatchableFromDyn` be implemented for the receiver type /// in the following way: - /// - let `Receiver` be the type of the `self` argument, i.e `Self`, `&Self`, `Rc` + /// - let `Receiver` be the type of the `self` argument, i.e `Self`, `&Self`, `Rc`, /// - require the following bound: /// - /// Receiver[Self => T]: DispatchFromDyn dyn Trait]> + /// ``` + /// Receiver[Self => T]: DispatchFromDyn dyn Trait]> + /// ``` /// - /// where `Foo[X => Y]` means "the same type as `Foo`, but with `X` replaced with `Y`" + /// where `Foo[X => Y]` means "the same type as `Foo`, but with `X` replaced with `Y`" /// (substitution notation). /// - /// some examples of receiver types and their required obligation - /// - `&'a mut self` requires `&'a mut Self: DispatchFromDyn<&'a mut dyn Trait>` - /// - `self: Rc` requires `Rc: DispatchFromDyn>` - /// - `self: Pin>` requires `Pin>: DispatchFromDyn>>` + /// Some examples of receiver types and their required obligation: + /// - `&'a mut self` requires `&'a mut Self: DispatchFromDyn<&'a mut dyn Trait>`, + /// - `self: Rc` requires `Rc: DispatchFromDyn>`, + /// - `self: Pin>` requires `Pin>: DispatchFromDyn>>`. /// /// The only case where the receiver is not dispatchable, but is still a valid receiver /// type (just not object-safe), is when there is more than one level of pointer indirection. - /// e.g., `self: &&Self`, `self: &Rc`, `self: Box>`. In these cases, there + /// E.g., `self: &&Self`, `self: &Rc`, `self: Box>`. 
In these cases, there /// is no way, or at least no inexpensive way, to coerce the receiver from the version where /// `Self = dyn Trait` to the version where `Self = T`, where `T` is the unknown erased type /// contained by the trait object, because the object that needs to be coerced is behind @@ -519,13 +560,17 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { // U: Trait let trait_predicate = { - let substs = Substs::for_item(self, method.container.assert_trait(), |param, _| { - if param.index == 0 { - unsized_self_ty.into() - } else { - self.mk_param_from_def(param) - } - }); + let substs = InternalSubsts::for_item( + self, + method.container.assert_trait(), + |param, _| { + if param.index == 0 { + unsized_self_ty.into() + } else { + self.mk_param_from_def(param) + } + }, + ); ty::TraitRef { def_id: unsize_did, @@ -559,7 +604,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { self.infer_ctxt().enter(|ref infcx| { // the receiver is dispatchable iff the obligation holds - infcx.predicate_must_hold(&obligation) + infcx.predicate_must_hold_modulo_regions(&obligation) }) } diff --git a/src/librustc/traits/on_unimplemented.rs b/src/librustc/traits/on_unimplemented.rs index dcbddc0308091..2b286ee1b97fb 100644 --- a/src/librustc/traits/on_unimplemented.rs +++ b/src/librustc/traits/on_unimplemented.rs @@ -1,19 +1,9 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use fmt_macros::{Parser, Piece, Position}; -use hir::def_id::DefId; -use ty::{self, TyCtxt, GenericParamDefKind}; -use util::common::ErrorReported; -use util::nodemap::FxHashMap; +use crate::hir::def_id::DefId; +use crate::ty::{self, TyCtxt, GenericParamDefKind}; +use crate::util::common::ErrorReported; +use crate::util::nodemap::FxHashMap; use syntax::ast::{MetaItem, NestedMetaItem}; use syntax::attr; @@ -117,7 +107,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { { if let Some(items) = item.meta_item_list() { if let Ok(subcommand) = - Self::parse(tcx, trait_def_id, &items, item.span, false) + Self::parse(tcx, trait_def_id, &items, item.span(), false) { subcommands.push(subcommand); } else { @@ -128,7 +118,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { } // nothing found - parse_error(tcx, item.span, + parse_error(tcx, item.span(), "this attribute must have a valid value", "expected value here", Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#)); @@ -167,10 +157,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { note: None, })) } else { - return Err(parse_error(tcx, attr.span, - "`#[rustc_on_unimplemented]` requires a value", - "value required here", - Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#))); + return Err(ErrorReported); }; debug!("of_item({:?}/{:?}) = {:?}", trait_def_id, impl_def_id, result); result @@ -190,10 +177,12 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedDirective { for command in self.subcommands.iter().chain(Some(self)).rev() { if let Some(ref condition) = command.condition { if !attr::eval_condition(condition, &tcx.sess.parse_sess, &mut |c| { - options.contains(&( - c.name().as_str().to_string(), - c.value_str().map(|s| s.as_str().to_string()) - )) + c.ident().map_or(false, |ident| { + options.contains(&( + ident.to_string(), + c.value_str().map(|s| s.as_str().to_string()) + )) + }) }) { 
debug!("evaluate: skipping {:?} due to condition", command); continue @@ -244,7 +233,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { { let name = tcx.item_name(trait_def_id); let generics = tcx.generics_of(trait_def_id); - let parser = Parser::new(&self.0, None); + let parser = Parser::new(&self.0, None, vec![], false); let mut result = Ok(()); for token in parser { match token { @@ -289,11 +278,12 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { -> String { let name = tcx.item_name(trait_ref.def_id); - let trait_str = tcx.item_path_str(trait_ref.def_id); + let trait_str = tcx.def_path_str(trait_ref.def_id); let generics = tcx.generics_of(trait_ref.def_id); let generic_map = generics.params.iter().filter_map(|param| { let value = match param.kind { - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } | + GenericParamDefKind::Const => { trait_ref.substs[param.index as usize].to_string() }, GenericParamDefKind::Lifetime => return None @@ -303,7 +293,7 @@ impl<'a, 'gcx, 'tcx> OnUnimplementedFormatString { }).collect::>(); let empty_string = String::new(); - let parser = Parser::new(&self.0, None); + let parser = Parser::new(&self.0, None, vec![], false); parser.map(|p| match p { Piece::String(s) => s, diff --git a/src/librustc/traits/project.rs b/src/librustc/traits/project.rs index 5717a76f1cf0f..360e2323b647d 100644 --- a/src/librustc/traits/project.rs +++ b/src/librustc/traits/project.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code for projecting associated types out of trait references. use super::elaborate_predicates; @@ -22,21 +12,21 @@ use super::SelectionError; use super::{VtableImplData, VtableClosureData, VtableGeneratorData, VtableFnPointerData}; use super::util; -use hir::def_id::DefId; -use infer::{InferCtxt, InferOk}; -use infer::type_variable::TypeVariableOrigin; -use mir::interpret::ConstValue; -use mir::interpret::{GlobalId}; +use crate::hir::def_id::DefId; +use crate::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime}; +use crate::infer::type_variable::TypeVariableOrigin; +use crate::mir::interpret::{GlobalId, ConstValue}; use rustc_data_structures::snapshot_map::{Snapshot, SnapshotMap}; +use rustc_macros::HashStable; use syntax::ast::Ident; -use ty::subst::{Subst, Substs}; -use ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder}; -use util::common::FN_OUTPUT_NAME; +use crate::ty::subst::{Subst, InternalSubsts}; +use crate::ty::{self, ToPredicate, ToPolyTraitRef, Ty, TyCtxt}; +use crate::ty::fold::{TypeFoldable, TypeFolder}; +use crate::util::common::FN_OUTPUT_NAME; /// Depending on the stage of compilation, we want projection to be /// more or less conservative. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, HashStable)] pub enum Reveal { /// At type-checking time, we refuse to project any associated /// type that is marked `default`. Non-`default` ("final") types @@ -66,7 +56,7 @@ pub enum Reveal { /// Also, `impl Trait` is normalized to the concrete type, /// which has to be already collected by type-checking. 
/// - /// NOTE: As `impl Trait`'s concrete type should *never* + /// NOTE: as `impl Trait`'s concrete type should *never* /// be observable directly by the user, `Reveal::All` /// should not be used by checks which may expose /// type equality or type contents to the user. @@ -206,24 +196,11 @@ pub fn poly_project_and_unify_type<'cx, 'gcx, 'tcx>( let (placeholder_predicate, placeholder_map) = infcx.replace_bound_vars_with_placeholders(&obligation.predicate); - let skol_obligation = obligation.with(placeholder_predicate); - let r = match project_and_unify_type(selcx, &skol_obligation) { - Ok(result) => { - let span = obligation.cause.span; - match infcx.leak_check(false, span, &placeholder_map, snapshot) { - Ok(()) => Ok(infcx.plug_leaks(placeholder_map, snapshot, result)), - Err(e) => { - debug!("poly_project_and_unify_type: leak check encountered error {:?}", e); - Err(MismatchedProjectionTypes { err: e }) - } - } - } - Err(e) => { - Err(e) - } - }; - - r + let placeholder_obligation = obligation.with(placeholder_predicate); + let result = project_and_unify_type(selcx, &placeholder_obligation)?; + infcx.leak_check(false, &placeholder_map, snapshot) + .map_err(|err| MismatchedProjectionTypes { err })?; + Ok(result) }) } @@ -425,7 +402,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, let tcx = self.selcx.tcx().global_tcx(); if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { if substs.needs_infer() || substs.has_placeholders() { - let identity_substs = Substs::identity_for_item(tcx, def_id); + let identity_substs = InternalSubsts::identity_for_item(tcx, def_id); let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); if let Some(instance) = instance { let cid = GlobalId { @@ -433,8 +410,10 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, promoted: None }; if let Ok(evaluated) = tcx.const_eval(param_env.and(cid)) { - let evaluated = evaluated.subst(self.tcx(), substs); - return self.fold_const(evaluated); + let substs = tcx.lift_to_global(&substs).unwrap(); + let evaluated = tcx.mk_const(evaluated); + let evaluated = evaluated.subst(tcx, substs); + return evaluated; } } } else { @@ -446,7 +425,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for AssociatedTypeNormalizer<'a, promoted: None }; if let Ok(evaluated) = tcx.const_eval(param_env.and(cid)) { - return self.fold_const(evaluated) + return tcx.mk_const(evaluated); } } } @@ -777,9 +756,9 @@ fn prune_cache_value_obligations<'a, 'gcx, 'tcx>(infcx: &'a InferCtxt<'a, 'gcx, /// /// Concern #2. Even within the snapshot, if those original /// obligations are not yet proven, then we are able to do projections -/// that may yet turn out to be wrong. This *may* lead to some sort +/// that may yet turn out to be wrong. This *may* lead to some sort /// of trouble, though we don't have a concrete example of how that -/// can occur yet. But it seems risky at best. +/// can occur yet. But it seems risky at best. fn get_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>( infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, @@ -812,7 +791,7 @@ fn get_paranoid_cache_value_obligation<'a, 'gcx, 'tcx>( /// cycles to arise, where you basically had a setup like ` /// as Trait>::Foo == $0`. Here, normalizing ` as /// Trait>::Foo> to `[type error]` would lead to an obligation of -/// ` as Trait>::Foo`. We are supposed to report +/// ` as Trait>::Foo`. 
We are supposed to report /// an error for this obligation, but we legitimately should not, /// because it contains `[type error]`. Yuck! (See issue #29857 for /// one case where this arose.) @@ -870,7 +849,7 @@ impl<'tcx> Progress<'tcx> { } } -/// Compute the result of a projection type (if we can). +/// Computes the result of a projection type (if we can). /// /// IMPORTANT: /// - `obligation` must be fully normalized @@ -1453,17 +1432,24 @@ fn confirm_callable_candidate<'cx, 'gcx, 'tcx>( fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( selcx: &mut SelectionContext<'cx, 'gcx, 'tcx>, obligation: &ProjectionTyObligation<'tcx>, - poly_projection: ty::PolyProjectionPredicate<'tcx>) - -> Progress<'tcx> -{ + poly_cache_entry: ty::PolyProjectionPredicate<'tcx>, +) -> Progress<'tcx> { let infcx = selcx.infcx(); - let cause = obligation.cause.clone(); + let cause = &obligation.cause; let param_env = obligation.param_env; - let trait_ref = obligation.predicate.trait_ref(infcx.tcx); - match infcx.match_poly_projection_predicate(cause, param_env, poly_projection, trait_ref) { - Ok(InferOk { value: ty_match, obligations }) => { + + let (cache_entry, _) = + infcx.replace_bound_vars_with_fresh_vars( + cause.span, + LateBoundRegionConversionTime::HigherRankedType, + &poly_cache_entry); + + let cache_trait_ref = cache_entry.projection_ty.trait_ref(infcx.tcx); + let obligation_trait_ref = obligation.predicate.trait_ref(infcx.tcx); + match infcx.at(cause, param_env).eq(cache_trait_ref, obligation_trait_ref) { + Ok(InferOk { value: _, obligations }) => { Progress { - ty: ty_match.value, + ty: cache_entry.ty, obligations, } } @@ -1473,7 +1459,7 @@ fn confirm_param_env_candidate<'cx, 'gcx, 'tcx>( "Failed to unify obligation `{:?}` \ with poly_projection `{:?}`: {:?}", obligation, - poly_projection, + poly_cache_entry, e); } } @@ -1506,7 +1492,7 @@ fn confirm_impl_candidate<'cx, 'gcx, 'tcx>( } let substs = translate_substs(selcx.infcx(), param_env, impl_def_id, substs, assoc_ty.node); let ty = if let ty::AssociatedKind::Existential = assoc_ty.item.kind { - let item_substs = Substs::identity_for_item(tcx, assoc_ty.item.def_id); + let item_substs = InternalSubsts::identity_for_item(tcx, assoc_ty.item.def_id); tcx.mk_opaque(assoc_ty.item.def_id, item_substs) } else { tcx.type_of(assoc_ty.item.def_id) @@ -1564,14 +1550,14 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( // should have failed in astconv. bug!("No associated type `{}` for {}", assoc_ty_name, - tcx.item_path_str(impl_def_id)) + tcx.def_path_str(impl_def_id)) } } // # Cache /// The projection cache. Unlike the standard caches, this can include -/// infcx-dependent type variables - therefore, we have to roll the +/// infcx-dependent type variables, therefore we have to roll the /// cache back each time we roll a snapshot back, to avoid assumptions /// on yet-unresolved inference variables. Types with placeholder /// regions also have to be removed when the respective snapshot ends. @@ -1582,9 +1568,9 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( /// (for the lifetime of the infcx). /// /// Entries in the projection cache might contain inference variables -/// that will be resolved by obligations on the projection cache entry - e.g. +/// that will be resolved by obligations on the projection cache entry (e.g., /// when a type parameter in the associated type is constrained through -/// an "RFC 447" projection on the impl. +/// an "RFC 447" projection on the impl). 
/// /// When working with a fulfillment context, the derived obligations of each /// projection cache entry will be registered on the fulfillcx, so any users @@ -1596,10 +1582,9 @@ fn assoc_ty_def<'cx, 'gcx, 'tcx>( /// If that is done, after evaluation the obligations, it is a good idea to /// call `ProjectionCache::complete` to make sure the obligations won't be /// re-evaluated and avoid an exponential worst-case. -/// -/// FIXME: we probably also want some sort of cross-infcx cache here to -/// reduce the amount of duplication. Let's see what we get with the Chalk -/// reforms. +// +// FIXME: we probably also want some sort of cross-infcx cache here to +// reduce the amount of duplication. Let's see what we get with the Chalk reforms. #[derive(Default)] pub struct ProjectionCache<'tcx> { map: SnapshotMap, ProjectionCacheEntry<'tcx>>, diff --git a/src/librustc/traits/query/dropck_outlives.rs b/src/librustc/traits/query/dropck_outlives.rs index b8bf0fcc15307..e6f9c7ebe6fe4 100644 --- a/src/librustc/traits/query/dropck_outlives.rs +++ b/src/librustc/traits/query/dropck_outlives.rs @@ -1,20 +1,10 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::at::At; -use infer::InferOk; -use infer::canonical::OriginalQueryValues; +use crate::infer::at::At; +use crate::infer::InferOk; +use crate::infer::canonical::OriginalQueryValues; use std::iter::FromIterator; use syntax::source_map::Span; -use ty::subst::Kind; -use ty::{self, Ty, TyCtxt}; +use crate::ty::subst::Kind; +use crate::ty::{self, Ty, TyCtxt}; impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { /// Given a type `ty` of some value being dropped, computes a set @@ -55,8 +45,8 @@ impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { let c_ty = self.infcx.canonicalize_query(&self.param_env.and(ty), &mut orig_values); let span = self.cause.span; debug!("c_ty = {:?}", c_ty); - match &gcx.dropck_outlives(c_ty) { - Ok(result) if result.is_proven() => { + if let Ok(result) = &gcx.dropck_outlives(c_ty) { + if result.is_proven() { if let Ok(InferOk { value, obligations }) = self.infcx.instantiate_query_response_and_region_obligations( self.cause, @@ -72,8 +62,6 @@ impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { }; } } - - _ => { /* fallthrough to error-handling code below */ } } // Errors and ambiuity in dropck occur in two cases: @@ -82,10 +70,11 @@ impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { // Either of these should have created an error before. tcx.sess .delay_span_bug(span, "dtorck encountered internal error"); - return InferOk { + + InferOk { value: vec![], obligations: vec![], - }; + } } } @@ -102,7 +91,7 @@ impl<'tcx> DropckOutlivesResult<'tcx> { span: Span, ty: Ty<'tcx>, ) { - for overflow_ty in self.overflows.iter().take(1) { + if let Some(overflow_ty) = self.overflows.iter().next() { let mut err = struct_span_err!( tcx.sess, span, @@ -195,7 +184,7 @@ impl_stable_hash_for!(struct DtorckConstraint<'tcx> { /// outlive. This is similar but not *quite* the same as the /// `needs_drop` test in the compiler already -- that is, for every /// type T for which this function return true, needs-drop would -/// return false. But the reverse does not hold: in particular, +/// return `false`. 
But the reverse does not hold: in particular, /// `needs_drop` returns false for `PhantomData`, but it is not /// trivial for dropck-outlives. /// @@ -228,7 +217,7 @@ pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> // (T1..Tn) and closures have same properties as T1..Tn -- // check if *any* of those are trivial. - ty::Tuple(ref tys) => tys.iter().cloned().all(|t| trivial_dropck_outlives(tcx, t)), + ty::Tuple(ref tys) => tys.iter().all(|t| trivial_dropck_outlives(tcx, t)), ty::Closure(def_id, ref substs) => substs .upvar_tys(def_id, tcx) .all(|t| trivial_dropck_outlives(tcx, t)), diff --git a/src/librustc/traits/query/evaluate_obligation.rs b/src/librustc/traits/query/evaluate_obligation.rs index ca11c5f5a0874..d5230f15c2565 100644 --- a/src/librustc/traits/query/evaluate_obligation.rs +++ b/src/librustc/traits/query/evaluate_obligation.rs @@ -1,16 +1,6 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::InferCtxt; -use infer::canonical::OriginalQueryValues; -use traits::{EvaluationResult, PredicateObligation, SelectionContext, +use crate::infer::InferCtxt; +use crate::infer::canonical::OriginalQueryValues; +use crate::traits::{EvaluationResult, PredicateObligation, SelectionContext, TraitQueryMode, OverflowError}; impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { @@ -26,11 +16,26 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// Evaluates whether the predicate can be satisfied in the given /// `ParamEnv`, and returns `false` if not certain. However, this is /// not entirely accurate if inference variables are involved. - pub fn predicate_must_hold( + /// + /// This version may conservatively fail when outlives obligations + /// are required. + pub fn predicate_must_hold_considering_regions( + &self, + obligation: &PredicateObligation<'tcx>, + ) -> bool { + self.evaluate_obligation_no_overflow(obligation).must_apply_considering_regions() + } + + /// Evaluates whether the predicate can be satisfied in the given + /// `ParamEnv`, and returns `false` if not certain. However, this is + /// not entirely accurate if inference variables are involved. + /// + /// This version ignores all outlives constraints. + pub fn predicate_must_hold_modulo_regions( &self, obligation: &PredicateObligation<'tcx>, ) -> bool { - self.evaluate_obligation_no_overflow(obligation) == EvaluationResult::EvaluatedToOk + self.evaluate_obligation_no_overflow(obligation).must_apply_modulo_regions() } /// Evaluate a given predicate, capturing overflow and propagating it back. diff --git a/src/librustc/traits/query/method_autoderef.rs b/src/librustc/traits/query/method_autoderef.rs new file mode 100644 index 0000000000000..6b9bdfd63f4d0 --- /dev/null +++ b/src/librustc/traits/query/method_autoderef.rs @@ -0,0 +1,45 @@ +use rustc_data_structures::sync::Lrc; +use crate::infer::canonical::{Canonical, QueryResponse}; +use crate::ty::Ty; + +#[derive(Debug)] +pub struct CandidateStep<'tcx> { + pub self_ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>, + pub autoderefs: usize, + // true if the type results from a dereference of a raw pointer. + // when assembling candidates, we include these steps, but not when + // picking methods. 
This so that if we have `foo: *const Foo` and `Foo` has methods + // `fn by_raw_ptr(self: *const Self)` and `fn by_ref(&self)`, then + // `foo.by_raw_ptr()` will work and `foo.by_ref()` won't. + pub from_unsafe_deref: bool, + pub unsize: bool, +} + +#[derive(Clone, Debug)] +pub struct MethodAutoderefStepsResult<'tcx> { + /// The valid autoderef steps that could be find. + pub steps: Lrc>>, + /// If Some(T), a type autoderef reported an error on. + pub opt_bad_ty: Option>>, + /// If `true`, `steps` has been truncated due to reaching the + /// recursion limit. + pub reached_recursion_limit: bool, +} + +#[derive(Debug)] +pub struct MethodAutoderefBadTy<'tcx> { + pub reached_raw_pointer: bool, + pub ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>, +} + +impl_stable_hash_for!(struct MethodAutoderefBadTy<'tcx> { + reached_raw_pointer, ty +}); + +impl_stable_hash_for!(struct MethodAutoderefStepsResult<'tcx> { + reached_recursion_limit, steps, opt_bad_ty +}); + +impl_stable_hash_for!(struct CandidateStep<'tcx> { + self_ty, autoderefs, from_unsafe_deref, unsize +}); diff --git a/src/librustc/traits/query/mod.rs b/src/librustc/traits/query/mod.rs index 13683d8544496..112a1d0e09c94 100644 --- a/src/librustc/traits/query/mod.rs +++ b/src/librustc/traits/query/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Experimental types for the trait query interface. The methods //! defined in this module are all based on **canonicalization**, //! which makes a canonical query by replacing unbound inference @@ -15,12 +5,13 @@ //! The providers for the queries defined here can be found in //! `librustc_traits`. -use infer::canonical::Canonical; -use ty::error::TypeError; -use ty::{self, Ty}; +use crate::infer::canonical::Canonical; +use crate::ty::error::TypeError; +use crate::ty::{self, Ty}; pub mod dropck_outlives; pub mod evaluate_obligation; +pub mod method_autoderef; pub mod normalize; pub mod normalize_erasing_regions; pub mod outlives_bounds; diff --git a/src/librustc/traits/query/normalize.rs b/src/librustc/traits/query/normalize.rs index 91b2ba301c312..9940249da8ba9 100644 --- a/src/librustc/traits/query/normalize.rs +++ b/src/librustc/traits/query/normalize.rs @@ -1,26 +1,16 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code for the 'normalization' query. This consists of a wrapper //! which folds deeply, invoking the underlying //! `normalize_projection_ty` query when it encounters projections. 
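A user-level illustration of what this normalization does, assuming the standard `IntoIterator` impl for `Vec<u8>`: the projection in the signature below is folded away to the concrete type it denotes.

// `<Vec<u8> as IntoIterator>::Item` normalizes to `u8`, so after the
// query runs this signature is equivalent to
// `fn first_item(v: Vec<u8>) -> Option<u8>`.
fn first_item(v: Vec<u8>) -> Option<<Vec<u8> as IntoIterator>::Item> {
    v.into_iter().next()
}

fn main() {
    assert_eq!(first_item(vec![7, 8]), Some(7));
}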
-use infer::at::At; -use infer::canonical::OriginalQueryValues; -use infer::{InferCtxt, InferOk}; -use mir::interpret::{ConstValue, GlobalId}; -use traits::project::Normalized; -use traits::{Obligation, ObligationCause, PredicateObligation, Reveal}; -use ty::fold::{TypeFoldable, TypeFolder}; -use ty::subst::{Subst, Substs}; -use ty::{self, Ty, TyCtxt}; +use crate::infer::at::At; +use crate::infer::canonical::OriginalQueryValues; +use crate::infer::{InferCtxt, InferOk}; +use crate::mir::interpret::{GlobalId, ConstValue}; +use crate::traits::project::Normalized; +use crate::traits::{Obligation, ObligationCause, PredicateObligation, Reveal}; +use crate::ty::fold::{TypeFoldable, TypeFolder}; +use crate::ty::subst::{Subst, InternalSubsts}; +use crate::ty::{self, Ty, TyCtxt}; use super::NoSolution; @@ -34,7 +24,7 @@ impl<'cx, 'gcx, 'tcx> At<'cx, 'gcx, 'tcx> { /// the normalized value along with various outlives relations (in /// the form of obligations that must be discharged). /// - /// NB. This will *eventually* be the main means of + /// N.B., this will *eventually* be the main means of /// normalizing, but for now should be used only when we actually /// know that normalization will succeed, since error reporting /// and other details are still "under development". @@ -203,7 +193,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx let tcx = self.infcx.tcx.global_tcx(); if let Some(param_env) = self.tcx().lift_to_global(&self.param_env) { if substs.needs_infer() || substs.has_placeholders() { - let identity_substs = Substs::identity_for_item(tcx, def_id); + let identity_substs = InternalSubsts::identity_for_item(tcx, def_id); let instance = ty::Instance::resolve(tcx, param_env, def_id, identity_substs); if let Some(instance) = instance { let cid = GlobalId { @@ -211,8 +201,10 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx promoted: None, }; if let Ok(evaluated) = tcx.const_eval(param_env.and(cid)) { - let evaluated = evaluated.subst(self.tcx(), substs); - return self.fold_const(evaluated); + let substs = tcx.lift_to_global(&substs).unwrap(); + let evaluated = tcx.mk_const(evaluated); + let evaluated = evaluated.subst(tcx, substs); + return evaluated; } } } else { @@ -224,7 +216,7 @@ impl<'cx, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for QueryNormalizer<'cx, 'gcx, 'tcx promoted: None, }; if let Ok(evaluated) = tcx.const_eval(param_env.and(cid)) { - return self.fold_const(evaluated) + return tcx.mk_const(evaluated); } } } diff --git a/src/librustc/traits/query/normalize_erasing_regions.rs b/src/librustc/traits/query/normalize_erasing_regions.rs index 1cb96a3e33f43..0c1252680c1db 100644 --- a/src/librustc/traits/query/normalize_erasing_regions.rs +++ b/src/librustc/traits/query/normalize_erasing_regions.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Methods for normalizing when you don't care about regions (and //! aren't doing type inference). If either of those things don't //! apply to you, use `infcx.normalize(...)`. @@ -17,8 +7,8 @@ //! `normalize_ty_after_erasing_regions` query for each type found //! within. (This underlying query is what is cached.) 
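Editor's note: the `const_eval` path in the `QueryNormalizer` hunk above deals with constants that are still unevaluated when they occur inside a type. A rough stand-alone sketch of such a constant (not part of this patch; `Matrix` and `zeros` are made-up names):

```rust
// Illustrative only. `Matrix::SIZE` is an unevaluated constant when it first
// appears in the array types below; the compiler has to const-evaluate it
// while normalizing `[f64; Matrix::SIZE]`, which is roughly the situation the
// `const_eval` call in the normalizer handles.
struct Matrix;

impl Matrix {
    const SIZE: usize = 3 * 3;
}

fn zeros() -> [f64; Matrix::SIZE] {
    [0.0; Matrix::SIZE]
}

fn main() {
    assert_eq!(zeros().len(), 9);
}
```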
-use ty::{self, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder}; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::fold::{TypeFoldable, TypeFolder}; impl<'cx, 'tcx> TyCtxt<'cx, 'tcx, 'tcx> { /// Erase the regions in `value` and then fully normalize all the @@ -55,7 +45,7 @@ impl<'cx, 'tcx> TyCtxt<'cx, 'tcx, 'tcx> { /// a `T` (with regions erased). This is appropriate when the /// binder is being instantiated at the call site. /// - /// NB. Currently, higher-ranked type bounds inhibit + /// N.B., currently, higher-ranked type bounds inhibit /// normalization. Therefore, each time we erase them in /// codegen, we need to normalize the contents. pub fn normalize_erasing_late_bound_regions( diff --git a/src/librustc/traits/query/outlives_bounds.rs b/src/librustc/traits/query/outlives_bounds.rs index b3fae3bab3471..954de15905fb7 100644 --- a/src/librustc/traits/query/outlives_bounds.rs +++ b/src/librustc/traits/query/outlives_bounds.rs @@ -1,29 +1,19 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::InferCtxt; -use infer::canonical::OriginalQueryValues; -use syntax::ast; +use crate::infer::InferCtxt; +use crate::infer::canonical::OriginalQueryValues; +use crate::hir; use syntax::source_map::Span; -use traits::{FulfillmentContext, ObligationCause, TraitEngine, TraitEngineExt}; -use traits::query::NoSolution; -use ty::{self, Ty, TyCtxt}; +use crate::traits::{FulfillmentContext, ObligationCause, TraitEngine, TraitEngineExt}; +use crate::traits::query::NoSolution; +use crate::ty::{self, Ty, TyCtxt}; -use ich::StableHashingContext; +use crate::ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; use std::mem; /// Outlives bounds are relationships between generic parameters, /// whether they both be regions (`'a: 'b`) or whether types are -/// involved (`T: 'a`). These relationships can be extracted from the +/// involved (`T: 'a`). These relationships can be extracted from the /// full set of predicates we understand or also from types (in which /// case they are called implied bounds). They are fed to the /// `OutlivesEnv` which in turn is supplied to the region checker and @@ -76,7 +66,7 @@ impl<'a, 'tcx> HashStable> for OutlivesBound<'tcx> { impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { /// Implied bounds are region relationships that we deduce - /// automatically. The idea is that (e.g.) a caller must check that a + /// automatically. The idea is that (e.g.) a caller must check that a /// function's argument types are well-formed immediately before /// calling that fn, and hence the *callee* can assume that its /// argument types are well-formed. 
This may imply certain relationships @@ -99,7 +89,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxt<'cx, 'gcx, 'tcx> { pub fn implied_outlives_bounds( &self, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, + body_id: hir::HirId, ty: Ty<'tcx>, span: Span, ) -> Vec> { diff --git a/src/librustc/traits/query/type_op/ascribe_user_type.rs b/src/librustc/traits/query/type_op/ascribe_user_type.rs index 23445781eb2f3..d9f573eb7e291 100644 --- a/src/librustc/traits/query/type_op/ascribe_user_type.rs +++ b/src/librustc/traits/query/type_op/ascribe_user_type.rs @@ -1,38 +1,23 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::Fallible; -use hir::def_id::DefId; -use mir::ProjectionKind; -use ty::{self, ParamEnvAnd, Ty, TyCtxt}; -use ty::subst::UserSubsts; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::Fallible; +use crate::hir::def_id::DefId; +use crate::ty::{ParamEnvAnd, Ty, TyCtxt}; +use crate::ty::subst::UserSubsts; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct AscribeUserType<'tcx> { pub mir_ty: Ty<'tcx>, - pub variance: ty::Variance, pub def_id: DefId, pub user_substs: UserSubsts<'tcx>, - pub projs: &'tcx ty::List>, } impl<'tcx> AscribeUserType<'tcx> { pub fn new( mir_ty: Ty<'tcx>, - variance: ty::Variance, def_id: DefId, user_substs: UserSubsts<'tcx>, - projs: &'tcx ty::List>, ) -> Self { - AscribeUserType { mir_ty, variance, def_id, user_substs, projs } + Self { mir_ty, def_id, user_substs } } } @@ -62,19 +47,19 @@ impl<'gcx: 'tcx, 'tcx> super::QueryTypeOp<'gcx, 'tcx> for AscribeUserType<'tcx> BraceStructTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for AscribeUserType<'tcx> { - mir_ty, variance, def_id, user_substs, projs + mir_ty, def_id, user_substs } } BraceStructLiftImpl! { impl<'a, 'tcx> Lift<'tcx> for AscribeUserType<'a> { type Lifted = AscribeUserType<'tcx>; - mir_ty, variance, def_id, user_substs, projs + mir_ty, def_id, user_substs } } impl_stable_hash_for! { struct AscribeUserType<'tcx> { - mir_ty, variance, def_id, user_substs, projs + mir_ty, def_id, user_substs } } diff --git a/src/librustc/traits/query/type_op/custom.rs b/src/librustc/traits/query/type_op/custom.rs index 54860dd0434a0..7e38282cc1adc 100644 --- a/src/librustc/traits/query/type_op/custom.rs +++ b/src/librustc/traits/query/type_op/custom.rs @@ -1,22 +1,12 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
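Editor's note: a small self-contained sketch of the implied-bounds idea documented on `implied_outlives_bounds` above (not part of this patch; `callee` and `caller` are made-up names):

```rust
// Illustrative only. Because the caller must prove that the argument type
// `&'a T` is well-formed, the callee gets to assume the implied bound
// `T: 'a` without writing it down.
fn callee<'a, T>(x: &'a T) -> &'a T {
    // `T: 'a` is implied by the well-formedness of `&'a T`.
    x
}

fn caller() {
    let s = String::from("hi");
    // At this call the compiler checks that `&String` is a well-formed
    // argument type, which is what justifies the assumption above.
    let _r = callee(&s);
}

fn main() {
    caller();
}
```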
- -use infer::{InferCtxt, InferOk}; +use crate::infer::{InferCtxt, InferOk}; use std::fmt; -use traits::query::Fallible; +use crate::traits::query::Fallible; -use infer::canonical::query_response; -use infer::canonical::QueryRegionConstraint; +use crate::infer::canonical::query_response; +use crate::infer::canonical::QueryRegionConstraint; use std::rc::Rc; use syntax::source_map::DUMMY_SP; -use traits::{ObligationCause, TraitEngine, TraitEngineExt}; +use crate::traits::{ObligationCause, TraitEngine, TraitEngineExt}; pub struct CustomTypeOp { closure: F, diff --git a/src/librustc/traits/query/type_op/eq.rs b/src/librustc/traits/query/type_op/eq.rs index 43dea442df65a..5c3ccc9a99537 100644 --- a/src/librustc/traits/query/type_op/eq.rs +++ b/src/librustc/traits/query/type_op/eq.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::Fallible; -use ty::{ParamEnvAnd, Ty, TyCtxt}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::Fallible; +use crate::ty::{ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct Eq<'tcx> { diff --git a/src/librustc/traits/query/type_op/implied_outlives_bounds.rs b/src/librustc/traits/query/type_op/implied_outlives_bounds.rs index d5233851db8c6..c48ca33b13fbc 100644 --- a/src/librustc/traits/query/type_op/implied_outlives_bounds.rs +++ b/src/librustc/traits/query/type_op/implied_outlives_bounds.rs @@ -1,17 +1,7 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::outlives_bounds::OutlivesBound; -use traits::query::Fallible; -use ty::{ParamEnvAnd, Ty, TyCtxt}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::outlives_bounds::OutlivesBound; +use crate::traits::query::Fallible; +use crate::ty::{ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct ImpliedOutlivesBounds<'tcx> { diff --git a/src/librustc/traits/query/type_op/mod.rs b/src/librustc/traits/query/type_op/mod.rs index f8f9650ebe1ae..fd13acc7796f8 100644 --- a/src/librustc/traits/query/type_op/mod.rs +++ b/src/librustc/traits/query/type_op/mod.rs @@ -1,24 +1,14 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
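Editor's note: the `AscribeUserType` and `Eq` type ops above ultimately originate from ordinary user type annotations. The sketch below (not part of this patch) shows the kind of annotation that becomes such an equality constraint during type check:

```rust
// Illustrative only. The annotated type on `n` acts as a user type
// ascription: the inferred type of the literal must be equated with `u8`
// rather than defaulting to `i32`.
fn main() {
    let n: u8 = 200;
    // With `n` forced to `u8`, this addition overflows and yields `None`.
    assert_eq!(n.checked_add(100), None);
}
```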
- -use infer::canonical::{ +use crate::infer::canonical::{ Canonical, Canonicalized, CanonicalizedQueryResponse, OriginalQueryValues, QueryRegionConstraint, QueryResponse, }; -use infer::{InferCtxt, InferOk}; +use crate::infer::{InferCtxt, InferOk}; use std::fmt; use std::rc::Rc; -use traits::query::Fallible; -use traits::ObligationCause; -use ty::fold::TypeFoldable; -use ty::{Lift, ParamEnvAnd, TyCtxt}; +use crate::traits::query::Fallible; +use crate::traits::ObligationCause; +use crate::ty::fold::TypeFoldable; +use crate::ty::{Lift, ParamEnvAnd, TyCtxt}; pub mod ascribe_user_type; pub mod custom; diff --git a/src/librustc/traits/query/type_op/normalize.rs b/src/librustc/traits/query/type_op/normalize.rs index c45e8b2554b0f..e3d7a4d57a564 100644 --- a/src/librustc/traits/query/type_op/normalize.rs +++ b/src/librustc/traits/query/type_op/normalize.rs @@ -1,18 +1,8 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; use std::fmt; -use traits::query::Fallible; -use ty::fold::TypeFoldable; -use ty::{self, Lift, ParamEnvAnd, Ty, TyCtxt}; +use crate::traits::query::Fallible; +use crate::ty::fold::TypeFoldable; +use crate::ty::{self, Lift, ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct Normalize { @@ -62,7 +52,7 @@ pub trait Normalizable<'gcx, 'tcx>: fmt::Debug + TypeFoldable<'tcx> + Lift<'gcx> canonicalized: Canonicalized<'gcx, ParamEnvAnd<'tcx, Normalize>>, ) -> Fallible>; - /// Convert from the `'gcx` (lifted) form of `Self` into the `tcx` + /// Converts from the `'gcx` (lifted) form of `Self` into the `tcx` /// form of `Self`. fn shrink_to_tcx_lifetime( v: &'a CanonicalizedQueryResponse<'gcx, Self>, diff --git a/src/librustc/traits/query/type_op/outlives.rs b/src/librustc/traits/query/type_op/outlives.rs index cd7c6d76eab57..fc0c1c022fc80 100644 --- a/src/librustc/traits/query/type_op/outlives.rs +++ b/src/librustc/traits/query/type_op/outlives.rs @@ -1,18 +1,8 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
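Editor's note: the `DropckOutlives` type op declared in the hunk just below, like `trivial_dropck_outlives` earlier in this patch, backs the drop checker. A minimal illustration of what dropck-outlives enforces (not part of this patch; `Inspector` is a made-up type):

```rust
// Illustrative only. Because `Inspector` has a `Drop` impl that can observe
// the borrowed string, dropck requires the borrowed data to strictly outlive
// the value being dropped.
struct Inspector<'a>(&'a str);

impl<'a> Drop for Inspector<'a> {
    fn drop(&mut self) {
        println!("dropping while still able to read {:?}", self.0);
    }
}

fn main() {
    let name = String::from("world");
    let _i = Inspector(&name);
    // Fine: `name` outlives `_i`. Declaring `_i` before `name` (so that
    // `name` were dropped first) would be rejected by the drop checker.
}
```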
- -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::dropck_outlives::trivial_dropck_outlives; -use traits::query::dropck_outlives::DropckOutlivesResult; -use traits::query::Fallible; -use ty::{ParamEnvAnd, Ty, TyCtxt}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::dropck_outlives::trivial_dropck_outlives; +use crate::traits::query::dropck_outlives::DropckOutlivesResult; +use crate::traits::query::Fallible; +use crate::ty::{ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug)] pub struct DropckOutlives<'tcx> { diff --git a/src/librustc/traits/query/type_op/prove_predicate.rs b/src/librustc/traits/query/type_op/prove_predicate.rs index 460ddb2a138b5..50dedf6e87f40 100644 --- a/src/librustc/traits/query/type_op/prove_predicate.rs +++ b/src/librustc/traits/query/type_op/prove_predicate.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::Fallible; -use ty::{ParamEnvAnd, Predicate, TyCtxt}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::Fallible; +use crate::ty::{ParamEnvAnd, Predicate, TyCtxt}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct ProvePredicate<'tcx> { diff --git a/src/librustc/traits/query/type_op/subtype.rs b/src/librustc/traits/query/type_op/subtype.rs index 2b2939c644aa3..c45fb06313e16 100644 --- a/src/librustc/traits/query/type_op/subtype.rs +++ b/src/librustc/traits/query/type_op/subtype.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; -use traits::query::Fallible; -use ty::{ParamEnvAnd, Ty, TyCtxt}; +use crate::infer::canonical::{Canonical, Canonicalized, CanonicalizedQueryResponse, QueryResponse}; +use crate::traits::query::Fallible; +use crate::ty::{ParamEnvAnd, Ty, TyCtxt}; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct Subtype<'tcx> { diff --git a/src/librustc/traits/select.rs b/src/librustc/traits/select.rs index d46389b0ee226..e7cc9618080c2 100644 --- a/src/librustc/traits/select.rs +++ b/src/librustc/traits/select.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Candidate selection. See the [rustc guide] for more information on how this works. //! //! 
[rustc guide]: https://rust-lang.github.io/rustc-guide/traits/resolution.html#selection @@ -37,27 +27,25 @@ use super::{ VtableGeneratorData, VtableImplData, VtableObjectData, VtableTraitAliasData, }; -use dep_graph::{DepKind, DepNodeIndex}; -use hir::def_id::DefId; -use infer; -use infer::{InferCtxt, InferOk, TypeFreshener}; -use middle::lang_items; -use mir::interpret::GlobalId; -use ty::fast_reject; -use ty::relate::{TypeRelation, TraitObjectMode}; -use ty::subst::{Subst, Substs}; -use ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable}; - -use hir; +use crate::dep_graph::{DepKind, DepNodeIndex}; +use crate::hir::def_id::DefId; +use crate::infer::{CombinedSnapshot, InferCtxt, InferOk, PlaceholderMap, TypeFreshener}; +use crate::middle::lang_items; +use crate::mir::interpret::GlobalId; +use crate::ty::fast_reject; +use crate::ty::relate::TypeRelation; +use crate::ty::subst::{Subst, SubstsRef}; +use crate::ty::{self, ToPolyTraitRef, ToPredicate, Ty, TyCtxt, TypeFoldable}; + +use crate::hir; use rustc_data_structures::bit_set::GrowableBitSet; use rustc_data_structures::sync::Lock; use rustc_target::spec::abi::Abi; use std::cmp; -use std::fmt; +use std::fmt::{self, Display}; use std::iter; -use std::mem; use std::rc::Rc; -use util::nodemap::{FxHashMap, FxHashSet}; +use crate::util::nodemap::{FxHashMap, FxHashSet}; pub struct SelectionContext<'cx, 'gcx: 'cx + 'tcx, 'tcx: 'cx> { infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, @@ -115,7 +103,7 @@ impl IntercrateAmbiguityCause { /// See #23980 for details. pub fn add_intercrate_ambiguity_hint<'a, 'tcx>( &self, - err: &mut ::errors::DiagnosticBuilder<'_>, + err: &mut errors::DiagnosticBuilder<'_>, ) { err.note(&self.intercrate_ambiguity_hint()); } @@ -174,11 +162,11 @@ pub struct SelectionCache<'tcx> { } /// The selection process begins by considering all impls, where -/// clauses, and so forth that might resolve an obligation. Sometimes +/// clauses, and so forth that might resolve an obligation. Sometimes /// we'll be able to say definitively that (e.g.) an impl does not /// apply to the obligation: perhaps it is defined for `usize` but the /// obligation is for `int`. In that case, we drop the impl out of the -/// list. But the other cases are considered *candidates*. +/// list. But the other cases are considered *candidates*. /// /// For selection to succeed, there must be exactly one matching /// candidate. If the obligation is fully known, this is guaranteed @@ -338,15 +326,18 @@ enum BuiltinImplConditions<'tcx> { /// evaluations. /// /// The evaluation results are ordered: -/// - `EvaluatedToOk` implies `EvaluatedToAmbig` implies `EvaluatedToUnknown` +/// - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions` +/// implies `EvaluatedToAmbig` implies `EvaluatedToUnknown` /// - `EvaluatedToErr` implies `EvaluatedToRecur` /// - the "union" of evaluation results is equal to their maximum - /// all the "potential success" candidates can potentially succeed, -/// so they are no-ops when unioned with a definite error, and within +/// so they are noops when unioned with a definite error, and within /// the categories it's easy to see that the unions are correct. pub enum EvaluationResult { /// Evaluation successful EvaluatedToOk, + /// Evaluation successful, but there were unevaluated region obligations + EvaluatedToOkModuloRegions, /// Evaluation is known to be ambiguous - it *might* hold for some /// assignment of inference variables, but it might not. 
/// @@ -392,27 +383,40 @@ pub enum EvaluationResult { /// ``` /// /// When we try to prove it, we first go the first option, which - /// recurses. This shows us that the impl is "useless" - it won't + /// recurses. This shows us that the impl is "useless" -- it won't /// tell us that `T: Trait` unless it already implemented `Trait` /// by some other means. However, that does not prevent `T: Trait` /// does not hold, because of the bound (which can indeed be satisfied /// by `SomeUnsizedType` from another crate). - /// - /// FIXME: when an `EvaluatedToRecur` goes past its parent root, we - /// ought to convert it to an `EvaluatedToErr`, because we know - /// there definitely isn't a proof tree for that obligation. Not - /// doing so is still sound - there isn't any proof tree, so the - /// branch still can't be a part of a minimal one - but does not - /// re-enable caching. + // + // FIXME: when an `EvaluatedToRecur` goes past its parent root, we + // ought to convert it to an `EvaluatedToErr`, because we know + // there definitely isn't a proof tree for that obligation. Not + // doing so is still sound -- there isn't any proof tree, so the + // branch still can't be a part of a minimal one -- but does not re-enable caching. EvaluatedToRecur, - /// Evaluation failed + /// Evaluation failed. EvaluatedToErr, } impl EvaluationResult { + /// Returns `true` if this evaluation result is known to apply, even + /// considering outlives constraints. + pub fn must_apply_considering_regions(self) -> bool { + self == EvaluatedToOk + } + + /// Returns `true` if this evaluation result is known to apply, ignoring + /// outlives constraints. + pub fn must_apply_modulo_regions(self) -> bool { + self <= EvaluatedToOkModuloRegions + } + pub fn may_apply(self) -> bool { match self { - EvaluatedToOk | EvaluatedToAmbig | EvaluatedToUnknown => true, + EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToUnknown => { + true + } EvaluatedToErr | EvaluatedToRecur => false, } @@ -422,13 +426,14 @@ impl EvaluationResult { match self { EvaluatedToUnknown | EvaluatedToRecur => true, - EvaluatedToOk | EvaluatedToAmbig | EvaluatedToErr => false, + EvaluatedToOk | EvaluatedToOkModuloRegions | EvaluatedToAmbig | EvaluatedToErr => false, } } } impl_stable_hash_for!(enum self::EvaluationResult { EvaluatedToOk, + EvaluatedToOkModuloRegions, EvaluatedToAmbig, EvaluatedToUnknown, EvaluatedToRecur, @@ -541,33 +546,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { self.infcx } - /// Wraps the inference context's in_snapshot s.t. snapshot handling is only from the selection - /// context's self. - fn in_snapshot(&mut self, f: F) -> R - where - F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R, - { - self.infcx.in_snapshot(|snapshot| f(self, snapshot)) - } - - /// Wraps a probe s.t. obligations collected during it are ignored and old obligations are - /// retained. - fn probe(&mut self, f: F) -> R - where - F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> R, - { - self.infcx.probe(|snapshot| f(self, snapshot)) - } - - /// Wraps a commit_if_ok s.t. obligations collected during it are not returned in selection if - /// the transaction fails and s.t. old obligations are retained. 
- fn commit_if_ok(&mut self, f: F) -> Result - where - F: FnOnce(&mut Self, &infer::CombinedSnapshot<'cx, 'tcx>) -> Result, - { - self.infcx.commit_if_ok(|snapshot| f(self, snapshot)) - } - /////////////////////////////////////////////////////////////////////////// // Selection // @@ -649,8 +627,22 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { &mut self, obligation: &PredicateObligation<'tcx>, ) -> Result { - self.probe(|this, _| { - this.evaluate_predicate_recursively(TraitObligationStackList::empty(), obligation) + self.evaluation_probe(|this| { + this.evaluate_predicate_recursively(TraitObligationStackList::empty(), + obligation.clone()) + }) + } + + fn evaluation_probe( + &mut self, + op: impl FnOnce(&mut Self) -> Result, + ) -> Result { + self.infcx.probe(|snapshot| -> Result { + let result = op(self)?; + match self.infcx.region_constraints_added_in_snapshot(snapshot) { + None => Ok(result), + Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)), + } }) } @@ -663,12 +655,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { predicates: I, ) -> Result where - I: IntoIterator>, + I: IntoIterator>, 'tcx: 'a, { let mut result = EvaluatedToOk; for obligation in predicates { - let eval = self.evaluate_predicate_recursively(stack, obligation)?; + let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?; debug!( "evaluate_predicate_recursively({:?}) = {:?}", obligation, eval @@ -687,9 +679,19 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn evaluate_predicate_recursively<'o>( &mut self, previous_stack: TraitObligationStackList<'o, 'tcx>, - obligation: &PredicateObligation<'tcx>, + obligation: PredicateObligation<'tcx>, ) -> Result { - debug!("evaluate_predicate_recursively({:?})", obligation); + debug!("evaluate_predicate_recursively(previous_stack={:?}, obligation={:?})", + previous_stack.head(), obligation); + + // Previous_stack stores a TraitObligatiom, while 'obligation' is + // a PredicateObligation. These are distinct types, so we can't + // use any Option combinator method that would force them to be + // the same + match previous_stack.head() { + Some(h) => self.check_recursion_limit(&obligation, h.obligation)?, + None => self.check_recursion_limit(&obligation, &obligation)? + } match obligation.predicate { ty::Predicate::Trait(ref t) => { @@ -703,8 +705,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { match self.infcx .subtype_predicate(&obligation.cause, obligation.param_env, p) { - Some(Ok(InferOk { obligations, .. })) => { - self.evaluate_predicates_recursively(previous_stack, &obligations) + Some(Ok(InferOk { mut obligations, .. })) => { + self.add_depth(obligations.iter_mut(), obligation.recursion_depth); + self.evaluate_predicates_recursively(previous_stack,obligations.into_iter()) } Some(Err(_)) => Ok(EvaluatedToErr), None => Ok(EvaluatedToAmbig), @@ -718,98 +721,17 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty, obligation.cause.span, ) { - Some(obligations) => { - self.evaluate_predicates_recursively(previous_stack, obligations.iter()) + Some(mut obligations) => { + self.add_depth(obligations.iter_mut(), obligation.recursion_depth); + self.evaluate_predicates_recursively(previous_stack, obligations.into_iter()) } None => Ok(EvaluatedToAmbig), }, - ty::Predicate::TypeOutlives(ref binder) => { - assert!(!binder.has_escaping_bound_vars()); - // Check if the type has higher-ranked vars. 
- if binder.skip_binder().0.has_escaping_bound_vars() { - // If so, this obligation is an error (for now). Eventually we should be - // able to support additional cases here, like `for<'a> &'a str: 'a`. - - // NOTE: this hack is implemented in both trait fulfillment and - // evaluation. If you fix it in one place, make sure you fix it - // in the other. - - // We don't want to allow this sort of reasoning in intercrate - // mode, for backwards-compatibility reasons. - if self.intercrate.is_some() { - Ok(EvaluatedToAmbig) - } else { - Ok(EvaluatedToErr) - } - } else { - // If the type has no late bound vars, then if we assign all - // the inference variables in it to be 'static, then the type - // will be 'static itself. - // - // Therefore, `staticize(T): 'a` holds for any `'a`, so this - // obligation is fulfilled. Because evaluation works with - // staticized types (yes I know this is involved with #21974), - // we are 100% OK here. - Ok(EvaluatedToOk) - } - } - - ty::Predicate::RegionOutlives(ref binder) => { - let ty::OutlivesPredicate(r_a, r_b) = binder.skip_binder(); - - if r_a == r_b { - // for<'a> 'a: 'a. OK - Ok(EvaluatedToOk) - } else if **r_a == ty::ReStatic { - // 'static: 'x always holds. - // - // This special case is handled somewhat inconsistently - if we - // have an inference variable that is supposed to be equal to - // `'static`, then we don't allow it to be equated to an LBR, - // but if we have a literal `'static`, then we *do*. - // - // This is actually consistent with how our region inference works. - // - // It would appear that this sort of inconsistency would - // cause "instability" problems with evaluation caching. However, - // evaluation caching is only for trait predicates, and when - // trait predicates create nested obligations, they contain - // inference variables for all the regions in the trait - the - // only way this codepath can be reached from trait predicate - // evaluation is when the user typed an explicit `where 'static: 'a` - // lifetime bound (in which case we want to return EvaluatedToOk). - // - // If we ever want to handle inference variables that might be - // equatable with ReStatic, we need to make sure we are not confused by - // technically-allowed-by-RFC-447-but-probably-should-not-be - // impls such as - // ```Rust - // impl<'a, 's, T> X<'s> for T where T: Debug + 'a, 'a: 's - // ``` - Ok(EvaluatedToOk) - } else if r_a.is_late_bound() || r_b.is_late_bound() { - // There is no current way to prove `for<'a> 'a: 'x` - // unless `'a = 'x`, because there are no bounds involving - // lifetimes. - - // It might be possible to prove `for<'a> 'x: 'a` by forcing `'x` - // to be `'static`. However, this is not currently done by type - // inference unless `'x` is literally ReStatic. See the comment - // above. - - // We don't want to allow this sort of reasoning in intercrate - // mode, for backwards-compatibility reasons. - if self.intercrate.is_some() { - Ok(EvaluatedToAmbig) - } else { - Ok(EvaluatedToErr) - } - } else { - // Relating 2 inference variable regions. These will - // always hold if our query is "staticized". - Ok(EvaluatedToOk) - } + ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) 
=> { + // we do not consider region relationships when + // evaluating trait matches + Ok(EvaluatedToOkModuloRegions) } ty::Predicate::ObjectSafe(trait_def_id) => { @@ -823,10 +745,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ty::Predicate::Projection(ref data) => { let project_obligation = obligation.with(data.clone()); match project::poly_project_and_unify_type(self, &project_obligation) { - Ok(Some(subobligations)) => { + Ok(Some(mut subobligations)) => { + self.add_depth(subobligations.iter_mut(), obligation.recursion_depth); let result = self.evaluate_predicates_recursively( previous_stack, - subobligations.iter(), + subobligations.into_iter(), ); if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate(self, data) @@ -1023,6 +946,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { { debug!("evaluate_stack({:?}) --> recursive", stack.fresh_trait_ref); + // Subtle: when checking for a coinductive cycle, we do + // not compare using the "freshened trait refs" (which + // have erased regions) but rather the fully explicit + // trait refs. This is important because it's only a cycle + // if the regions match exactly. let cycle = stack.iter().skip(1).take(rec_index + 1); let cycle = cycle.map(|stack| ty::Predicate::Trait(stack.obligation.predicate)); if self.coinductive_match(cycle) { @@ -1052,8 +980,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { /// that recursion is ok. This routine returns true if the top of the /// stack (`cycle[0]`): /// - /// - is a defaulted trait, and - /// - it also appears in the backtrace at some position `X`; and, + /// - is a defaulted trait, + /// - it also appears in the backtrace at some position `X`, /// - all the predicates at positions `X..` between `X` an the top are /// also defaulted traits. pub fn coinductive_match(&mut self, cycle: I) -> bool @@ -1074,7 +1002,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } /// Further evaluate `candidate` to decide whether all type parameters match and whether nested - /// obligations are met. Returns true if `candidate` remains viable after this further + /// obligations are met. Returns whether `candidate` remains viable after this further /// scrutiny. fn evaluate_candidate<'o>( &mut self, @@ -1085,12 +1013,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { "evaluate_candidate: depth={} candidate={:?}", stack.obligation.recursion_depth, candidate ); - let result = self.probe(|this, _| { + let result = self.evaluation_probe(|this| { let candidate = (*candidate).clone(); match this.confirm_candidate(stack.obligation, candidate) { Ok(selection) => this.evaluate_predicates_recursively( stack.list(), - selection.nested_obligations().iter(), + selection.nested_obligations().into_iter() ), Err(..) => Ok(EvaluatedToErr), } @@ -1165,6 +1093,45 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { .insert(trait_ref, WithDepNode::new(dep_node, result)); } + // For various reasons, it's possible for a subobligation + // to have a *lower* recursion_depth than the obligation used to create it. + // Projection sub-obligations may be returned from the projection cache, + // which results in obligations with an 'old' recursion_depth. 
+ // Additionally, methods like ty::wf::obligations and + // InferCtxt.subtype_predicate produce subobligations without + // taking in a 'parent' depth, causing the generated subobligations + // to have a recursion_depth of 0. + // + // To ensure that obligation_depth never decreases, we force all subobligations + // to have at least the depth of the original obligation. + fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(&self, it: I, + min_depth: usize) { + it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1); + } + + // Check that the recursion limit has not been exceeded. + // + // The weird return type of this function allows it to be used with the 'try' (?) + // operator within certain functions. + fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>( + &self, + obligation: &Obligation<'tcx, T>, + error_obligation: &Obligation<'tcx, V> + ) -> Result<(), OverflowError> { + let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get(); + if obligation.recursion_depth >= recursion_limit { + match self.query_mode { + TraitQueryMode::Standard => { + self.infcx().report_overflow_error(error_obligation, true); + } + TraitQueryMode::Canonical => { + return Err(OverflowError); + } + } + } + Ok(()) + } + /////////////////////////////////////////////////////////////////////////// // CANDIDATE ASSEMBLY // @@ -1181,17 +1148,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> { // Watch out for overflow. This intentionally bypasses (and does // not update) the cache. - let recursion_limit = *self.infcx.tcx.sess.recursion_limit.get(); - if stack.obligation.recursion_depth >= recursion_limit { - match self.query_mode { - TraitQueryMode::Standard => { - self.infcx().report_overflow_error(&stack.obligation, true); - } - TraitQueryMode::Canonical => { - return Err(Overflow); - } - } - } + self.check_recursion_limit(&stack.obligation, &stack.obligation)?; + // Check the cache. Note that we freshen the trait-ref // separately rather than using `stack.fresh_trait_ref` -- @@ -1475,7 +1433,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } } - /// Returns true if the global caches can be used. + /// Returns `true` if the global caches can be used. /// Do note that if the type itself is not in the /// global tcx, the local caches will be used. fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool { @@ -1501,13 +1459,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { return false; } - // Same idea as the above, but for alt trait object modes. These - // should only be used in intercrate mode - better safe than sorry. - if self.infcx.trait_object_mode() != TraitObjectMode::NoSquash { - bug!("using squashing TraitObjectMode outside of intercrate mode? param_env={:?}", - param_env); - } - - // Otherwise, we can use the global cache.
true } @@ -1716,8 +1667,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { _ => return, } - let result = self.probe(|this, snapshot| { - this.match_projection_obligation_against_definition_bounds(obligation, snapshot) + let result = self.infcx.probe(|snapshot| { + self.match_projection_obligation_against_definition_bounds( + obligation, + snapshot, + ) }); if result { @@ -1728,19 +1682,19 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn match_projection_obligation_against_definition_bounds( &mut self, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, + snapshot: &CombinedSnapshot<'_, 'tcx>, ) -> bool { let poly_trait_predicate = self.infcx() .resolve_type_vars_if_possible(&obligation.predicate); - let (skol_trait_predicate, placeholder_map) = self.infcx() + let (placeholder_trait_predicate, placeholder_map) = self.infcx() .replace_bound_vars_with_placeholders(&poly_trait_predicate); debug!( "match_projection_obligation_against_definition_bounds: \ - skol_trait_predicate={:?} placeholder_map={:?}", - skol_trait_predicate, placeholder_map + placeholder_trait_predicate={:?}", + placeholder_trait_predicate, ); - let (def_id, substs) = match skol_trait_predicate.trait_ref.self_ty().sty { + let (def_id, substs) = match placeholder_trait_predicate.trait_ref.self_ty().sty { ty::Projection(ref data) => (data.trait_ref(self.tcx()).def_id, data.substs), ty::Opaque(def_id, substs) => (def_id, substs), _ => { @@ -1748,7 +1702,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.cause.span, "match_projection_obligation_against_definition_bounds() called \ but self-ty is not a projection: {:?}", - skol_trait_predicate.trait_ref.self_ty() + placeholder_trait_predicate.trait_ref.self_ty() ); } }; @@ -1769,11 +1723,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let matching_bound = util::elaborate_predicates(self.tcx(), bounds.predicates) .filter_to_traits() .find(|bound| { - self.probe(|this, _| { - this.match_projection( + self.infcx.probe(|_| { + self.match_projection( obligation, bound.clone(), - skol_trait_predicate.trait_ref.clone(), + placeholder_trait_predicate.trait_ref.clone(), &placeholder_map, snapshot, ) @@ -1792,13 +1746,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let result = self.match_projection( obligation, bound, - skol_trait_predicate.trait_ref.clone(), + placeholder_trait_predicate.trait_ref.clone(), &placeholder_map, snapshot, ); - self.infcx.pop_placeholders(placeholder_map, snapshot); - assert!(result); true } @@ -1809,22 +1761,17 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { &mut self, obligation: &TraitObligation<'tcx>, trait_bound: ty::PolyTraitRef<'tcx>, - skol_trait_ref: ty::TraitRef<'tcx>, - placeholder_map: &infer::PlaceholderMap<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, + placeholder_trait_ref: ty::TraitRef<'tcx>, + placeholder_map: &PlaceholderMap<'tcx>, + snapshot: &CombinedSnapshot<'_, 'tcx>, ) -> bool { - debug_assert!(!skol_trait_ref.has_escaping_bound_vars()); - if self.infcx - .at(&obligation.cause, obligation.param_env) - .sup(ty::Binder::dummy(skol_trait_ref), trait_bound) - .is_err() - { - return false; - } - + debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars()); self.infcx - .leak_check(false, obligation.cause.span, placeholder_map, snapshot) + .at(&obligation.cause, obligation.param_env) + .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound) .is_ok() + && + self.infcx.leak_check(false, 
placeholder_map, snapshot).is_ok() } /// Given an obligation like ``, search the obligations that the caller @@ -1872,10 +1819,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { stack: &TraitObligationStack<'o, 'tcx>, where_clause_trait_ref: ty::PolyTraitRef<'tcx>, ) -> Result { - self.probe(move |this, _| { + self.evaluation_probe(|this| { match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) { Ok(obligations) => { - this.evaluate_predicates_recursively(stack.list(), obligations.iter()) + this.evaluate_predicates_recursively(stack.list(), obligations.into_iter()) } Err(()) => Ok(EvaluatedToErr), } @@ -1914,7 +1861,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { Ok(()) } - /// Check for the artificial impl that the compiler will create for an obligation like `X : + /// Checks for the artificial impl that the compiler will create for an obligation like `X : /// FnMut<..>` where `X` is a closure type. /// /// Note: the type parameters on a closure candidate are modeled as *output* type @@ -1997,7 +1944,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { if let ty::FnSig { unsafety: hir::Unsafety::Normal, abi: Abi::Rust, - variadic: false, + c_variadic: false, .. } = self_ty.fn_sig(self.tcx()).skip_binder() { @@ -2025,14 +1972,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.predicate.def_id(), obligation.predicate.skip_binder().trait_ref.self_ty(), |impl_def_id| { - self.probe(|this, snapshot| { - if let Ok(placeholder_map) = this.match_impl(impl_def_id, obligation, snapshot) + self.infcx.probe(|snapshot| { + if let Ok(_substs) = self.match_impl(impl_def_id, obligation, snapshot) { candidates.vec.push(ImplCandidate(impl_def_id)); - - // N.B., we can safely drop the placeholder map - // since we are in a probe. - mem::drop(placeholder_map); } }); }, @@ -2085,6 +2028,24 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // the auto impl might apply, we don't know candidates.ambiguous = true; } + ty::Generator(_, _, movability) + if self.tcx().lang_items().unpin_trait() == Some(def_id) => + { + match movability { + hir::GeneratorMovability::Static => { + // Immovable generators are never `Unpin`, so + // suppress the normal auto-impl candidate for it. + } + hir::GeneratorMovability::Movable => { + // Movable generators are always `Unpin`, so add an + // unconditional builtin candidate. + candidates.vec.push(BuiltinCandidate { + has_nested: false, + }); + } + } + } + _ => candidates.vec.push(AutoImplCandidate(def_id.clone())), } } @@ -2103,11 +2064,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation.self_ty().skip_binder() ); - self.probe(|this, _snapshot| { + self.infcx.probe(|_snapshot| { // The code below doesn't care about regions, and the // self-ty here doesn't escape this probe, so just erase // any LBR. - let self_ty = this.tcx().erase_late_bound_regions(&obligation.self_ty()); + let self_ty = self.tcx().erase_late_bound_regions(&obligation.self_ty()); let poly_trait_ref = match self_ty.sty { ty::Dynamic(ref data, ..) => { if data.auto_traits() @@ -2121,7 +2082,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { return; } - data.principal().with_self_ty(this.tcx(), self_ty) + if let Some(principal) = data.principal() { + principal.with_self_ty(self.tcx(), self_ty) + } else { + // Only auto-trait bounds exist. 
+ return; + } } ty::Infer(ty::TyVar(_)) => { debug!("assemble_candidates_from_object_ty: ambiguous"); @@ -2141,11 +2107,11 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // correct trait, but also the correct type parameters. // For example, we may be trying to upcast `Foo` to `Bar`, // but `Foo` is declared as `trait Foo : Bar`. - let upcast_trait_refs = util::supertraits(this.tcx(), poly_trait_ref) + let upcast_trait_refs = util::supertraits(self.tcx(), poly_trait_ref) .filter(|upcast_trait_ref| { - this.probe(|this, _| { + self.infcx.probe(|_| { let upcast_trait_ref = upcast_trait_ref.clone(); - this.match_poly_trait_ref(obligation, upcast_trait_ref) + self.match_poly_trait_ref(obligation, upcast_trait_ref) .is_ok() }) }) @@ -2213,7 +2179,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // // We always upcast when we can because of reason // #2 (region bounds). - data_a.principal().def_id() == data_b.principal().def_id() + data_a.principal_def_id() == data_b.principal_def_id() && data_b.auto_traits() // All of a's auto traits need to be in b's auto traits. .all(|b| data_a.auto_traits().any(|a| a == b)) @@ -2261,7 +2227,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let def_id = obligation.predicate.def_id(); - if ty::is_trait_alias(self.tcx(), def_id) { + if self.tcx().is_trait_alias(def_id) { candidates.vec.push(TraitAliasCandidate(def_id.clone())); } @@ -2276,8 +2242,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // type variables and then we also attempt to evaluate recursive // bounds to see if they are satisfied. - /// Returns true if `victim` should be dropped in favor of - /// `other`. Generally speaking we will drop duplicate + /// Returns `true` if `victim` should be dropped in favor of + /// `other`. Generally speaking we will drop duplicate /// candidates and prefer where-clause candidates. /// /// See the comment for "SelectionCandidate" for more details. @@ -2362,12 +2328,13 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // See if we can toss out `victim` based on specialization. // This requires us to know *for sure* that the `other` impl applies // i.e., EvaluatedToOk: - if other.evaluation == EvaluatedToOk { + if other.evaluation.must_apply_modulo_regions() { match victim.candidate { ImplCandidate(victim_def) => { let tcx = self.tcx().global_tcx(); return tcx.specializes((other_def, victim_def)) - || tcx.impls_are_allowed_to_overlap(other_def, victim_def); + || tcx.impls_are_allowed_to_overlap( + other_def, victim_def).is_some(); } ParamCandidate(ref cand) => { // Prefer the impl to a global where clause candidate. 
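Editor's note: two of the object-candidate situations touched by the hunks above, shown at the source level (not part of this patch; the variable names are made up):

```rust
// Illustrative only. `dyn Send` is a trait object with no principal trait
// (only an auto-trait bound), and the coercion below keeps the principal
// (`Display`) while dropping the auto trait `Send`.
use std::fmt::Display;

fn main() {
    // No principal trait: only the auto trait `Send`.
    let no_principal: Box<dyn Send> = Box::new(3_u32);

    // Principal `Display` plus the auto trait `Send`.
    let with_principal: Box<dyn Display + Send> = Box::new(4_u32);

    // Object-to-object coercion: same principal, `Send` dropped.
    let display_only: Box<dyn Display> = with_principal;

    println!("{}", display_only);
    drop(no_principal);
}
```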
@@ -2389,7 +2356,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ParamCandidate(ref cand) => { // Prefer these to a global where-clause bound // (see issue #50825) - is_global(cand) && other.evaluation == EvaluatedToOk + is_global(cand) && other.evaluation.must_apply_modulo_regions() } _ => false, } @@ -2690,20 +2657,20 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // binder moved -\ let ty: ty::Binder> = ty::Binder::bind(ty); // <----/ - self.in_snapshot(|this, snapshot| { - let (skol_ty, placeholder_map) = this.infcx() + self.infcx.in_snapshot(|_| { + let (skol_ty, _) = self.infcx .replace_bound_vars_with_placeholders(&ty); let Normalized { value: normalized_ty, mut obligations, } = project::normalize_with_depth( - this, + self, param_env, cause.clone(), recursion_depth, &skol_ty, ); - let skol_obligation = this.tcx().predicate_for_trait_def( + let skol_obligation = self.tcx().predicate_for_trait_def( param_env, cause.clone(), trait_def_id, @@ -2712,8 +2679,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { &[], ); obligations.push(skol_obligation); - this.infcx() - .plug_leaks(placeholder_map, snapshot, obligations) + obligations }) }) .collect() @@ -2804,9 +2770,12 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } fn confirm_projection_candidate(&mut self, obligation: &TraitObligation<'tcx>) { - self.in_snapshot(|this, snapshot| { + self.infcx.in_snapshot(|snapshot| { let result = - this.match_projection_obligation_against_definition_bounds(obligation, snapshot); + self.match_projection_obligation_against_definition_bounds( + obligation, + snapshot, + ); assert!(result); }) } @@ -2923,19 +2892,17 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { nested, ); - let trait_obligations: Vec> = self.in_snapshot(|this, snapshot| { + let trait_obligations: Vec> = self.infcx.in_snapshot(|_| { let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); - let (trait_ref, placeholder_map) = this.infcx() + let (trait_ref, _) = self.infcx .replace_bound_vars_with_placeholders(&poly_trait_ref); let cause = obligation.derived_cause(ImplDerivedObligation); - this.impl_or_trait_obligations( + self.impl_or_trait_obligations( cause, obligation.recursion_depth + 1, obligation.param_env, trait_def_id, &trait_ref.substs, - placeholder_map, - snapshot, ) }); @@ -2960,18 +2927,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // First, create the substitutions by matching the impl again, // this time not in a probe. 
- self.in_snapshot(|this, snapshot| { - let (substs, placeholder_map) = this.rematch_impl(impl_def_id, obligation, snapshot); + self.infcx.in_snapshot(|snapshot| { + let substs = self.rematch_impl(impl_def_id, obligation, snapshot); debug!("confirm_impl_candidate: substs={:?}", substs); let cause = obligation.derived_cause(ImplDerivedObligation); - this.vtable_impl( + self.vtable_impl( impl_def_id, substs, cause, obligation.recursion_depth + 1, obligation.param_env, - placeholder_map, - snapshot, ) }) } @@ -2979,16 +2944,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { fn vtable_impl( &mut self, impl_def_id: DefId, - mut substs: Normalized<'tcx, &'tcx Substs<'tcx>>, + mut substs: Normalized<'tcx, SubstsRef<'tcx>>, cause: ObligationCause<'tcx>, recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, - placeholder_map: infer::PlaceholderMap<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, ) -> VtableImplData<'tcx, PredicateObligation<'tcx>> { debug!( - "vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={}, placeholder_map={:?})", - impl_def_id, substs, recursion_depth, placeholder_map + "vtable_impl(impl_def_id={:?}, substs={:?}, recursion_depth={})", + impl_def_id, substs, recursion_depth, ); let mut impl_obligations = self.impl_or_trait_obligations( @@ -2997,8 +2960,6 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { param_env, impl_def_id, &substs.value, - placeholder_map, - snapshot, ); debug!( @@ -3033,7 +2994,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let self_ty = self.infcx .shallow_resolve(*obligation.self_ty().skip_binder()); let poly_trait_ref = match self_ty.sty { - ty::Dynamic(ref data, ..) => data.principal().with_self_ty(self.tcx(), self_ty), + ty::Dynamic(ref data, ..) => + data.principal().unwrap_or_else(|| { + span_bug!(obligation.cause.span, "object candidate with no principal") + }).with_self_ty(self.tcx(), self_ty), _ => span_bug!(obligation.cause.span, "object candidate with non-object"), }; @@ -3051,7 +3015,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // reported an ambiguity. (When we do find a match, also // record it for later.) 
let nonmatching = util::supertraits(tcx, poly_trait_ref).take_while( - |&t| match self.commit_if_ok(|this, _| this.match_poly_trait_ref(obligation, t)) { + |&t| match self.infcx.commit_if_ok(|_| self.match_poly_trait_ref(obligation, t)) { Ok(obligations) => { upcast_trait_ref = Some(t); nested.extend(obligations); @@ -3127,21 +3091,19 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { obligation, alias_def_id ); - self.in_snapshot(|this, snapshot| { - let (predicate, placeholder_map) = this.infcx() + self.infcx.in_snapshot(|_| { + let (predicate, _) = self.infcx() .replace_bound_vars_with_placeholders(&obligation.predicate); let trait_ref = predicate.trait_ref; let trait_def_id = trait_ref.def_id; let substs = trait_ref.substs; - let trait_obligations = this.impl_or_trait_obligations( + let trait_obligations = self.impl_or_trait_obligations( obligation.cause.clone(), obligation.recursion_depth, obligation.param_env, trait_def_id, &substs, - placeholder_map, - snapshot, ); debug!( @@ -3253,11 +3215,14 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { trait_ref, )?); - obligations.push(Obligation::new( - obligation.cause.clone(), - obligation.param_env, - ty::Predicate::ClosureKind(closure_def_id, substs, kind), - )); + // FIXME: chalk + if !self.tcx().sess.opts.debugging_opts.chalk { + obligations.push(Obligation::new( + obligation.cause.clone(), + obligation.param_env, + ty::Predicate::ClosureKind(closure_def_id, substs, kind), + )); + } Ok(VtableClosureData { closure_def_id, @@ -3270,7 +3235,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { /// we currently treat the input type parameters on the trait as /// outputs. This means that when we have a match we have only /// considered the self type, so we have to go back and make sure - /// to relate the argument types too. This is kind of wrong, but + /// to relate the argument types too. This is kind of wrong, but /// since we control the full set of impls, also not that wrong, /// and it DOES yield better error messages (since we don't report /// errors as if there is no applicable impl, but rather report @@ -3284,7 +3249,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { /// impl Fn(int) for Closure { ... } /// /// Now imagine our obligation is `Fn(usize) for Closure`. So far - /// we have matched the self-type `Closure`. At this point we'll + /// we have matched the self type `Closure`. At this point we'll /// compare the `int` to `usize` and generate an error. /// /// Note that this checking occurs *after* the impl has selected, @@ -3335,8 +3300,9 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => { // See assemble_candidates_for_unsizing for more info. let existential_predicates = data_a.map_bound(|data_a| { - let iter = iter::once(ty::ExistentialPredicate::Trait(data_a.principal())) - .chain( + let iter = + data_a.principal().map(|x| ty::ExistentialPredicate::Trait(x)) + .into_iter().chain( data_a .projection_bounds() .map(|x| ty::ExistentialPredicate::Projection(x)), @@ -3348,10 +3314,28 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { ); tcx.mk_existential_predicates(iter) }); - let new_trait = tcx.mk_dynamic(existential_predicates, r_b); + let source_trait = tcx.mk_dynamic(existential_predicates, r_b); + + // Require that the traits involved in this upcast are **equal**; + // only the **lifetime bound** is changed. 
+ // + // FIXME: This condition is arguably too strong -- it + // would suffice for the source trait to be a + // *subtype* of the target trait. In particular + // changing from something like `for<'a, 'b> Foo<'a, + // 'b>` to `for<'a> Foo<'a, 'a>` should be + // permitted. And, indeed, in the in commit + // 904a0bde93f0348f69914ee90b1f8b6e4e0d7cbc, this + // condition was loosened. However, when the leak check was added + // back, using subtype here actually guies the coercion code in + // such a way that it accepts `old-lub-glb-object.rs`. This is probably + // a good thing, but I've modified this to `.eq` because I want + // to continue rejecting that test (as we have done for quite some time) + // before we are firmly comfortable with what our behavior + // should be there. -nikomatsakis let InferOk { obligations, .. } = self.infcx .at(&obligation.cause, obligation.param_env) - .eq(target, new_trait) + .eq(target, source_trait) // FIXME -- see below .map_err(|_| Unimplemented)?; nested.extend(obligations); @@ -3373,7 +3357,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { // T -> Trait. (_, &ty::Dynamic(ref data, r)) => { let mut object_dids = data.auto_traits() - .chain(iter::once(data.principal().def_id())); + .chain(data.principal_def_id()); if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) { return Err(TraitNotObjectSafe(did)); } @@ -3553,13 +3537,10 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { &mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, - ) -> ( - Normalized<'tcx, &'tcx Substs<'tcx>>, - infer::PlaceholderMap<'tcx>, - ) { + snapshot: &CombinedSnapshot<'_, 'tcx>, + ) -> Normalized<'tcx, SubstsRef<'tcx>> { match self.match_impl(impl_def_id, obligation, snapshot) { - Ok((substs, placeholder_map)) => (substs, placeholder_map), + Ok(substs) => substs, Err(()) => { bug!( "Impl {:?} was matchable against {:?} but now is not", @@ -3574,14 +3555,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { &mut self, impl_def_id: DefId, obligation: &TraitObligation<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, - ) -> Result< - ( - Normalized<'tcx, &'tcx Substs<'tcx>>, - infer::PlaceholderMap<'tcx>, - ), - (), - > { + snapshot: &CombinedSnapshot<'_, 'tcx>, + ) -> Result>, ()> { let impl_trait_ref = self.tcx().impl_trait_ref(impl_def_id).unwrap(); // Before we create the substitutions and everything, first @@ -3623,22 +3598,16 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?; nested_obligations.extend(obligations); - if let Err(e) = - self.infcx - .leak_check(false, obligation.cause.span, &placeholder_map, snapshot) - { + if let Err(e) = self.infcx.leak_check(false, &placeholder_map, snapshot) { debug!("match_impl: failed leak check due to `{}`", e); return Err(()); } debug!("match_impl: success impl_substs={:?}", impl_substs); - Ok(( - Normalized { - value: impl_substs, - obligations: nested_obligations, - }, - placeholder_map, - )) + Ok(Normalized { + value: impl_substs, + obligations: nested_obligations, + }) } fn fast_reject_trait_refs( @@ -3667,7 +3636,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { } /// Normalize `where_clause_trait_ref` and try to match it against - /// `obligation`. If successful, return any predicates that + /// `obligation`. If successful, return any predicates that /// result from the normalization. 
Normalization is necessary /// because where-clauses are stored in the parameter environment /// unnormalized. @@ -3706,8 +3675,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { previous: &ty::PolyTraitRef<'tcx>, current: &ty::PolyTraitRef<'tcx>, ) -> bool { - let mut matcher = ty::_match::Match::new( - self.tcx(), self.infcx.trait_object_mode()); + let mut matcher = ty::_match::Match::new(self.tcx()); matcher.relate(previous, current).is_ok() } @@ -3793,9 +3761,7 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { recursion_depth: usize, param_env: ty::ParamEnv<'tcx>, def_id: DefId, // of impl or trait - substs: &Substs<'tcx>, // for impl or trait - placeholder_map: infer::PlaceholderMap<'tcx>, - snapshot: &infer::CombinedSnapshot<'cx, 'tcx>, + substs: SubstsRef<'tcx>, // for impl or trait ) -> Vec> { debug!("impl_or_trait_obligations(def_id={:?})", def_id); let tcx = self.tcx(); @@ -3857,8 +3823,8 @@ impl<'cx, 'gcx, 'tcx> SelectionContext<'cx, 'gcx, 'tcx> { let mut seen = FxHashSet::default(); predicates.retain(|i| seen.insert(i.clone())); } - self.infcx() - .plug_leaks(placeholder_map, snapshot, predicates) + + predicates } } @@ -3937,6 +3903,10 @@ impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> { fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> { TraitObligationStackList { head: Some(r) } } + + fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> { + self.head + } } impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> { diff --git a/src/librustc/traits/specialize/mod.rs b/src/librustc/traits/specialize/mod.rs index 70d36e9afe192..384a5862cde0c 100644 --- a/src/librustc/traits/specialize/mod.rs +++ b/src/librustc/traits/specialize/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Logic and data structures related to impl specialization, explained in //! greater detail below. //! @@ -21,16 +11,15 @@ pub mod specialization_graph; -use hir::def_id::DefId; -use infer::{InferCtxt, InferOk}; -use lint; -use traits::{self, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine}; +use crate::hir::def_id::DefId; +use crate::infer::{InferCtxt, InferOk}; +use crate::lint; +use crate::traits::{self, coherence, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine}; use rustc_data_structures::fx::FxHashSet; -use rustc_data_structures::sync::Lrc; use syntax_pos::DUMMY_SP; -use traits::select::IntercrateAmbiguityCause; -use ty::{self, TyCtxt, TypeFoldable}; -use ty::subst::{Subst, Substs}; +use crate::traits::select::IntercrateAmbiguityCause; +use crate::ty::{self, TyCtxt, TypeFoldable}; +use crate::ty::subst::{Subst, InternalSubsts, SubstsRef}; use super::{SelectionContext, FulfillmentContext}; use super::util::impl_trait_ref_and_oblig; @@ -42,6 +31,7 @@ pub struct OverlapError { pub trait_desc: String, pub self_desc: Option, pub intercrate_ambiguity_causes: Vec, + pub involves_placeholder: bool, } /// Given a subst for the requested impl, translate it to a subst @@ -67,12 +57,12 @@ pub struct OverlapError { /// Suppose we have selected "source impl" with `V` instantiated with `u32`. /// This function will produce a substitution with `T` and `U` both mapping to `u32`. 
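As a sketch of the scenario this doc comment describes (type and trait names here are illustrative, and the unstable `specialization` feature is assumed; this is not part of the patch):

#![feature(specialization)]

trait Foo {
    fn describe(&self) -> &'static str;
}

// "Target" (parent) impl, parameterized over two type parameters T and U.
impl<T, U> Foo for (T, U) {
    default fn describe(&self) -> &'static str { "some pair" }
}

// "Source" impl, selected when both components have the same type V.
impl<V> Foo for (V, V) {
    fn describe(&self) -> &'static str { "a homogeneous pair" }
}

// Selecting the source impl with `V = u32` (e.g. for `(1u32, 2u32)`) forces the
// parent impl's substitution to map both `T` and `U` to `u32` -- the kind of
// translation performed when a default item from the parent impl is used.
fn main() {
    assert_eq!((1u32, 2u32).describe(), "a homogeneous pair");
}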
/// -/// Where clauses add some trickiness here, because they can be used to "define" +/// where-clauses add some trickiness here, because they can be used to "define" /// an argument indirectly: /// /// ```rust /// impl<'a, I, T: 'a> Iterator for Cloned -/// where I: Iterator, T: Clone +/// where I: Iterator, T: Clone /// ``` /// /// In a case like this, the substitution for `T` is determined indirectly, @@ -82,9 +72,9 @@ pub struct OverlapError { pub fn translate_substs<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, source_impl: DefId, - source_substs: &'tcx Substs<'tcx>, + source_substs: SubstsRef<'tcx>, target_node: specialization_graph::Node) - -> &'tcx Substs<'tcx> { + -> SubstsRef<'tcx> { debug!("translate_substs({:?}, {:?}, {:?}, {:?})", param_env, source_impl, source_substs, target_node); let source_trait_ref = infcx.tcx @@ -123,9 +113,9 @@ pub fn find_associated_item<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, item: &ty::AssociatedItem, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, impl_data: &super::VtableImplData<'tcx, ()>, -) -> (DefId, &'tcx Substs<'tcx>) { +) -> (DefId, SubstsRef<'tcx>) { debug!("find_associated_item({:?}, {:?}, {:?}, {:?})", param_env, item, substs, impl_data); assert!(!substs.needs_infer()); @@ -154,10 +144,10 @@ pub fn find_associated_item<'a, 'tcx>( } } -/// Is impl1 a specialization of impl2? +/// Is `impl1` a specialization of `impl2`? /// /// Specialization is determined by the sets of types to which the impls apply; -/// impl1 specializes impl2 if it applies to a subset of the types impl2 applies +/// `impl1` specializes `impl2` if it applies to a subset of the types `impl2` applies /// to. pub(super) fn specializes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (impl1_def_id, impl2_def_id): (DefId, DefId)) @@ -223,7 +213,7 @@ fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, source_trait_ref: ty::TraitRef<'tcx>, target_impl: DefId) - -> Result<&'tcx Substs<'tcx>, ()> { + -> Result, ()> { debug!("fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)", param_env, source_trait_ref, target_impl); @@ -298,7 +288,7 @@ fn fulfill_implication<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, pub(super) fn specialization_graph_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_id: DefId, -) -> Lrc { +) -> &'tcx specialization_graph::Graph { let mut sg = specialization_graph::Graph::new(); let mut trait_impls = tcx.all_impls(trait_id); @@ -343,9 +333,9 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>( FutureCompatOverlapErrorKind::Issue33140 => lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS, }; - tcx.struct_span_lint_node( + tcx.struct_span_lint_hir( lint, - tcx.hir().as_local_node_id(impl_def_id).unwrap(), + tcx.hir().as_local_hir_id(impl_def_id).unwrap(), impl_span, &msg) } else { @@ -380,6 +370,10 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>( cause.add_intercrate_ambiguity_hint(&mut err); } + if overlap.involves_placeholder { + coherence::add_placeholder_note(&mut err); + } + err.emit(); } } else { @@ -388,7 +382,7 @@ pub(super) fn specialization_graph_provider<'a, 'tcx>( } } - Lrc::new(sg) + tcx.arena.alloc(sg) } /// Recovers the "impl X for Y" signature from `impl_def_id` and returns it as a @@ -404,7 +398,7 @@ fn to_pretty_impl_header(tcx: TyCtxt<'_, '_, '_>, impl_def_id: DefId) -> Option< let mut w = "impl".to_owned(); - let substs = Substs::identity_for_item(tcx, impl_def_id); + let substs = 
InternalSubsts::identity_for_item(tcx, impl_def_id); // FIXME: Currently only handles ?Sized. // Needs to support ?Move and ?DynSized when they are implemented. @@ -416,7 +410,7 @@ fn to_pretty_impl_header(tcx: TyCtxt<'_, '_, '_>, impl_def_id: DefId) -> Option< w.push('<'); w.push_str(&substs.iter() .map(|k| k.to_string()) - .filter(|k| &k[..] != "'_") + .filter(|k| k != "'_") .collect::>().join(", ")); w.push('>'); } diff --git a/src/librustc/traits/specialize/specialization_graph.rs b/src/librustc/traits/specialize/specialization_graph.rs index 1a22866030641..dae1518d722db 100644 --- a/src/librustc/traits/specialize/specialization_graph.rs +++ b/src/librustc/traits/specialize/specialization_graph.rs @@ -1,27 +1,15 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::OverlapError; -use hir::def_id::DefId; -use ich::{self, StableHashingContext}; +use crate::hir::def_id::DefId; +use crate::ich::{self, StableHashingContext}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; -use traits; -use ty::{self, TyCtxt, TypeFoldable}; -use ty::fast_reject::{self, SimplifiedType}; -use ty::relate::TraitObjectMode; -use rustc_data_structures::sync::Lrc; +use crate::traits; +use crate::ty::{self, TyCtxt, TypeFoldable}; +use crate::ty::fast_reject::{self, SimplifiedType}; use syntax::ast::Ident; -use util::captures::Captures; -use util::nodemap::{DefIdMap, FxHashMap}; +use crate::util::captures::Captures; +use crate::util::nodemap::{DefIdMap, FxHashMap}; /// A per-trait graph of impls in specialization order. At the moment, this /// graph forms a tree rooted with the trait itself, with all other nodes @@ -108,7 +96,7 @@ impl<'a, 'gcx, 'tcx> Children { } } - /// Remove an impl from this set of children. Used when replacing + /// Removes an impl from this set of children. Used when replacing /// an impl with a parent. The impl must be present in the list of /// children already. fn remove_existing(&mut self, @@ -174,6 +162,7 @@ impl<'a, 'gcx, 'tcx> Children { None }, intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes, + involves_placeholder: overlap.involves_placeholder, } }; @@ -183,9 +172,20 @@ impl<'a, 'gcx, 'tcx> Children { possible_sibling, impl_def_id, traits::IntercrateMode::Issue43355, - TraitObjectMode::NoSquash, |overlap| { - if tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { + if let Some(overlap_kind) = + tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) + { + match overlap_kind { + ty::ImplOverlapKind::Permitted => {} + ty::ImplOverlapKind::Issue33140 => { + last_lint = Some(FutureCompatOverlapError { + error: overlap_error(overlap), + kind: FutureCompatOverlapErrorKind::Issue33140 + }); + } + } + return Ok((false, false)); } @@ -213,31 +213,17 @@ impl<'a, 'gcx, 'tcx> Children { replace_children.push(possible_sibling); } else { - if !tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling) { - // do future-compat checks for overlap. Have issue #43355 - // errors overwrite issue #33140 errors when both are present. 
- - traits::overlapping_impls( - tcx, - possible_sibling, - impl_def_id, - traits::IntercrateMode::Fixed, - TraitObjectMode::SquashAutoTraitsIssue33140, - |overlap| { - last_lint = Some(FutureCompatOverlapError { - error: overlap_error(overlap), - kind: FutureCompatOverlapErrorKind::Issue33140 - }); - }, - || (), - ); + if let None = tcx.impls_are_allowed_to_overlap( + impl_def_id, possible_sibling) + { + // do future-compat checks for overlap. Have issue #33140 + // errors overwrite issue #43355 errors when both are present. traits::overlapping_impls( tcx, possible_sibling, impl_def_id, traits::IntercrateMode::Fixed, - TraitObjectMode::NoSquash, |overlap| { last_lint = Some(FutureCompatOverlapError { error: overlap_error(overlap), @@ -412,7 +398,7 @@ impl<'a, 'gcx, 'tcx> Graph { self.children.entry(parent).or_default().insert_blindly(tcx, child); } - /// The parent of a given impl, which is the def id of the trait when the + /// The parent of a given impl, which is the `DefId` of the trait when the /// impl is a "specialization root". pub fn parent(&self, child: DefId) -> DefId { *self.parent.get(&child).unwrap() @@ -452,13 +438,13 @@ impl<'a, 'gcx, 'tcx> Node { } } -pub struct Ancestors { +pub struct Ancestors<'tcx> { trait_def_id: DefId, - specialization_graph: Lrc, + specialization_graph: &'tcx Graph, current_source: Option, } -impl Iterator for Ancestors { +impl Iterator for Ancestors<'_> { type Item = Node; fn next(&mut self) -> Option { let cur = self.current_source.take(); @@ -489,7 +475,7 @@ impl NodeItem { } } -impl<'a, 'gcx, 'tcx> Ancestors { +impl<'a, 'gcx, 'tcx> Ancestors<'gcx> { /// Search the items from the given ancestors, returning each definition /// with the given name and the given kind. // FIXME(#35870): avoid closures being unexported due to `impl Trait`. @@ -502,7 +488,7 @@ impl<'a, 'gcx, 'tcx> Ancestors { trait_def_id: DefId, ) -> impl Iterator> + Captures<'gcx> + Captures<'tcx> + 'a { self.flat_map(move |node| { - use ty::AssociatedKind::*; + use crate::ty::AssociatedKind::*; node.items(tcx).filter(move |impl_item| match (trait_item_kind, impl_item.kind) { | (Const, Const) | (Method, Method) @@ -522,10 +508,10 @@ impl<'a, 'gcx, 'tcx> Ancestors { /// Walk up the specialization ancestors of a given impl, starting with that /// impl itself. -pub fn ancestors(tcx: TyCtxt<'_, '_, '_>, +pub fn ancestors(tcx: TyCtxt<'_, 'tcx, '_>, trait_def_id: DefId, start_from_impl: DefId) - -> Ancestors { + -> Ancestors<'tcx> { let specialization_graph = tcx.specialization_graph_of(trait_def_id); Ancestors { trait_def_id, diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index 36e93cc774089..0711f3539e586 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use chalk_engine; use smallvec::SmallVec; -use traits; -use traits::project::Normalized; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; -use ty::{self, Lift, TyCtxt}; +use crate::traits; +use crate::traits::project::Normalized; +use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use crate::ty::{self, Lift, TyCtxt}; use syntax::symbol::InternedString; use std::fmt; @@ -173,9 +163,10 @@ impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> { impl<'tcx> fmt::Display for traits::WhereClause<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::WhereClause::*; + use crate::traits::WhereClause::*; - // Bypass ppaux because it does not print out anonymous regions. + // Bypass `ty::print` because it does not print out anonymous regions. + // FIXME(eddyb) implement a custom `PrettyPrinter`, or move this to `ty::print`. fn write_region_name<'tcx>( r: ty::Region<'tcx>, fmt: &mut fmt::Formatter<'_> @@ -216,7 +207,7 @@ impl<'tcx> fmt::Display for traits::WhereClause<'tcx> { impl<'tcx> fmt::Display for traits::WellFormed<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::WellFormed::*; + use crate::traits::WellFormed::*; match self { Trait(trait_ref) => write!(fmt, "WellFormed({})", trait_ref), @@ -227,7 +218,7 @@ impl<'tcx> fmt::Display for traits::WellFormed<'tcx> { impl<'tcx> fmt::Display for traits::FromEnv<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::FromEnv::*; + use crate::traits::FromEnv::*; match self { Trait(trait_ref) => write!(fmt, "FromEnv({})", trait_ref), @@ -238,7 +229,7 @@ impl<'tcx> fmt::Display for traits::FromEnv<'tcx> { impl<'tcx> fmt::Display for traits::DomainGoal<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::DomainGoal::*; + use crate::traits::DomainGoal::*; match self { Holds(wc) => write!(fmt, "{}", wc), @@ -256,7 +247,7 @@ impl<'tcx> fmt::Display for traits::DomainGoal<'tcx> { impl fmt::Display for traits::QuantifierKind { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::QuantifierKind::*; + use crate::traits::QuantifierKind::*; match self { Universal => write!(fmt, "forall"), @@ -266,7 +257,7 @@ impl fmt::Display for traits::QuantifierKind { } /// Collect names for regions / types bound by a quantified goal / clause. -/// This collector does not try to do anything clever like in ppaux, it's just used +/// This collector does not try to do anything clever like in `ty::print`, it's just used /// for debug output in tests anyway. struct BoundNamesCollector { // Just sort by name because `BoundRegion::BrNamed` does not have a `BoundVar` index anyway. 
@@ -371,7 +362,7 @@ impl<'tcx> TypeVisitor<'tcx> for BoundNamesCollector { impl<'tcx> fmt::Display for traits::Goal<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::GoalKind::*; + use crate::traits::GoalKind::*; match self { Implies(hypotheses, goal) => { @@ -405,6 +396,7 @@ impl<'tcx> fmt::Display for traits::Goal<'tcx> { Ok(()) } + Subtype(a, b) => write!(fmt, "{} <: {}", a, b), CannotProve => write!(fmt, "CannotProve"), } } @@ -429,7 +421,7 @@ impl<'tcx> fmt::Display for traits::ProgramClause<'tcx> { impl<'tcx> fmt::Display for traits::Clause<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - use traits::Clause::*; + use crate::traits::Clause::*; match self { Implies(clause) => write!(fmt, "{}", clause), @@ -522,11 +514,31 @@ impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCauseCode<'a> { trait_item_def_id, }), super::ExprAssignable => Some(super::ExprAssignable), - super::MatchExpressionArm { arm_span, source } => Some(super::MatchExpressionArm { + super::MatchExpressionArm { arm_span, - source: source, + source, + ref prior_arms, + last_ty, + discrim_hir_id, + } => { + tcx.lift(&last_ty).map(|last_ty| { + super::MatchExpressionArm { + arm_span, + source, + prior_arms: prior_arms.clone(), + last_ty, + discrim_hir_id, + } + }) + } + super::MatchExpressionArmPattern { span, ty } => { + tcx.lift(&ty).map(|ty| super::MatchExpressionArmPattern { span, ty }) + } + super::IfExpression { then, outer, semicolon } => Some(super::IfExpression { + then, + outer, + semicolon, }), - super::IfExpression => Some(super::IfExpression), super::IfExpressionWithNoElse => Some(super::IfExpressionWithNoElse), super::MainFunctionType => Some(super::MainFunctionType), super::StartFunctionType => Some(super::StartFunctionType), @@ -678,6 +690,7 @@ EnumLiftImpl! { (traits::GoalKind::Not)(goal), (traits::GoalKind::DomainGoal)(domain_goal), (traits::GoalKind::Quantified)(kind, goal), + (traits::GoalKind::Subtype)(a, b), (traits::GoalKind::CannotProve), } } @@ -710,12 +723,36 @@ impl<'a, 'tcx, G: Lift<'tcx>> Lift<'tcx> for traits::InEnvironment<'a, G> { impl<'tcx, C> Lift<'tcx> for chalk_engine::ExClause where C: chalk_engine::context::Context + Clone, - C: traits::ExClauseLift<'tcx>, + C: traits::ChalkContextLift<'tcx>, { type Lifted = C::LiftedExClause; fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { - ::lift_ex_clause_to_tcx(self, tcx) + ::lift_ex_clause_to_tcx(self, tcx) + } +} + +impl<'tcx, C> Lift<'tcx> for chalk_engine::DelayedLiteral +where + C: chalk_engine::context::Context + Clone, + C: traits::ChalkContextLift<'tcx>, +{ + type Lifted = C::LiftedDelayedLiteral; + + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + ::lift_delayed_literal_to_tcx(self, tcx) + } +} + +impl<'tcx, C> Lift<'tcx> for chalk_engine::Literal +where + C: chalk_engine::context::Context + Clone, + C: traits::ChalkContextLift<'tcx>, +{ + type Lifted = C::LiftedLiteral; + + fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { + ::lift_literal_to_tcx(self, tcx) } } @@ -850,6 +887,7 @@ EnumTypeFoldableImpl! 
{ (traits::GoalKind::Not)(goal), (traits::GoalKind::DomainGoal)(domain_goal), (traits::GoalKind::Quantified)(qkind, goal), + (traits::GoalKind::Subtype)(a, b), (traits::GoalKind::CannotProve), } } diff --git a/src/librustc/traits/util.rs b/src/librustc/traits/util.rs index 48db72c1f5615..90f62a4d132c7 100644 --- a/src/librustc/traits/util.rs +++ b/src/librustc/traits/util.rs @@ -1,20 +1,10 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir; -use hir::def_id::DefId; -use traits::specialize::specialization_graph::NodeItem; -use ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef}; -use ty::outlives::Component; -use ty::subst::{Kind, Subst, Substs}; -use util::nodemap::FxHashSet; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::traits::specialize::specialization_graph::NodeItem; +use crate::ty::{self, Ty, TyCtxt, ToPredicate, ToPolyTraitRef}; +use crate::ty::outlives::Component; +use crate::ty::subst::{Kind, Subst, SubstsRef}; +use crate::util::nodemap::FxHashSet; use super::{Obligation, ObligationCause, PredicateObligation, SelectionContext, Normalized}; @@ -302,11 +292,7 @@ impl<'cx, 'gcx, 'tcx> Iterator for SupertraitDefIds<'cx, 'gcx, 'tcx> { type Item = DefId; fn next(&mut self) -> Option { - let def_id = match self.stack.pop() { - Some(def_id) => def_id, - None => { return None; } - }; - + let def_id = self.stack.pop()?; let predicates = self.tcx.super_predicates_of(def_id); let visited = &mut self.visited; self.stack.extend( @@ -368,7 +354,7 @@ impl<'tcx, I: Iterator>> Iterator for FilterToTraits< pub fn impl_trait_ref_and_oblig<'a, 'gcx, 'tcx>(selcx: &mut SelectionContext<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, impl_def_id: DefId, - impl_substs: &Substs<'tcx>) + impl_substs: SubstsRef<'tcx>,) -> (ty::TraitRef<'tcx>, Vec>) { @@ -535,9 +521,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn impl_is_default(self, node_item_def_id: DefId) -> bool { - match self.hir().as_local_node_id(node_item_def_id) { - Some(node_id) => { - let item = self.hir().expect_item(node_id); + match self.hir().as_local_hir_id(node_item_def_id) { + Some(hir_id) => { + let item = self.hir().expect_item_by_hir_id(hir_id); if let hir::ItemKind::Impl(_, _, defaultness, ..) = item.node { defaultness.is_default() } else { diff --git a/src/librustc/ty/_match.rs b/src/librustc/ty/_match.rs index 29067bf518da0..07fa441bb8076 100644 --- a/src/librustc/ty/_match.rs +++ b/src/librustc/ty/_match.rs @@ -1,16 +1,6 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use ty::{self, Ty, TyCtxt}; -use ty::error::TypeError; -use ty::relate::{self, Relate, TypeRelation, RelateResult}; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::error::TypeError; +use crate::ty::relate::{self, Relate, TypeRelation, RelateResult}; /// A type "A" *matches* "B" if the fresh types in B could be /// substituted with values so as to make it equal to A. 
Matching is @@ -29,24 +19,17 @@ use ty::relate::{self, Relate, TypeRelation, RelateResult}; /// important thing about the result is Ok/Err. Also, matching never /// affects any type variables or unification state. pub struct Match<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { - tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_object_mode: relate::TraitObjectMode + tcx: TyCtxt<'a, 'gcx, 'tcx> } impl<'a, 'gcx, 'tcx> Match<'a, 'gcx, 'tcx> { - pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, - trait_object_mode: relate::TraitObjectMode) - -> Match<'a, 'gcx, 'tcx> { - Match { tcx, trait_object_mode } + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Match<'a, 'gcx, 'tcx> { + Match { tcx } } } impl<'a, 'gcx, 'tcx> TypeRelation<'a, 'gcx, 'tcx> for Match<'a, 'gcx, 'tcx> { fn tag(&self) -> &'static str { "Match" } - fn trait_object_mode(&self) -> relate::TraitObjectMode { - self.trait_object_mode - } - fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx } fn a_is_expected(&self) -> bool { true } // irrelevant diff --git a/src/librustc/ty/adjustment.rs b/src/librustc/ty/adjustment.rs index d91ae7e120f66..c2ef08c4c40fe 100644 --- a/src/librustc/ty/adjustment.rs +++ b/src/librustc/ty/adjustment.rs @@ -1,17 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir; -use hir::def_id::DefId; -use ty::{self, Ty, TyCtxt}; -use ty::subst::Substs; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::subst::SubstsRef; +use rustc_macros::HashStable; /// Represents coercing a value to a different type of value. @@ -25,7 +16,7 @@ use ty::subst::Substs; /// Here the pointer will be dereferenced N times (where a dereference can /// happen to raw or borrowed pointers or any smart pointer which implements /// Deref, including Box<_>). The types of dereferences is given by -/// `autoderefs`. It can then be auto-referenced zero or one times, indicated +/// `autoderefs`. It can then be auto-referenced zero or one times, indicated /// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is /// `false`. /// @@ -48,19 +39,19 @@ use ty::subst::Substs; /// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about /// the underlying conversions from `[i32; 4]` to `[i32]`. /// -/// 3. Coercing a `Box` to `Box` is an interesting special case. In +/// 3. Coercing a `Box` to `Box` is an interesting special case. In /// that case, we have the pointer we need coming in, so there are no /// autoderefs, and no autoref. Instead we just do the `Unsize` transformation. /// At some point, of course, `Box` should move out of the compiler, in which /// case this is analogous to transforming a struct. E.g., Box<[i32; 4]> -> /// Box<[i32]> is an `Adjust::Unsize` with the target `Box<[i32]>`. -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable, HashStable)] pub struct Adjustment<'tcx> { pub kind: Adjust<'tcx>, pub target: Ty<'tcx>, } -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum Adjust<'tcx> { /// Go from ! to any type. NeverToAny, @@ -71,8 +62,9 @@ pub enum Adjust<'tcx> { /// Go from a safe fn pointer to an unsafe fn pointer. 
UnsafeFnPointer, - /// Go from a non-capturing closure to an fn pointer. - ClosureFnPointer, + /// Go from a non-capturing closure to an fn pointer or an unsafe fn pointer. + /// It cannot convert a closure that requires unsafe. + ClosureFnPointer(hir::Unsafety), /// Go from a mut raw pointer to a const raw pointer. MutToConstPointer, @@ -88,7 +80,7 @@ pub enum Adjust<'tcx> { /// This will do things like convert thin pointers to fat /// pointers, or convert structs containing thin pointers to /// structs containing fat pointers, or convert between fat - /// pointers. We don't store the details of how the transform is + /// pointers. We don't store the details of how the transform is /// done (in fact, we don't know that, because it might depend on /// the precise type parameters). We just store the target /// type. Codegen backends and miri figure out what has to be done @@ -100,7 +92,7 @@ pub enum Adjust<'tcx> { /// call, with the signature `&'a T -> &'a U` or `&'a mut T -> &'a mut U`. /// The target type is `U` in both cases, with the region and mutability /// being those shared by both the receiver and the returned reference. -#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct OverloadedDeref<'tcx> { pub region: ty::Region<'tcx>, pub mutbl: hir::Mutability, @@ -108,7 +100,7 @@ pub struct OverloadedDeref<'tcx> { impl<'a, 'gcx, 'tcx> OverloadedDeref<'tcx> { pub fn method_call(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, source: Ty<'tcx>) - -> (DefId, &'tcx Substs<'tcx>) { + -> (DefId, SubstsRef<'tcx>) { let trait_def_id = match self.mutbl { hir::MutImmutable => tcx.lang_items().deref_trait(), hir::MutMutable => tcx.lang_items().deref_mut_trait() @@ -120,24 +112,24 @@ impl<'a, 'gcx, 'tcx> OverloadedDeref<'tcx> { } /// At least for initial deployment, we want to limit two-phase borrows to -/// only a few specific cases. Right now, those mostly "things that desugar" -/// into method calls -/// - using x.some_method() syntax, where some_method takes &mut self -/// - using Foo::some_method(&mut x, ...) syntax -/// - binary assignment operators (+=, -=, *=, etc.) -/// Anything else should be rejected until generalized two phase borrow support +/// only a few specific cases. Right now, those are mostly "things that desugar" +/// into method calls: +/// - using `x.some_method()` syntax, where some_method takes `&mut self`, +/// - using `Foo::some_method(&mut x, ...)` syntax, +/// - binary assignment operators (`+=`, `-=`, `*=`, etc.). +/// Anything else should be rejected until generalized two-phase borrow support /// is implemented. Right now, dataflow can't handle the general case where there /// is more than one use of a mutable borrow, and we don't want to accept too much /// new code via two-phase borrows, so we try to limit where we create two-phase /// capable mutable borrows. /// See #49434 for tracking. 
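A minimal sketch of the first of the cases listed above, an autoref'd `&mut self` method call whose argument also borrows the receiver (illustrative code, not part of the patch):

fn main() {
    let mut v = vec![0, 1, 2];
    // Roughly `Vec::push(&mut v, Vec::len(&v))`: with a strict one-phase
    // borrow the `&mut v` would already be live while `v.len()` takes a
    // shared borrow. The two-phase borrow keeps the mutable borrow
    // "reserved" until the actual call, so this compiles.
    v.push(v.len());
    assert_eq!(v, [0, 1, 2, 3]);
}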
-#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum AllowTwoPhase { Yes, No } -#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum AutoBorrowMutability { Mutable { allow_two_phase_borrow: AllowTwoPhase }, Immutable, @@ -152,12 +144,12 @@ impl From for hir::Mutability { } } -#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum AutoBorrow<'tcx> { - /// Convert from T to &T. + /// Converts from T to &T. Ref(ty::Region<'tcx>, AutoBorrowMutability), - /// Convert from T to *T. + /// Converts from T to *T. RawPtr(hir::Mutability), } @@ -167,7 +159,7 @@ pub enum AutoBorrow<'tcx> { /// This struct can be obtained via the `coerce_impl_info` query. /// Demanding this struct also has the side-effect of reporting errors /// for inappropriate impls. -#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug, HashStable)] pub struct CoerceUnsizedInfo { /// If this is a "custom coerce" impl, then what kind of custom /// coercion is it? This applies to impls of `CoerceUnsized` for @@ -176,7 +168,7 @@ pub struct CoerceUnsizedInfo { pub custom_kind: Option } -#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, Copy, RustcEncodable, RustcDecodable, Debug, HashStable)] pub enum CustomCoerceUnsized { /// Records the index of the field being coerced. Struct(usize) diff --git a/src/librustc/ty/binding.rs b/src/librustc/ty/binding.rs index 971b3c3d14aeb..1290141b0a6b0 100644 --- a/src/librustc/ty/binding.rs +++ b/src/librustc/ty/binding.rs @@ -1,16 +1,6 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::BindingAnnotation::*; -use hir::BindingAnnotation; -use hir::Mutability; +use crate::hir::BindingAnnotation::*; +use crate::hir::BindingAnnotation; +use crate::hir::Mutability; #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug, Copy)] pub enum BindingMode { diff --git a/src/librustc/ty/cast.rs b/src/librustc/ty/cast.rs index ab82f28c8bff4..7ea5c73c5b749 100644 --- a/src/librustc/ty/cast.rs +++ b/src/librustc/ty/cast.rs @@ -1,19 +1,10 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Helpers for handling cast expressions, used in both // typeck and codegen. -use ty::{self, Ty}; +use crate::ty::{self, Ty}; use syntax::ast; +use rustc_macros::HashStable; /// Types that are represented as ints. #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -42,7 +33,7 @@ pub enum CastTy<'tcx> { } /// Cast Kind. 
See RFC 401 (or librustc_typeck/check/cast.rs) -#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum CastKind { CoercionCast, PtrPtrCast, diff --git a/src/librustc/ty/codec.rs b/src/librustc/ty/codec.rs index 5ad7d247fe459..a76cc3dfdec02 100644 --- a/src/librustc/ty/codec.rs +++ b/src/librustc/ty/codec.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // This module contains some shared code for encoding and decoding various // things from the `ty` module, and in particular implements support for // "shorthands" which allow to have pointers back into the already encoded @@ -16,15 +6,16 @@ // The functionality in here is shared between persisting to crate metadata and // persisting to incr. comp. caches. -use hir::def_id::{DefId, CrateNum}; -use infer::canonical::{CanonicalVarInfo, CanonicalVarInfos}; +use crate::arena::ArenaAllocatable; +use crate::hir::def_id::{DefId, CrateNum}; +use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos}; use rustc_data_structures::fx::FxHashMap; -use rustc_serialize::{Decodable, Decoder, Encoder, Encodable, opaque}; +use crate::rustc_serialize::{Decodable, Decoder, Encoder, Encodable, opaque}; use std::hash::Hash; use std::intrinsics; -use ty::{self, Ty, TyCtxt}; -use ty::subst::Substs; -use mir::interpret::Allocation; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::subst::SubstsRef; +use crate::mir::interpret::Allocation; /// The shorthand encoding uses an enum's variant index `usize` /// and is offset by this value so it never matches a real variant. @@ -140,6 +131,26 @@ pub trait TyDecoder<'a, 'tcx: 'a>: Decoder { } } +#[inline] +pub fn decode_arena_allocable<'a, 'tcx, D, T: ArenaAllocatable + Decodable>( + decoder: &mut D +) -> Result<&'tcx T, D::Error> + where D: TyDecoder<'a, 'tcx>, + 'tcx: 'a, +{ + Ok(decoder.tcx().arena.alloc(Decodable::decode(decoder)?)) +} + +#[inline] +pub fn decode_arena_allocable_slice<'a, 'tcx, D, T: ArenaAllocatable + Decodable>( + decoder: &mut D +) -> Result<&'tcx [T], D::Error> + where D: TyDecoder<'a, 'tcx>, + 'tcx: 'a, +{ + Ok(decoder.tcx().arena.alloc_from_iter( as Decodable>::decode(decoder)?)) +} + #[inline] pub fn decode_cnum<'a, 'tcx, D>(decoder: &mut D) -> Result where D: TyDecoder<'a, 'tcx>, @@ -195,7 +206,7 @@ pub fn decode_predicates<'a, 'tcx, D>(decoder: &mut D) } #[inline] -pub fn decode_substs<'a, 'tcx, D>(decoder: &mut D) -> Result<&'tcx Substs<'tcx>, D::Error> +pub fn decode_substs<'a, 'tcx, D>(decoder: &mut D) -> Result, D::Error> where D: TyDecoder<'a, 'tcx>, 'tcx: 'a, { @@ -283,6 +294,39 @@ macro_rules! __impl_decoder_methods { } } +#[macro_export] +macro_rules! 
impl_arena_allocatable_decoder { + ([]$args:tt) => {}; + ([decode $(, $attrs:ident)*] + [[$DecoderName:ident [$($typaram:tt),*]], [$name:ident: $ty:ty], $tcx:lifetime]) => { + impl<$($typaram),*> SpecializedDecoder<&$tcx $ty> for $DecoderName<$($typaram),*> { + #[inline] + fn specialized_decode(&mut self) -> Result<&$tcx $ty, Self::Error> { + decode_arena_allocable(self) + } + } + + impl<$($typaram),*> SpecializedDecoder<&$tcx [$ty]> for $DecoderName<$($typaram),*> { + #[inline] + fn specialized_decode(&mut self) -> Result<&$tcx [$ty], Self::Error> { + decode_arena_allocable_slice(self) + } + } + }; + ([$ignore:ident $(, $attrs:ident)*]$args:tt) => { + impl_arena_allocatable_decoder!([$($attrs),*]$args); + }; +} + +#[macro_export] +macro_rules! impl_arena_allocatable_decoders { + ($args:tt, [$($a:tt $name:ident: $ty:ty,)*], $tcx:lifetime) => { + $( + impl_arena_allocatable_decoder!($a [$args, [$name: $ty], $tcx]); + )* + } +} + #[macro_export] macro_rules! implement_ty_decoder { ($DecoderName:ident <$($typaram:tt),*>) => { @@ -291,9 +335,9 @@ macro_rules! implement_ty_decoder { use $crate::infer::canonical::CanonicalVarInfos; use $crate::ty; use $crate::ty::codec::*; - use $crate::ty::subst::Substs; + use $crate::ty::subst::SubstsRef; use $crate::hir::def_id::{CrateNum}; - use rustc_serialize::{Decoder, SpecializedDecoder}; + use crate::rustc_serialize::{Decoder, SpecializedDecoder}; use std::borrow::Cow; impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> { @@ -332,6 +376,8 @@ macro_rules! implement_ty_decoder { // the caller to pick any lifetime for 'tcx, including 'static, // by using the unspecialized proxies to them. + arena_types!(impl_arena_allocatable_decoders, [$DecoderName [$($typaram),*]], 'tcx); + impl<$($typaram),*> SpecializedDecoder for $DecoderName<$($typaram),*> { fn specialized_decode(&mut self) -> Result { @@ -354,9 +400,9 @@ macro_rules! 
implement_ty_decoder { } } - impl<$($typaram),*> SpecializedDecoder<&'tcx Substs<'tcx>> + impl<$($typaram),*> SpecializedDecoder> for $DecoderName<$($typaram),*> { - fn specialized_decode(&mut self) -> Result<&'tcx Substs<'tcx>, Self::Error> { + fn specialized_decode(&mut self) -> Result, Self::Error> { decode_substs(self) } } diff --git a/src/librustc/ty/constness.rs b/src/librustc/ty/constness.rs index 3741f4051b896..e33d0a74ea013 100644 --- a/src/librustc/ty/constness.rs +++ b/src/librustc/ty/constness.rs @@ -1,9 +1,9 @@ -use ty::query::Providers; -use hir::def_id::DefId; -use hir; -use ty::TyCtxt; +use crate::ty::query::Providers; +use crate::hir::def_id::DefId; +use crate::hir; +use crate::ty::TyCtxt; use syntax_pos::symbol::Symbol; -use hir::map::blocks::FnLikeNode; +use crate::hir::map::blocks::FnLikeNode; use syntax::attr; impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { @@ -37,7 +37,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } } - /// Returns true if this function must conform to `min_const_fn` + /// Returns `true` if this function must conform to `min_const_fn` pub fn is_min_const_fn(self, def_id: DefId) -> bool { // Bail out if the signature doesn't contain `const` if !self.is_const_fn_raw(def_id) { @@ -67,10 +67,10 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { pub fn provide<'tcx>(providers: &mut Providers<'tcx>) { /// only checks whether the function has a `const` modifier fn is_const_fn_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { - let node_id = tcx.hir().as_local_node_id(def_id) - .expect("Non-local call to local provider is_const_fn"); + let hir_id = tcx.hir().as_local_hir_id(def_id) + .expect("Non-local call to local provider is_const_fn"); - if let Some(fn_like) = FnLikeNode::from_node(tcx.hir().get(node_id)) { + if let Some(fn_like) = FnLikeNode::from_node(tcx.hir().get_by_hir_id(hir_id)) { fn_like.constness() == hir::Constness::Const } else { false diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index e27d7349877fc..7dc4dee3fbf91 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -1,66 +1,58 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! type context book-keeping - -use dep_graph::DepGraph; -use dep_graph::{DepNode, DepConstructor}; +//! Type context book-keeping. 
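A small sketch of the `is_const_fn_raw` / `is_min_const_fn` split described in the constness hunk above (illustrative functions only, not part of the patch):

// `is_const_fn_raw` is purely syntactic: it only asks whether the item
// carries the `const` modifier.
const fn zero() -> u32 { 0 }     // const-fn-raw; also passes the min_const_fn rules
fn not_const() -> u32 { zero() } // not a const fn at all

// `is_min_const_fn` then decides whether such a `const fn` must additionally
// conform to the stricter `min_const_fn` rule set (as stable const fns must).
fn main() {
    const Z: u32 = zero();       // callable in a const context
    assert_eq!((Z, not_const()), (0, 0));
}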
+ +use crate::arena::Arena; +use crate::dep_graph::DepGraph; +use crate::dep_graph::{self, DepNode, DepConstructor}; +use crate::session::Session; +use crate::session::config::{BorrowckMode, OutputFilenames}; +use crate::session::config::CrateType; +use crate::middle; +use crate::hir::{TraitCandidate, HirId, ItemKind, ItemLocalId, Node}; +use crate::hir::def::{Def, Export}; +use crate::hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; +use crate::hir::map as hir_map; +use crate::hir::map::DefPathHash; +use crate::lint::{self, Lint}; +use crate::ich::{StableHashingContext, NodeIdHashingMode}; +use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos}; +use crate::infer::outlives::free_region_map::FreeRegionMap; +use crate::middle::cstore::CrateStoreDyn; +use crate::middle::cstore::EncodedMetadata; +use crate::middle::lang_items; +use crate::middle::resolve_lifetime::{self, ObjectLifetimeDefault}; +use crate::middle::stability; +use crate::mir::{self, Mir, interpret, ProjectionKind}; +use crate::mir::interpret::{ConstValue, Allocation}; +use crate::ty::subst::{Kind, InternalSubsts, SubstsRef, Subst}; +use crate::ty::ReprOptions; +use crate::traits; +use crate::traits::{Clause, Clauses, GoalKind, Goal, Goals}; +use crate::ty::{self, DefIdTree, Ty, TypeAndMut}; +use crate::ty::{TyS, TyKind, List}; +use crate::ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const}; +use crate::ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate}; +use crate::ty::RegionKind; +use crate::ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid, ConstVid}; +use crate::ty::TyKind::*; +use crate::ty::{InferConst, ParamConst}; +use crate::ty::GenericParamDefKind; +use crate::ty::layout::{LayoutDetails, TargetDataLayout, VariantIdx}; +use crate::ty::query; +use crate::ty::steal::Steal; +use crate::ty::subst::{UserSubsts, UnpackedKind}; +use crate::ty::{BoundVar, BindingMode}; +use crate::ty::CanonicalPolyFnSig; +use crate::util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap, ItemLocalSet}; +use crate::util::nodemap::{FxHashMap, FxHashSet}; use errors::DiagnosticBuilder; -use session::Session; -use session::config::{BorrowckMode, OutputFilenames}; -use session::config::CrateType; -use middle; -use hir::{TraitCandidate, HirId, ItemKind, ItemLocalId, Node}; -use hir::def::{Def, Export}; -use hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE}; -use hir::map as hir_map; -use hir::map::DefPathHash; -use lint::{self, Lint}; -use ich::{StableHashingContext, NodeIdHashingMode}; -use infer::canonical::{CanonicalVarInfo, CanonicalVarInfos}; -use infer::outlives::free_region_map::FreeRegionMap; -use middle::cstore::CrateStoreDyn; -use middle::cstore::EncodedMetadata; -use middle::lang_items; -use middle::resolve_lifetime::{self, ObjectLifetimeDefault}; -use middle::stability; -use mir::{self, Mir, interpret, ProjectionKind}; -use mir::interpret::Allocation; -use ty::subst::{CanonicalUserSubsts, Kind, Substs, Subst}; -use ty::ReprOptions; -use traits; -use traits::{Clause, Clauses, GoalKind, Goal, Goals}; -use ty::{self, Ty, TypeAndMut}; -use ty::{TyS, TyKind, List}; -use ty::{AdtKind, AdtDef, ClosureSubsts, GeneratorSubsts, Region, Const}; -use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predicate}; -use ty::RegionKind; -use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid}; -use ty::TyKind::*; -use ty::GenericParamDefKind; -use ty::layout::{LayoutDetails, TargetDataLayout, VariantIdx}; -use ty::query; -use ty::steal::Steal; -use 
ty::BindingMode; -use ty::CanonicalTy; -use ty::CanonicalPolyFnSig; -use util::nodemap::{DefIdMap, DefIdSet, ItemLocalMap}; -use util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::interner::HashInterner; use smallvec::SmallVec; use rustc_data_structures::stable_hasher::{HashStable, hash_stable_hashmap, StableHasher, StableHasherResult, StableVec}; use arena::{TypedArena, SyncDroplessArena}; -use rustc_data_structures::indexed_vec::IndexVec; -use rustc_data_structures::sync::{self, Lrc, Lock, WorkerLocal}; +use rustc_data_structures::indexed_vec::{Idx, IndexVec}; +use rustc_data_structures::sync::{Lrc, Lock, WorkerLocal}; use std::any::Any; use std::borrow::Borrow; use std::cmp::Ordering; @@ -72,8 +64,10 @@ use std::ops::{Deref, Bound}; use std::iter; use std::sync::mpsc; use std::sync::Arc; +use std::marker::PhantomData; use rustc_target::spec::abi; -use syntax::ast::{self, NodeId}; +use rustc_macros::HashStable; +use syntax::ast; use syntax::attr; use syntax::source_map::MultiSpan; use syntax::edition::Edition; @@ -81,7 +75,7 @@ use syntax::feature_gate; use syntax::symbol::{Symbol, keywords, InternedString}; use syntax_pos::Span; -use hir; +use crate::hir; pub struct AllArenas<'tcx> { pub global: WorkerLocal>, @@ -124,16 +118,16 @@ pub struct CtxtInterners<'tcx> { /// they're accessed quite often. type_: InternedSet<'tcx, TyS<'tcx>>, type_list: InternedSet<'tcx, List>>, - substs: InternedSet<'tcx, Substs<'tcx>>, + substs: InternedSet<'tcx, InternalSubsts<'tcx>>, canonical_var_infos: InternedSet<'tcx, List>, region: InternedSet<'tcx, RegionKind>, existential_predicates: InternedSet<'tcx, List>>, predicates: InternedSet<'tcx, List>>, - const_: InternedSet<'tcx, Const<'tcx>>, clauses: InternedSet<'tcx, List>>, goal: InternedSet<'tcx, GoalKind<'tcx>>, goal_list: InternedSet<'tcx, List>>, - projs: InternedSet<'tcx, List>>, + projs: InternedSet<'tcx, List>, + const_: InternedSet<'tcx, Const<'tcx>>, } impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { @@ -147,11 +141,11 @@ impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { existential_predicates: Default::default(), canonical_var_infos: Default::default(), predicates: Default::default(), - const_: Default::default(), clauses: Default::default(), goal: Default::default(), goal_list: Default::default(), projs: Default::default(), + const_: Default::default(), } } @@ -177,7 +171,7 @@ impl<'gcx: 'tcx, 'tcx> CtxtInterners<'tcx> { // Make sure we don't end up with inference // types/regions in the global interner - if local as *const _ as usize == global as *const _ as usize { + if ptr_eq(local, global) { bug!("Attempted to intern `{:?}` which contains \ inference types/regions in the global type context", &ty_struct); @@ -226,6 +220,11 @@ pub struct CommonTypes<'tcx> { pub never: Ty<'tcx>, pub err: Ty<'tcx>, + /// Dummy type used for the `Self` of a `TraitRef` created for converting + /// a trait object, and which gets removed in `ExistentialTraitRef`. + /// This type must not appear anywhere in other converted types. + pub trait_object_dummy_self: Ty<'tcx>, + pub re_empty: Region<'tcx>, pub re_static: Region<'tcx>, pub re_erased: Region<'tcx>, @@ -323,6 +322,17 @@ impl<'a, V> LocalTableInContextMut<'a, V> { } } +/// All information necessary to validate and reveal an `impl Trait` or `existential Type` +#[derive(RustcEncodable, RustcDecodable, Debug, HashStable)] +pub struct ResolvedOpaqueTy<'tcx> { + /// The revealed type as seen by this function. 
+ pub concrete_type: Ty<'tcx>, + /// Generic parameters on the opaque type as passed by this function. + /// For `existential type Foo; fn foo() -> Foo { .. }` this is `[T, U]`, not + /// `[A, B]` + pub substs: SubstsRef<'tcx>, +} + #[derive(RustcEncodable, RustcDecodable, Debug)] pub struct TypeckTables<'tcx> { /// The HirId::owner all ItemLocalIds in this table are relative to. @@ -338,37 +348,32 @@ pub struct TypeckTables<'tcx> { /// belongs, but it may not exist if it's a tuple field (`tuple.0`). field_indices: ItemLocalMap, - /// Stores the types for various nodes in the AST. Note that this table - /// is not guaranteed to be populated until after typeck. See + /// Stores the types for various nodes in the AST. Note that this table + /// is not guaranteed to be populated until after typeck. See /// typeck::check::fn_ctxt for details. node_types: ItemLocalMap>, /// Stores the type parameters which were substituted to obtain the type - /// of this node. This only applies to nodes that refer to entities + /// of this node. This only applies to nodes that refer to entities /// parameterized by type parameters, such as generic fns, types, or /// other items. - node_substs: ItemLocalMap<&'tcx Substs<'tcx>>, + node_substs: ItemLocalMap>, - /// Stores the canonicalized types provided by the user. See also - /// `AscribeUserType` statement in MIR. - user_provided_tys: ItemLocalMap>, + /// This will either store the canonicalized types provided by the user + /// or the substitutions that the user explicitly gave (if any) attached + /// to `id`. These will not include any inferred values. The canonical form + /// is used to capture things like `_` or other unspecified values. + /// + /// For example, if the user wrote `foo.collect::>()`, then the + /// canonical substitutions would include only `for { Vec }`. + /// + /// See also `AscribeUserType` statement in MIR. + user_provided_types: ItemLocalMap>, /// Stores the canonicalized types provided by the user. See also /// `AscribeUserType` statement in MIR. pub user_provided_sigs: DefIdMap>, - /// Stores the substitutions that the user explicitly gave (if any) - /// attached to `id`. These will not include any inferred - /// values. The canonical form is used to capture things like `_` - /// or other unspecified values. - /// - /// Example: - /// - /// If the user wrote `foo.collect::>()`, then the - /// canonical substitutions would include only `for { Vec - /// }`. - user_substs: ItemLocalMap>, - adjustments: ItemLocalMap>>, /// Stores the actual binding mode for all instances of hir::BindingAnnotation. @@ -409,9 +414,9 @@ pub struct TypeckTables<'tcx> { /// MIR construction and hence is not serialized to metadata. fru_field_types: ItemLocalMap>>, - /// Maps a cast expression to its kind. This is keyed on the - /// *from* expression of the cast, not the cast itself. - cast_kinds: ItemLocalMap, + /// For every coercion cast we add the HIR node ID of the cast + /// expression to this set. + coercion_casts: ItemLocalSet, /// Set of trait imports actually used in the method resolution. /// This is used for warning unused imports. During type @@ -424,13 +429,19 @@ pub struct TypeckTables<'tcx> { pub tainted_by_errors: bool, /// Stores the free-region relationships that were deduced from - /// its where clauses and parameter types. These are then + /// its where-clauses and parameter types. These are then /// read-again by borrowck. 
pub free_region_map: FreeRegionMap<'tcx>, /// All the existential types that are restricted to concrete types /// by this function - pub concrete_existential_types: FxHashMap>, + pub concrete_existential_types: FxHashMap>, + + /// Given the closure ID this map provides the list of UpvarIDs used by it. + /// The upvarID contains the HIR node ID and it also contains the full path + /// leading to the member of the struct or tuple that is used instead of the + /// entire variable. + pub upvar_list: ty::UpvarListMap, } impl<'tcx> TypeckTables<'tcx> { @@ -439,11 +450,10 @@ impl<'tcx> TypeckTables<'tcx> { local_id_root, type_dependent_defs: Default::default(), field_indices: Default::default(), - user_provided_tys: Default::default(), + user_provided_types: Default::default(), user_provided_sigs: Default::default(), node_types: Default::default(), node_substs: Default::default(), - user_substs: Default::default(), adjustments: Default::default(), pat_binding_modes: Default::default(), pat_adjustments: Default::default(), @@ -451,11 +461,12 @@ impl<'tcx> TypeckTables<'tcx> { closure_kind_origins: Default::default(), liberated_fn_sigs: Default::default(), fru_field_types: Default::default(), - cast_kinds: Default::default(), + coercion_casts: Default::default(), used_trait_imports: Lrc::new(Default::default()), tainted_by_errors: false, free_region_map: Default::default(), concrete_existential_types: Default::default(), + upvar_list: Default::default(), } } @@ -477,6 +488,15 @@ impl<'tcx> TypeckTables<'tcx> { } } + pub fn type_dependent_def(&self, id: HirId) -> Option { + validate_hir_id_for_typeck_tables(self.local_id_root, id, false); + self.type_dependent_defs.get(&id.local_id).cloned() + } + + pub fn type_dependent_def_id(&self, id: HirId) -> Option { + self.type_dependent_def(id).map(|def| def.def_id()) + } + pub fn type_dependent_defs_mut(&mut self) -> LocalTableInContextMut<'_, Def> { LocalTableInContextMut { local_id_root: self.local_id_root, @@ -498,17 +518,21 @@ impl<'tcx> TypeckTables<'tcx> { } } - pub fn user_provided_tys(&self) -> LocalTableInContext<'_, CanonicalTy<'tcx>> { + pub fn user_provided_types( + &self + ) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> { LocalTableInContext { local_id_root: self.local_id_root, - data: &self.user_provided_tys + data: &self.user_provided_types } } - pub fn user_provided_tys_mut(&mut self) -> LocalTableInContextMut<'_, CanonicalTy<'tcx>> { + pub fn user_provided_types_mut( + &mut self + ) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> { LocalTableInContextMut { local_id_root: self.local_id_root, - data: &mut self.user_provided_tys + data: &mut self.user_provided_types } } @@ -526,58 +550,43 @@ impl<'tcx> TypeckTables<'tcx> { } } - pub fn node_id_to_type(&self, id: hir::HirId) -> Ty<'tcx> { - self.node_id_to_type_opt(id).unwrap_or_else(|| - bug!("node_id_to_type: no type for node `{}`", - tls::with(|tcx| { - let id = tcx.hir().hir_to_node_id(id); - tcx.hir().node_to_string(id) - })) + pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> { + self.node_type_opt(id).unwrap_or_else(|| + bug!("node_type: no type for node `{}`", + tls::with(|tcx| tcx.hir().hir_to_string(id))) ) } - pub fn node_id_to_type_opt(&self, id: hir::HirId) -> Option> { + pub fn node_type_opt(&self, id: hir::HirId) -> Option> { validate_hir_id_for_typeck_tables(self.local_id_root, id, false); self.node_types.get(&id.local_id).cloned() } - pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, &'tcx Substs<'tcx>> { + pub fn node_substs_mut(&mut self) -> 
LocalTableInContextMut<'_, SubstsRef<'tcx>> { LocalTableInContextMut { local_id_root: self.local_id_root, data: &mut self.node_substs } } - pub fn node_substs(&self, id: hir::HirId) -> &'tcx Substs<'tcx> { + pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> { validate_hir_id_for_typeck_tables(self.local_id_root, id, false); - self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| Substs::empty()) + self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty()) } - pub fn node_substs_opt(&self, id: hir::HirId) -> Option<&'tcx Substs<'tcx>> { + pub fn node_substs_opt(&self, id: hir::HirId) -> Option> { validate_hir_id_for_typeck_tables(self.local_id_root, id, false); self.node_substs.get(&id.local_id).cloned() } - pub fn user_substs_mut(&mut self) -> LocalTableInContextMut<'_, CanonicalUserSubsts<'tcx>> { - LocalTableInContextMut { - local_id_root: self.local_id_root, - data: &mut self.user_substs - } - } - - pub fn user_substs(&self, id: hir::HirId) -> Option> { - validate_hir_id_for_typeck_tables(self.local_id_root, id, false); - self.user_substs.get(&id.local_id).cloned() - } - // Returns the type of a pattern as a monotype. Like @expr_ty, this function // doesn't provide type parameter substitutions. pub fn pat_ty(&self, pat: &hir::Pat) -> Ty<'tcx> { - self.node_id_to_type(pat.hir_id) + self.node_type(pat.hir_id) } pub fn pat_ty_opt(&self, pat: &hir::Pat) -> Option> { - self.node_id_to_type_opt(pat.hir_id) + self.node_type_opt(pat.hir_id) } // Returns the type of an expression as a monotype. @@ -591,11 +600,11 @@ impl<'tcx> TypeckTables<'tcx> { // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize" // instead of "fn(ty) -> T with T = isize". pub fn expr_ty(&self, expr: &hir::Expr) -> Ty<'tcx> { - self.node_id_to_type(expr.hir_id) + self.node_type(expr.hir_id) } pub fn expr_ty_opt(&self, expr: &hir::Expr) -> Option> { - self.node_id_to_type_opt(expr.hir_id) + self.node_type_opt(expr.hir_id) } pub fn adjustments(&self) -> LocalTableInContext<'_, Vec>> { @@ -723,19 +732,19 @@ impl<'tcx> TypeckTables<'tcx> { } } - pub fn cast_kinds(&self) -> LocalTableInContext<'_, ty::cast::CastKind> { - LocalTableInContext { - local_id_root: self.local_id_root, - data: &self.cast_kinds - } + pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool { + validate_hir_id_for_typeck_tables(self.local_id_root, hir_id, true); + self.coercion_casts.contains(&hir_id.local_id) } - pub fn cast_kinds_mut(&mut self) -> LocalTableInContextMut<'_, ty::cast::CastKind> { - LocalTableInContextMut { - local_id_root: self.local_id_root, - data: &mut self.cast_kinds - } + pub fn set_coercion_cast(&mut self, id: ItemLocalId) { + self.coercion_casts.insert(id); + } + + pub fn coercion_casts(&self) -> &ItemLocalSet { + &self.coercion_casts } + } impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { @@ -746,11 +755,10 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { local_id_root, ref type_dependent_defs, ref field_indices, - ref user_provided_tys, + ref user_provided_types, ref user_provided_sigs, ref node_types, ref node_substs, - ref user_substs, ref adjustments, ref pat_binding_modes, ref pat_adjustments, @@ -759,22 +767,23 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { ref liberated_fn_sigs, ref fru_field_types, - ref cast_kinds, + ref coercion_casts, ref used_trait_imports, tainted_by_errors, ref free_region_map, ref concrete_existential_types, + ref upvar_list, + } = *self; hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| { 
type_dependent_defs.hash_stable(hcx, hasher); field_indices.hash_stable(hcx, hasher); - user_provided_tys.hash_stable(hcx, hasher); + user_provided_types.hash_stable(hcx, hasher); user_provided_sigs.hash_stable(hcx, hasher); node_types.hash_stable(hcx, hasher); node_substs.hash_stable(hcx, hasher); - user_substs.hash_stable(hcx, hasher); adjustments.hash_stable(hcx, hasher); pat_binding_modes.hash_stable(hcx, hasher); pat_adjustments.hash_stable(hcx, hasher); @@ -803,15 +812,125 @@ impl<'a, 'gcx> HashStable> for TypeckTables<'gcx> { closure_kind_origins.hash_stable(hcx, hasher); liberated_fn_sigs.hash_stable(hcx, hasher); fru_field_types.hash_stable(hcx, hasher); - cast_kinds.hash_stable(hcx, hasher); + coercion_casts.hash_stable(hcx, hasher); used_trait_imports.hash_stable(hcx, hasher); tainted_by_errors.hash_stable(hcx, hasher); free_region_map.hash_stable(hcx, hasher); concrete_existential_types.hash_stable(hcx, hasher); + upvar_list.hash_stable(hcx, hasher); }) } } +newtype_index! { + pub struct UserTypeAnnotationIndex { + derive [HashStable] + DEBUG_FORMAT = "UserType({})", + const START_INDEX = 0, + } +} + +/// Mapping of type annotation indices to canonical user type annotations. +pub type CanonicalUserTypeAnnotations<'tcx> = + IndexVec>; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] +pub struct CanonicalUserTypeAnnotation<'tcx> { + pub user_ty: CanonicalUserType<'tcx>, + pub span: Span, + pub inferred_ty: Ty<'tcx>, +} + +BraceStructTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for CanonicalUserTypeAnnotation<'tcx> { + user_ty, span, inferred_ty + } +} + +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for CanonicalUserTypeAnnotation<'a> { + type Lifted = CanonicalUserTypeAnnotation<'tcx>; + user_ty, span, inferred_ty + } +} + + +/// Canonicalized user type annotation. +pub type CanonicalUserType<'gcx> = Canonical<'gcx, UserType<'gcx>>; + +impl CanonicalUserType<'gcx> { + /// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`, + /// i.e., each thing is mapped to a canonical variable with the same index. + pub fn is_identity(&self) -> bool { + match self.value { + UserType::Ty(_) => false, + UserType::TypeOf(_, user_substs) => { + if user_substs.user_self_ty.is_some() { + return false; + } + + user_substs.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| { + match kind.unpack() { + UnpackedKind::Type(ty) => match ty.sty { + ty::Bound(debruijn, b) => { + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(debruijn, ty::INNERMOST); + cvar == b.var + } + _ => false, + }, + + UnpackedKind::Lifetime(r) => match r { + ty::ReLateBound(debruijn, br) => { + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(*debruijn, ty::INNERMOST); + cvar == br.assert_bound_var() + } + _ => false, + }, + + UnpackedKind::Const(ct) => match ct.val { + ConstValue::Infer(InferConst::Canonical(debruijn, b)) => { + // We only allow a `ty::INNERMOST` index in substitutions. + assert_eq!(debruijn, ty::INNERMOST); + cvar == b + } + _ => false, + }, + } + }) + }, + } + } +} + +/// A user-given type annotation attached to a constant. These arise +/// from constants that are named via paths, like `Foo::::new` and +/// so forth. +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] +pub enum UserType<'tcx> { + Ty(Ty<'tcx>), + + /// The canonical type is the result of `type_of(def_id)` with the + /// given substitutions applied. 
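// Illustrative note on the two `UserType` variants (the ids and substs shown
// here are hypothetical): an explicit ascription such as `let x: Foo<u32> = ...`
// is recorded as `UserType::Ty(<the ascribed type>)`, while a path constant such
// as `Foo::<u32>::new` is recorded as
//
//     UserType::TypeOf(def_id_of_new, UserSubsts { substs, user_self_ty: None })
//
// with `substs` carrying the user-written `u32`; `is_identity` above returns
// `true` only when such substs are the trivial `[?0, ?1, ?2, ...]` mapping.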
+ TypeOf(DefId, UserSubsts<'tcx>), +} + +EnumTypeFoldableImpl! { + impl<'tcx> TypeFoldable<'tcx> for UserType<'tcx> { + (UserType::Ty)(ty), + (UserType::TypeOf)(def, substs), + } +} + +EnumLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for UserType<'a> { + type Lifted = UserType<'tcx>; + (UserType::Ty)(ty), + (UserType::TypeOf)(def, substs), + } +} + impl<'tcx> CommonTypes<'tcx> { fn new(interners: &CtxtInterners<'tcx>) -> CommonTypes<'tcx> { let mk = |sty| CtxtInterners::intern_ty(interners, interners, sty); @@ -842,6 +961,8 @@ impl<'tcx> CommonTypes<'tcx> { f32: mk(Float(ast::FloatTy::F32)), f64: mk(Float(ast::FloatTy::F64)), + trait_object_dummy_self: mk(Infer(ty::FreshTy(0))), + re_empty: mk_region(RegionKind::ReEmpty), re_static: mk_region(RegionKind::ReStatic), re_erased: mk_region(RegionKind::ReErased), @@ -869,12 +990,13 @@ pub struct FreeRegionInfo { /// [rustc guide]: https://rust-lang.github.io/rustc-guide/ty.html #[derive(Copy, Clone)] pub struct TyCtxt<'a, 'gcx: 'tcx, 'tcx: 'a> { - gcx: &'a GlobalCtxt<'gcx>, - interners: &'a CtxtInterners<'tcx> + gcx: &'gcx GlobalCtxt<'gcx>, + interners: &'tcx CtxtInterners<'tcx>, + dummy: PhantomData<&'a ()>, } -impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> { - type Target = &'a GlobalCtxt<'gcx>; +impl<'gcx> Deref for TyCtxt<'_, 'gcx, '_> { + type Target = &'gcx GlobalCtxt<'gcx>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.gcx @@ -882,6 +1004,7 @@ impl<'a, 'gcx, 'tcx> Deref for TyCtxt<'a, 'gcx, 'tcx> { } pub struct GlobalCtxt<'tcx> { + pub arena: WorkerLocal>, global_arenas: &'tcx WorkerLocal>, global_interners: CtxtInterners<'tcx>, @@ -909,7 +1032,7 @@ pub struct GlobalCtxt<'tcx> { /// as well as all upstream crates. Only populated in incremental mode. pub def_path_hash_to_def_id: Option>, - pub(crate) queries: query::Queries<'tcx>, + pub queries: query::Queries<'tcx>, // Records the free variables referenced by every closure // expression. Do not track deps for this, just recompute it from @@ -918,6 +1041,9 @@ pub struct GlobalCtxt<'tcx> { maybe_unused_trait_imports: FxHashSet, maybe_unused_extern_crates: Vec<(DefId, Span)>, + /// A map of glob use to a set of names it actually imports. Currently only + /// used in save-analysis. + glob_map: FxHashMap>, /// Extern prelude entries. The value is `true` if the entry was introduced /// via `extern crate` item and not `--extern` option or compiler built-in. pub extern_prelude: FxHashMap, @@ -962,12 +1088,13 @@ pub struct GlobalCtxt<'tcx> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - /// Get the global TyCtxt. + /// Gets the global `TyCtxt`. 
#[inline] - pub fn global_tcx(self) -> TyCtxt<'a, 'gcx, 'gcx> { + pub fn global_tcx(self) -> TyCtxt<'gcx, 'gcx, 'gcx> { TyCtxt { gcx: self.gcx, interners: &self.gcx.global_interners, + dummy: PhantomData, } } @@ -1006,36 +1133,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.global_arenas.adt_def.alloc(def) } - pub fn alloc_byte_array(self, bytes: &[u8]) -> &'gcx [u8] { - if bytes.is_empty() { - &[] - } else { - self.global_interners.arena.alloc_slice(bytes) - } - } - - pub fn alloc_const_slice(self, values: &[&'tcx ty::Const<'tcx>]) - -> &'tcx [&'tcx ty::Const<'tcx>] { - if values.is_empty() { - &[] - } else { - self.interners.arena.alloc_slice(values) - } - } - - pub fn alloc_name_const_slice(self, values: &[(ast::Name, &'tcx ty::Const<'tcx>)]) - -> &'tcx [(ast::Name, &'tcx ty::Const<'tcx>)] { - if values.is_empty() { - &[] - } else { - self.interners.arena.alloc_slice(values) - } - } - - pub fn intern_const_alloc( - self, - alloc: Allocation, - ) -> &'gcx Allocation { + pub fn intern_const_alloc(self, alloc: Allocation) -> &'gcx Allocation { self.allocation_interner.borrow_mut().intern(alloc, |alloc| { self.global_arenas.const_allocs.alloc(alloc) }) @@ -1090,31 +1188,28 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { value.lift_to_tcx(self.global_tcx()) } - /// Returns true if self is the same as self.global_tcx(). + /// Returns `true` if self is the same as self.global_tcx(). fn is_global(self) -> bool { - let local = self.interners as *const _; - let global = &self.global_interners as *const _; - local as usize == global as usize + ptr_eq(self.interners, &self.global_interners) } - /// Create a type context and call the closure with a `TyCtxt` reference + /// Creates a type context and call the closure with a `TyCtxt` reference /// to the context. The closure enforces that the type context and any interned /// value (types, substs, etc.) can only be used while `ty::tls` has a valid /// reference to the context, to allow formatting values that need it. 
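// After this change, construction and entry become separate steps. A hedged
// sketch of the intended call pattern (all argument values are placeholders):
//
//     let gcx = TyCtxt::create_global_ctxt(
//         sess, cstore, local_providers, extern_providers, &arenas,
//         resolutions, hir_map, on_disk_query_result_cache, crate_name,
//         tx_to_llvm_workers, &output_filenames,
//     );
//     ty::tls::enter_global(&gcx, |tcx| {
//         // `tcx: TyCtxt<'gcx, 'gcx, 'gcx>` is only valid inside this closure
//     });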
- pub fn create_and_enter(s: &'tcx Session, - cstore: &'tcx CrateStoreDyn, - local_providers: ty::query::Providers<'tcx>, - extern_providers: ty::query::Providers<'tcx>, - arenas: &'tcx AllArenas<'tcx>, - resolutions: ty::Resolutions, - hir: hir_map::Map<'tcx>, - on_disk_query_result_cache: query::OnDiskCache<'tcx>, - crate_name: &str, - tx: mpsc::Sender>, - output_filenames: &OutputFilenames, - f: F) -> R - where F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'tcx>) -> R - { + pub fn create_global_ctxt( + s: &'tcx Session, + cstore: &'tcx CrateStoreDyn, + local_providers: ty::query::Providers<'tcx>, + extern_providers: ty::query::Providers<'tcx>, + arenas: &'tcx AllArenas<'tcx>, + resolutions: ty::Resolutions, + hir: hir_map::Map<'tcx>, + on_disk_query_result_cache: query::OnDiskCache<'tcx>, + crate_name: &str, + tx: mpsc::Sender>, + output_filenames: &OutputFilenames, + ) -> GlobalCtxt<'tcx> { let data_layout = TargetDataLayout::parse(&s.target.target).unwrap_or_else(|err| { s.fatal(&err); }); @@ -1166,9 +1261,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Lrc::new(StableVec::new(v))); } - let gcx = &GlobalCtxt { + GlobalCtxt { sess: s, cstore, + arena: WorkerLocal::new(|_| Arena::default()), global_arenas: &arenas.global, global_interners: interners, dep_graph, @@ -1190,6 +1286,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { .into_iter() .map(|(id, sp)| (hir.local_def_id(id), sp)) .collect(), + glob_map: resolutions.glob_map.into_iter().map(|(id, names)| { + (hir.local_def_id(id), names) + }).collect(), extern_prelude: resolutions.extern_prelude, hir_map: hir, def_path_hash_to_def_id, @@ -1209,11 +1308,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { alloc_map: Lock::new(interpret::AllocMap::new()), tx_to_llvm_workers: Lock::new(tx), output_filenames: Arc::new(output_filenames.clone()), - }; - - sync::assert_send_val(&gcx); - - tls::enter_global(gcx, f) + } } pub fn consider_optimizing String>(&self, msg: T) -> bool { @@ -1283,7 +1378,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Convert a `DefId` into its fully expanded `DefPath` (every + /// Converts a `DefId` into its fully expanded `DefPath` (every /// `DefId` is really just an interned def-path). /// /// Note that if `id` is not local to this crate, the result will @@ -1336,8 +1431,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.cstore.crate_data_as_rc_any(cnum) } + #[inline(always)] pub fn create_stable_hashing_context(self) -> StableHashingContext<'a> { - let krate = self.dep_graph.with_ignore(|| self.hir().krate()); + let krate = self.gcx.hir_map.forest.untracked_krate(); StableHashingContext::new(self.sess, krate, @@ -1359,21 +1455,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.dep_graph.with_task(dep_node, self, crate_hash, - |_, x| x // No transformation needed + |_, x| x, // No transformation needed + dep_graph::hash_result, ); } } - // This method exercises the `in_scope_traits_map` query for all possible - // values so that we have their fingerprints available in the DepGraph. - // This is only required as long as we still use the old dependency tracking - // which needs to have the fingerprints of all input nodes beforehand. 
- pub fn precompute_in_scope_traits_hashes(self) { - for &def_index in self.trait_map.keys() { - self.in_scope_traits_map(def_index); - } - } - pub fn serialize_query_result_cache(self, encoder: &mut E) -> Result<(), E::Error> @@ -1523,16 +1610,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let (suitable_region_binding_scope, bound_region) = match *region { ty::ReFree(ref free_region) => (free_region.scope, free_region.bound_region), ty::ReEarlyBound(ref ebr) => ( - self.parent_def_id(ebr.def_id).unwrap(), + self.parent(ebr.def_id).unwrap(), ty::BoundRegion::BrNamed(ebr.def_id, ebr.name), ), _ => return None, // not a free region }; - let node_id = self.hir() - .as_local_node_id(suitable_region_binding_scope) + let hir_id = self.hir() + .as_local_hir_id(suitable_region_binding_scope) .unwrap(); - let is_impl_item = match self.hir().find(node_id) { + let is_impl_item = match self.hir().find_by_hir_id(hir_id) { Some(Node::Item(..)) | Some(Node::TraitItem(..)) => false, Some(Node::ImplItem(..)) => { self.is_bound_region_in_impl_item(suitable_region_binding_scope) @@ -1552,8 +1639,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { scope_def_id: DefId, ) -> Option> { // HACK: `type_of_def_id()` will fail on these (#55796), so return None - let node_id = self.hir().as_local_node_id(scope_def_id).unwrap(); - match self.hir().get(node_id) { + let hir_id = self.hir().as_local_hir_id(scope_def_id).unwrap(); + match self.hir().get_by_hir_id(hir_id) { Node::Item(item) => { match item.node { ItemKind::Fn(..) => { /* type_of_def_id() will work */ } @@ -1599,6 +1686,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } false } + + /// Determine whether identifiers in the assembly have strict naming rules. + /// Currently, only NVPTX* targets need it. + pub fn has_strict_asm_symbol_naming(&self) -> bool { + self.gcx.sess.target.target.arch.contains("nvptx") + } } impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { @@ -1609,30 +1702,36 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { } } -impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { +impl<'gcx> GlobalCtxt<'gcx> { /// Call the closure with a local `TyCtxt` using the given arena. - pub fn enter_local( - &self, + /// `interners` is a slot passed so we can create a CtxtInterners + /// with the same lifetime as `arena`. + pub fn enter_local<'tcx, F, R>( + &'gcx self, arena: &'tcx SyncDroplessArena, + interners: &'tcx mut Option>, f: F ) -> R where - F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R + F: FnOnce(TyCtxt<'tcx, 'gcx, 'tcx>) -> R, + 'gcx: 'tcx, { - let interners = CtxtInterners::new(arena); + *interners = Some(CtxtInterners::new(&arena)); let tcx = TyCtxt { gcx: self, - interners: &interners, + interners: interners.as_ref().unwrap(), + dummy: PhantomData, }; ty::tls::with_related_context(tcx.global_tcx(), |icx| { let new_icx = ty::tls::ImplicitCtxt { tcx, query: icx.query.clone(), + diagnostics: icx.diagnostics, layout_depth: icx.layout_depth, - task: icx.task, + task_deps: icx.task_deps, }; - ty::tls::enter_context(&new_icx, |new_icx| { - f(new_icx.tcx) + ty::tls::enter_context(&new_icx, |_| { + f(tcx) }) }) } @@ -1641,7 +1740,7 @@ impl<'gcx: 'tcx, 'tcx> GlobalCtxt<'gcx> { /// A trait implemented for all X<'a> types which can be safely and /// efficiently converted to X<'tcx> as long as they are part of the /// provided TyCtxt<'tcx>. -/// This can be done, for example, for Ty<'tcx> or &'tcx Substs<'tcx> +/// This can be done, for example, for Ty<'tcx> or SubstsRef<'tcx> /// by looking them up in their respective interners. 
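// (A minimal usage sketch: given `tcx: TyCtxt<'_, 'gcx, 'tcx>` and a value
// `ty: Ty<'a>` interned in a different, e.g. inference-local, context,
//
//     if let Some(lifted) = ty.lift_to_tcx(tcx) {
//         // `lifted: Ty<'tcx>` is now usable with `tcx`'s interners
//     }
//
// returns `None` when the value is interned neither in `tcx` nor in the
// global tcx.)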
/// /// However, this is still not the best implementation as it does @@ -1660,230 +1759,91 @@ pub trait Lift<'tcx>: fmt::Debug { fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option; } -impl<'a, 'tcx> Lift<'tcx> for Ty<'a> { - type Lifted = Ty<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for Region<'a> { - type Lifted = Region<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for Goal<'a> { - type Lifted = Goal<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; - fn lift_to_tcx<'b, 'gcx>( - &self, - tcx: TyCtxt<'b, 'gcx, 'tcx>, - ) -> Option<&'tcx List>> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; - fn lift_to_tcx<'b, 'gcx>( - &self, - tcx: TyCtxt<'b, 'gcx, 'tcx>, - ) -> Option<&'tcx List>> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for &'a Const<'a> { - type Lifted = &'tcx Const<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Const<'tcx>> { - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} -impl<'a, 'tcx> Lift<'tcx> for &'a Substs<'a> { - type Lifted = &'tcx Substs<'tcx>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option<&'tcx Substs<'tcx>> { - if self.len() == 0 { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(&self[..] as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None +macro_rules! nop_lift { + ($ty:ty => $lifted:ty) => { + impl<'a, 'tcx> Lift<'tcx> for $ty { + type Lifted = $lifted; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. 
+ if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } } - } + }; } -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) - -> Option<&'tcx List>> { - if self.len() == 0 { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None +macro_rules! nop_list_lift { + ($ty:ty => $lifted:ty) => { + impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> { + type Lifted = &'tcx List<$lifted>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + if self.is_empty() { + return Some(List::empty()); + } + if tcx.interners.arena.in_arena(*self as *const _) { + return Some(unsafe { mem::transmute(*self) }); + } + // Also try in the global tcx if we're not that. + if !tcx.is_global() { + self.lift_to_tcx(tcx.global_tcx()) + } else { + None + } + } } - } + }; } -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) - -> Option<&'tcx List>> { - if self.is_empty() { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} +nop_lift!{Ty<'a> => Ty<'tcx>} +nop_lift!{Region<'a> => Region<'tcx>} +nop_lift!{Goal<'a> => Goal<'tcx>} +nop_lift!{&'a Const<'a> => &'tcx Const<'tcx>} -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) - -> Option<&'tcx List>> { - if self.is_empty() { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} +nop_list_lift!{Goal<'a> => Goal<'tcx>} +nop_list_lift!{Clause<'a> => Clause<'tcx>} +nop_list_lift!{Ty<'a> => Ty<'tcx>} +nop_list_lift!{ExistentialPredicate<'a> => ExistentialPredicate<'tcx>} +nop_list_lift!{Predicate<'a> => Predicate<'tcx>} +nop_list_lift!{CanonicalVarInfo => CanonicalVarInfo} +nop_list_lift!{ProjectionKind => ProjectionKind} -impl<'a, 'tcx> Lift<'tcx> for &'a List { - type Lifted = &'tcx List; - fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - if self.len() == 0 { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. - if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } - } -} +// this is the impl for `&'a InternalSubsts<'a>` +nop_list_lift!{Kind<'a> => Kind<'tcx>} -impl<'a, 'tcx> Lift<'tcx> for &'a List> { - type Lifted = &'tcx List>; +impl<'a, 'tcx> Lift<'tcx> for &'a mir::interpret::Allocation { + type Lifted = &'tcx mir::interpret::Allocation; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - if self.len() == 0 { - return Some(List::empty()); - } - if tcx.interners.arena.in_arena(*self as *const _) { - return Some(unsafe { mem::transmute(*self) }); - } - // Also try in the global tcx if we're not that. 
- if !tcx.is_global() { - self.lift_to_tcx(tcx.global_tcx()) - } else { - None - } + assert!(tcx.global_arenas.const_allocs.in_arena(*self as *const _)); + Some(unsafe { mem::transmute(*self) }) } } pub mod tls { - use super::{GlobalCtxt, TyCtxt}; + use super::{GlobalCtxt, TyCtxt, ptr_eq}; use std::fmt; use std::mem; + use std::marker::PhantomData; use syntax_pos; - use ty::query; + use crate::ty::query; use errors::{Diagnostic, TRACK_DIAGNOSTICS}; use rustc_data_structures::OnDrop; use rustc_data_structures::sync::{self, Lrc, Lock}; - use dep_graph::OpenTask; + use rustc_data_structures::thin_vec::ThinVec; + use crate::dep_graph::TaskDeps; - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] use std::cell::Cell; - #[cfg(parallel_queries)] - use rayon_core; + #[cfg(parallel_compiler)] + use rustc_rayon_core as rayon_core; /// This is the implicit state of rustc. It contains the current /// TyCtxt and query. It is updated when creating a local interner or @@ -1891,46 +1851,55 @@ pub mod tls { /// you should also have access to an ImplicitCtxt through the functions /// in this module. #[derive(Clone)] - pub struct ImplicitCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub struct ImplicitCtxt<'a, 'gcx: 'tcx, 'tcx> { /// The current TyCtxt. Initially created by `enter_global` and updated /// by `enter_local` with a new local interner - pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + pub tcx: TyCtxt<'tcx, 'gcx, 'tcx>, - /// The current query job, if any. This is updated by start_job in + /// The current query job, if any. This is updated by JobOwner::start in /// ty::query::plumbing when executing a query pub query: Option>>, + /// Where to store diagnostics for the current query job, if any. + /// This is updated by JobOwner::start in ty::query::plumbing when executing a query + pub diagnostics: Option<&'a Lock>>, + /// Used to prevent layout from recursing too deeply. pub layout_depth: usize, /// The current dep graph task. This is used to add dependencies to queries /// when executing them - pub task: &'a OpenTask, + pub task_deps: Option<&'a Lock>, } /// Sets Rayon's thread local variable which is preserved for Rayon jobs /// to `value` during the call to `f`. It is restored to its previous value after. /// This is used to set the pointer to the new ImplicitCtxt. - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] + #[inline] fn set_tlv R, R>(value: usize, f: F) -> R { rayon_core::tlv::with(value, f) } /// Gets Rayon's thread local variable which is preserved for Rayon jobs. /// This is used to get the pointer to the current ImplicitCtxt. - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] + #[inline] fn get_tlv() -> usize { rayon_core::tlv::get() } - /// A thread local variable which stores a pointer to the current ImplicitCtxt - #[cfg(not(parallel_queries))] - thread_local!(static TLV: Cell = Cell::new(0)); + #[cfg(not(parallel_compiler))] + thread_local! { + /// A thread local variable which stores a pointer to the current ImplicitCtxt. + static TLV: Cell = Cell::new(0); + } /// Sets TLV to `value` during the call to `f`. /// It is restored to its previous value after. /// This is used to set the pointer to the new ImplicitCtxt. - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] + #[inline] fn set_tlv R, R>(value: usize, f: F) -> R { let old = get_tlv(); let _reset = OnDrop(move || TLV.with(|tlv| tlv.set(old))); @@ -1939,7 +1908,7 @@ pub mod tls { } /// This is used to get the pointer to the current ImplicitCtxt. 
- #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] fn get_tlv() -> usize { TLV.with(|tlv| tlv.get()) } @@ -1962,8 +1931,9 @@ pub mod tls { fn track_diagnostic(diagnostic: &Diagnostic) { with_context_opt(|icx| { if let Some(icx) = icx { - if let Some(ref query) = icx.query { - query.diagnostics.lock().push(diagnostic.clone()); + if let Some(ref diagnostics) = icx.diagnostics { + let mut diagnostics = diagnostics.lock(); + diagnostics.extend(Some(diagnostic.clone())); } } }) @@ -1995,6 +1965,7 @@ pub mod tls { } /// Sets `context` as the new current ImplicitCtxt for the duration of the function `f` + #[inline] pub fn enter_context<'a, 'gcx: 'tcx, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'gcx, 'tcx>, f: F) -> R where F: FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R @@ -2008,39 +1979,40 @@ pub mod tls { /// creating a initial TyCtxt and ImplicitCtxt. /// This happens once per rustc session and TyCtxts only exists /// inside the `f` function. - pub fn enter_global<'gcx, F, R>(gcx: &GlobalCtxt<'gcx>, f: F) -> R - where F: for<'a> FnOnce(TyCtxt<'a, 'gcx, 'gcx>) -> R + pub fn enter_global<'gcx, F, R>(gcx: &'gcx GlobalCtxt<'gcx>, f: F) -> R + where F: FnOnce(TyCtxt<'gcx, 'gcx, 'gcx>) -> R { - with_thread_locals(|| { - // Update GCX_PTR to indicate there's a GlobalCtxt available - GCX_PTR.with(|lock| { - *lock.lock() = gcx as *const _ as usize; - }); - // Set GCX_PTR back to 0 when we exit - let _on_drop = OnDrop(move || { - GCX_PTR.with(|lock| *lock.lock() = 0); - }); + // Update GCX_PTR to indicate there's a GlobalCtxt available + GCX_PTR.with(|lock| { + *lock.lock() = gcx as *const _ as usize; + }); + // Set GCX_PTR back to 0 when we exit + let _on_drop = OnDrop(move || { + GCX_PTR.with(|lock| *lock.lock() = 0); + }); - let tcx = TyCtxt { - gcx, - interners: &gcx.global_interners, - }; - let icx = ImplicitCtxt { - tcx, - query: None, - layout_depth: 0, - task: &OpenTask::Ignore, - }; - enter_context(&icx, |_| { - f(tcx) - }) + let tcx = TyCtxt { + gcx, + interners: &gcx.global_interners, + dummy: PhantomData, + }; + let icx = ImplicitCtxt { + tcx, + query: None, + diagnostics: None, + layout_depth: 0, + task_deps: None, + }; + enter_context(&icx, |_| { + f(tcx) }) } - /// Stores a pointer to the GlobalCtxt if one is available. - /// This is used to access the GlobalCtxt in the deadlock handler - /// given to Rayon. - scoped_thread_local!(pub static GCX_PTR: Lock); + scoped_thread_local! { + /// Stores a pointer to the GlobalCtxt if one is available. + /// This is used to access the GlobalCtxt in the deadlock handler given to Rayon. + pub static GCX_PTR: Lock + } /// Creates a TyCtxt and ImplicitCtxt based on the GCX_PTR thread local. /// This is used in the deadlock handler. @@ -2053,17 +2025,20 @@ pub mod tls { let tcx = TyCtxt { gcx, interners: &gcx.global_interners, + dummy: PhantomData, }; let icx = ImplicitCtxt { query: None, + diagnostics: None, tcx, layout_depth: 0, - task: &OpenTask::Ignore, + task_deps: None, }; enter_context(&icx, |_| f(tcx)) } /// Allows access to the current ImplicitCtxt in a closure if one is available + #[inline] pub fn with_context_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'gcx, 'tcx>>) -> R { @@ -2081,6 +2056,7 @@ pub mod tls { /// Allows access to the current ImplicitCtxt. 
/// Panics if there is no ImplicitCtxt available + #[inline] pub fn with_context(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(&ImplicitCtxt<'a, 'gcx, 'tcx>) -> R { @@ -2092,13 +2068,13 @@ pub mod tls { /// with the same 'gcx lifetime as the TyCtxt passed in. /// This will panic if you pass it a TyCtxt which has a different global interner from /// the current ImplicitCtxt's tcx field. + #[inline] pub fn with_related_context<'a, 'gcx, 'tcx1, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx1>, f: F) -> R where F: for<'b, 'tcx2> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx2>) -> R { with_context(|context| { unsafe { - let gcx = tcx.gcx as *const _ as usize; - assert!(context.tcx.gcx as *const _ as usize == gcx); + assert!(ptr_eq(context.tcx.gcx, tcx.gcx)); let context: &ImplicitCtxt<'_, '_, '_> = mem::transmute(context); f(context) } @@ -2110,15 +2086,14 @@ pub mod tls { /// is given an ImplicitCtxt with the same 'tcx and 'gcx lifetimes as the TyCtxt passed in. /// This will panic if you pass it a TyCtxt which has a different global interner or /// a different local interner from the current ImplicitCtxt's tcx field. + #[inline] pub fn with_fully_related_context<'a, 'gcx, 'tcx, F, R>(tcx: TyCtxt<'a, 'gcx, 'tcx>, f: F) -> R where F: for<'b> FnOnce(&ImplicitCtxt<'b, 'gcx, 'tcx>) -> R { with_context(|context| { unsafe { - let gcx = tcx.gcx as *const _ as usize; - let interners = tcx.interners as *const _ as usize; - assert!(context.tcx.gcx as *const _ as usize == gcx); - assert!(context.tcx.interners as *const _ as usize == interners); + assert!(ptr_eq(context.tcx.gcx, tcx.gcx)); + assert!(ptr_eq(context.tcx.interners, tcx.interners)); let context: &ImplicitCtxt<'_, '_, '_> = mem::transmute(context); f(context) } @@ -2127,6 +2102,7 @@ pub mod tls { /// Allows access to the TyCtxt in the current ImplicitCtxt. /// Panics if there is no ImplicitCtxt available + #[inline] pub fn with(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(TyCtxt<'a, 'gcx, 'tcx>) -> R { @@ -2135,6 +2111,7 @@ pub mod tls { /// Allows access to the TyCtxt in the current ImplicitCtxt. /// The closure is passed None if there is no ImplicitCtxt available + #[inline] pub fn with_opt(f: F) -> R where F: for<'a, 'gcx, 'tcx> FnOnce(Option>) -> R { @@ -2148,21 +2125,25 @@ macro_rules! sty_debug_print { // variable names. #[allow(non_snake_case)] mod inner { - use ty::{self, TyCtxt}; - use ty::context::Interned; + use crate::ty::{self, TyCtxt}; + use crate::ty::context::Interned; #[derive(Copy, Clone)] struct DebugStat { total: usize, - region_infer: usize, + lt_infer: usize, ty_infer: usize, - both_infer: usize, + ct_infer: usize, + all_infer: usize, } pub fn go(tcx: TyCtxt<'_, '_, '_>) { let mut total = DebugStat { total: 0, - region_infer: 0, ty_infer: 0, both_infer: 0, + lt_infer: 0, + ty_infer: 0, + ct_infer: 0, + all_infer: 0, }; $(let mut $variant = total;)* @@ -2173,31 +2154,35 @@ macro_rules! sty_debug_print { ty::Error => /* unimportant */ continue, $(ty::$variant(..) 
=> &mut $variant,)* }; - let region = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER); + let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER); let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER); + let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER); variant.total += 1; total.total += 1; - if region { total.region_infer += 1; variant.region_infer += 1 } + if lt { total.lt_infer += 1; variant.lt_infer += 1 } if ty { total.ty_infer += 1; variant.ty_infer += 1 } - if region && ty { total.both_infer += 1; variant.both_infer += 1 } + if ct { total.ct_infer += 1; variant.ct_infer += 1 } + if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 } } - println!("Ty interner total ty region both"); + println!("Ty interner total ty lt ct all"); $(println!(" {:18}: {uses:6} {usespc:4.1}%, \ - {ty:4.1}% {region:5.1}% {both:4.1}%", - stringify!($variant), - uses = $variant.total, - usespc = $variant.total as f64 * 100.0 / total.total as f64, - ty = $variant.ty_infer as f64 * 100.0 / total.total as f64, - region = $variant.region_infer as f64 * 100.0 / total.total as f64, - both = $variant.both_infer as f64 * 100.0 / total.total as f64); - )* + {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%", + stringify!($variant), + uses = $variant.total, + usespc = $variant.total as f64 * 100.0 / total.total as f64, + ty = $variant.ty_infer as f64 * 100.0 / total.total as f64, + lt = $variant.lt_infer as f64 * 100.0 / total.total as f64, + ct = $variant.ct_infer as f64 * 100.0 / total.total as f64, + all = $variant.all_infer as f64 * 100.0 / total.total as f64); + )* println!(" total {uses:6} \ - {ty:4.1}% {region:5.1}% {both:4.1}%", - uses = total.total, - ty = total.ty_infer as f64 * 100.0 / total.total as f64, - region = total.region_infer as f64 * 100.0 / total.total as f64, - both = total.both_infer as f64 * 100.0 / total.total as f64) + {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%", + uses = total.total, + ty = total.ty_infer as f64 * 100.0 / total.total as f64, + lt = total.lt_infer as f64 * 100.0 / total.total as f64, + ct = total.ct_infer as f64 * 100.0 / total.total as f64, + all = total.all_infer as f64 * 100.0 / total.total as f64) } } @@ -2213,7 +2198,7 @@ impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> { Generator, GeneratorWitness, Dynamic, Closure, Tuple, Bound, Param, Infer, UnnormalizedProjection, Projection, Opaque, Foreign); - println!("Substs interner: #{}", self.interners.substs.borrow().len()); + println!("InternalSubsts interner: #{}", self.interners.substs.borrow().len()); println!("Region interner: #{}", self.interners.region.borrow().len()); println!("Stability interner: #{}", self.stability_interner.borrow().len()); println!("Allocation interner: #{}", self.allocation_interner.borrow().len()); @@ -2280,15 +2265,15 @@ impl<'tcx: 'lcx, 'lcx> Borrow<[CanonicalVarInfo]> for Interned<'tcx, List Borrow<[Kind<'lcx>]> for Interned<'tcx, Substs<'tcx>> { +impl<'tcx: 'lcx, 'lcx> Borrow<[Kind<'lcx>]> for Interned<'tcx, InternalSubsts<'tcx>> { fn borrow<'a>(&'a self) -> &'a [Kind<'lcx>] { &self.0[..] } } -impl<'tcx: 'lcx, 'lcx> Borrow<[ProjectionKind<'lcx>]> - for Interned<'tcx, List>> { - fn borrow<'a>(&'a self) -> &'a [ProjectionKind<'lcx>] { +impl<'tcx> Borrow<[ProjectionKind]> + for Interned<'tcx, List> { + fn borrow<'a>(&'a self) -> &'a [ProjectionKind] { &self.0[..] 
} } @@ -2411,27 +2396,27 @@ pub fn keep_local<'tcx, T: ty::TypeFoldable<'tcx>>(x: &T) -> bool { direct_interners!('tcx, region: mk_region(|r: &RegionKind| r.keep_in_local_tcx()) -> RegionKind, - const_: mk_const(|c: &Const<'_>| keep_local(&c.ty) || keep_local(&c.val)) -> Const<'tcx>, - goal: mk_goal(|c: &GoalKind<'_>| keep_local(c)) -> GoalKind<'tcx> + goal: mk_goal(|c: &GoalKind<'_>| keep_local(c)) -> GoalKind<'tcx>, + const_: mk_const(|c: &Const<'_>| keep_local(&c)) -> Const<'tcx> ); macro_rules! slice_interners { - ($($field:ident: $method:ident($ty:ident)),+) => ( + ($($field:ident: $method:ident($ty:ty)),+) => ( $(intern_method!( 'tcx, $field: $method( - &[$ty<'tcx>], + &[$ty], |a, v| List::from_arena(a, v), Deref::deref, - |xs: &[$ty<'_>]| xs.iter().any(keep_local)) -> List<$ty<'tcx>>);)+ - ) + |xs: &[$ty]| xs.iter().any(keep_local)) -> List<$ty>);)+ + ); } slice_interners!( - existential_predicates: _intern_existential_predicates(ExistentialPredicate), - predicates: _intern_predicates(Predicate), - type_list: _intern_type_list(Ty), - substs: _intern_substs(Kind), - clauses: _intern_clauses(Clause), - goal_list: _intern_goals(Goal), + existential_predicates: _intern_existential_predicates(ExistentialPredicate<'tcx>), + predicates: _intern_predicates(Predicate<'tcx>), + type_list: _intern_type_list(Ty<'tcx>), + substs: _intern_substs(Kind<'tcx>), + clauses: _intern_clauses(Clause<'tcx>), + goal_list: _intern_goals(Goal<'tcx>), projs: _intern_projs(ProjectionKind) ); @@ -2466,7 +2451,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// type with the same signature. Detuples and so forth -- so /// e.g., if we have a sig with `Fn<(u32, i32)>` then you would get /// a `fn(u32, i32)`. - pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> { + /// `unsafety` determines the unsafety of the `fn` type. If you pass + /// `hir::Unsafety::Unsafe` in the previous example, then you would get + /// an `unsafe fn (u32, i32)`. + /// It cannot convert a closure that requires unsafe. + pub fn coerce_closure_fn_ty(self, sig: PolyFnSig<'tcx>, unsafety: hir::Unsafety) -> Ty<'tcx> { let converted_sig = sig.map_bound(|s| { let params_iter = match s.inputs()[0].sty { ty::Tuple(params) => { @@ -2477,8 +2466,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_fn_sig( params_iter, s.output(), - s.variadic, - hir::Unsafety::Normal, + s.c_variadic, + unsafety, abi::Abi::Rust, ) }); @@ -2531,7 +2520,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } #[inline] - pub fn mk_adt(self, def: &'tcx AdtDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + pub fn mk_adt(self, def: &'tcx AdtDef, substs: SubstsRef<'tcx>) -> Ty<'tcx> { // take a copy of substs so that we own the vectors inside self.mk_ty(Adt(def, substs)) } @@ -2544,9 +2533,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> { let def_id = self.require_lang_item(lang_items::OwnedBoxLangItem); let adt_def = self.adt_def(def_id); - let substs = Substs::for_item(self, def_id, |param, substs| { + let substs = InternalSubsts::for_item(self, def_id, |param, substs| { match param.kind { - GenericParamDefKind::Lifetime => bug!(), + GenericParamDefKind::Lifetime | + GenericParamDefKind::Const => { + bug!() + } GenericParamDefKind::Type { has_default, .. 
} => { if param.index == 0 { ty.into() @@ -2597,7 +2589,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { #[inline] pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> { - self.mk_ty(Array(ty, ty::Const::from_usize(self, n))) + self.mk_ty(Array(ty, self.mk_const( + ty::Const::from_usize(self.global_tcx(), n) + ))) } #[inline] @@ -2635,7 +2629,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { #[inline] pub fn mk_fn_def(self, def_id: DefId, - substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + substs: SubstsRef<'tcx>) -> Ty<'tcx> { self.mk_ty(FnDef(def_id, substs)) } @@ -2656,7 +2650,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { #[inline] pub fn mk_projection(self, item_def_id: DefId, - substs: &'tcx Substs<'tcx>) + substs: SubstsRef<'tcx>) -> Ty<'tcx> { self.mk_ty(Projection(ProjectionTy { item_def_id, @@ -2685,10 +2679,18 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } #[inline] - pub fn mk_var(self, v: TyVid) -> Ty<'tcx> { + pub fn mk_ty_var(self, v: TyVid) -> Ty<'tcx> { self.mk_infer(TyVar(v)) } + #[inline] + pub fn mk_const_var(self, v: ConstVid<'tcx>, ty: Ty<'tcx>) -> &'tcx Const<'tcx> { + self.mk_const(ty::Const { + val: ConstValue::Infer(InferConst::Var(v)), + ty, + }) + } + #[inline] pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> { self.mk_infer(IntVar(v)) @@ -2711,6 +2713,19 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.mk_ty(Param(ParamTy { idx: index, name: name })) } + #[inline] + pub fn mk_const_param( + self, + index: u32, + name: InternedString, + ty: Ty<'tcx> + ) -> &'tcx Const<'tcx> { + self.mk_const(ty::Const { + val: ConstValue::Param(ParamConst { index, name }), + ty, + }) + } + #[inline] pub fn mk_self_type(self) -> Ty<'tcx> { self.mk_ty_param(0, keywords::SelfUpper.name().as_interned_str()) @@ -2721,12 +2736,15 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { GenericParamDefKind::Lifetime => { self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into() } - GenericParamDefKind::Type {..} => self.mk_ty_param(param.index, param.name).into(), + GenericParamDefKind::Type { .. 
} => self.mk_ty_param(param.index, param.name).into(), + GenericParamDefKind::Const => { + self.mk_const_param(param.index, param.name, self.type_of(param.def_id)).into() + } } } #[inline] - pub fn mk_opaque(self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> { + pub fn mk_opaque(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> { self.mk_ty(Opaque(def_id, substs)) } @@ -2766,7 +2784,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn intern_projs(self, ps: &[ProjectionKind<'tcx>]) -> &'tcx List> { + pub fn intern_projs(self, ps: &[ProjectionKind]) -> &'tcx List { if ps.len() == 0 { List::empty() } else { @@ -2801,7 +2819,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn mk_fn_sig(self, inputs: I, output: I::Item, - variadic: bool, + c_variadic: bool, unsafety: hir::Unsafety, abi: abi::Abi) -> , ty::FnSig<'tcx>>>::Output @@ -2810,7 +2828,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { { inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig { inputs_and_output: self.intern_type_list(xs), - variadic, unsafety, abi + c_variadic, unsafety, abi }) } @@ -2839,7 +2857,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn mk_substs_trait(self, self_ty: Ty<'tcx>, rest: &[Kind<'tcx>]) - -> &'tcx Substs<'tcx> + -> SubstsRef<'tcx> { self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned())) } @@ -2860,14 +2878,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.struct_span_lint_hir(lint, hir_id, span.into(), msg).emit() } - pub fn lint_node>(self, - lint: &'static Lint, - id: NodeId, - span: S, - msg: &str) { - self.struct_span_lint_node(lint, id, span.into(), msg).emit() - } - pub fn lint_hir_note>(self, lint: &'static Lint, hir_id: HirId, @@ -2881,40 +2891,53 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn lint_node_note>(self, lint: &'static Lint, - id: NodeId, + id: hir::HirId, span: S, msg: &str, note: &str) { - let mut err = self.struct_span_lint_node(lint, id, span.into(), msg); + let mut err = self.struct_span_lint_hir(lint, id, span.into(), msg); err.note(note); err.emit() } - pub fn lint_level_at_node(self, lint: &'static Lint, mut id: NodeId) - -> (lint::Level, lint::LintSource) - { - // Right now we insert a `with_ignore` node in the dep graph here to - // ignore the fact that `lint_levels` below depends on the entire crate. - // For now this'll prevent false positives of recompiling too much when - // anything changes. - // - // Once red/green incremental compilation lands we should be able to - // remove this because while the crate changes often the lint level map - // will change rarely. - self.dep_graph.with_ignore(|| { - let sets = self.lint_levels(LOCAL_CRATE); - loop { - let hir_id = self.hir().definitions().node_to_hir_id(id); - if let Some(pair) = sets.level_and_source(lint, hir_id, self.sess) { - return pair - } - let next = self.hir().get_parent_node(id); - if next == id { - bug!("lint traversal reached the root of the crate"); - } - id = next; + /// Walks upwards from `id` to find a node which might change lint levels with attributes. + /// It stops at `bound` and just returns it if reached. 
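// (Hedged usage sketch; `expr_hir_id` and `body_owner_id` are hypothetical ids:
//
//     let root = tcx.maybe_lint_level_root_bounded(expr_hir_id, body_owner_id);
//
// walks parent nodes starting at `expr_hir_id` and returns the first node that
// might carry lint-level attributes, or `body_owner_id` if it is reached first.)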
+ pub fn maybe_lint_level_root_bounded( + self, + mut id: hir::HirId, + bound: hir::HirId, + ) -> hir::HirId { + loop { + if id == bound { + return bound; } - }) + if lint::maybe_lint_level_root(self, id) { + return id; + } + let next = self.hir().get_parent_node_by_hir_id(id); + if next == id { + bug!("lint traversal reached the root of the crate"); + } + id = next; + } + } + + pub fn lint_level_at_node( + self, + lint: &'static Lint, + mut id: hir::HirId + ) -> (lint::Level, lint::LintSource) { + let sets = self.lint_levels(LOCAL_CRATE); + loop { + if let Some(pair) = sets.level_and_source(lint, id, self.sess) { + return pair + } + let next = self.hir().get_parent_node_by_hir_id(id); + if next == id { + bug!("lint traversal reached the root of the crate"); + } + id = next; + } } pub fn struct_span_lint_hir>(self, @@ -2924,23 +2947,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { msg: &str) -> DiagnosticBuilder<'tcx> { - let node_id = self.hir().hir_to_node_id(hir_id); - let (level, src) = self.lint_level_at_node(lint, node_id); - lint::struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg) - } - - pub fn struct_span_lint_node>(self, - lint: &'static Lint, - id: NodeId, - span: S, - msg: &str) - -> DiagnosticBuilder<'tcx> - { - let (level, src) = self.lint_level_at_node(lint, id); + let (level, src) = self.lint_level_at_node(lint, hir_id); lint::struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg) } - pub fn struct_lint_node(self, lint: &'static Lint, id: NodeId, msg: &str) + pub fn struct_lint_node(self, lint: &'static Lint, id: HirId, msg: &str) -> DiagnosticBuilder<'tcx> { let (level, src) = self.lint_level_at_node(lint, id); @@ -3015,10 +3026,13 @@ impl InternIteratorElement for Result { } } +// We are comparing types with different invariant lifetimes, so `ptr::eq` +// won't work for us. +fn ptr_eq(t: *const T, u: *const U) -> bool { + t as *const () == u as *const () +} + pub fn provide(providers: &mut ty::query::Providers<'_>) { - // FIXME(#44234): almost all of these queries have no sub-queries and - // therefore no actual inputs, they're just reading tables calculated in - // resolve! Does this work? Unsure! That's what the issue is about. providers.in_scope_traits_map = |tcx, id| tcx.gcx.trait_map.get(&id).cloned(); providers.module_exports = |tcx, id| tcx.gcx.export_map.get(&id).cloned(); providers.crate_name = |tcx, id| { @@ -3041,6 +3055,10 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { assert_eq!(cnum, LOCAL_CRATE); Lrc::new(tcx.maybe_unused_extern_crates.clone()) }; + providers.names_imported_by_glob_use = |tcx, id| { + assert_eq!(id.krate, LOCAL_CRATE); + Lrc::new(tcx.glob_map.get(&id).cloned().unwrap_or_default()) + }; providers.stability_index = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); diff --git a/src/librustc/ty/erase_regions.rs b/src/librustc/ty/erase_regions.rs index a361ad057c74a..0431afcc76c9e 100644 --- a/src/librustc/ty/erase_regions.rs +++ b/src/librustc/ty/erase_regions.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use ty::{self, Ty, TyCtxt}; -use ty::fold::{TypeFolder, TypeFoldable}; +use crate::ty::{self, Ty, TyCtxt, TypeFlags}; +use crate::ty::fold::{TypeFolder, TypeFoldable}; pub(super) fn provide(providers: &mut ty::query::Providers<'_>) { *providers = ty::query::Providers { @@ -31,6 +21,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn erase_regions(self, value: &T) -> T where T : TypeFoldable<'tcx> { + // If there's nothing to erase avoid performing the query at all + if !value.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) { + return value.clone(); + } + let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self }); debug!("erase_regions({:?}) = {:?}", value, value1); value1 diff --git a/src/librustc/ty/error.rs b/src/librustc/ty/error.rs index 90022a770c114..74d0a29bcff00 100644 --- a/src/librustc/ty/error.rs +++ b/src/librustc/ty/error.rs @@ -1,15 +1,5 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::DefId; -use ty::{self, BoundRegion, Region, Ty, TyCtxt}; +use crate::hir::def_id::DefId; +use crate::ty::{self, BoundRegion, Region, Ty, TyCtxt}; use std::borrow::Cow; use std::fmt; use rustc_target::spec::abi; @@ -17,9 +7,9 @@ use syntax::ast; use errors::{Applicability, DiagnosticBuilder}; use syntax_pos::Span; -use hir; +use crate::hir; -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct ExpectedFound { pub expected: T, pub found: T, @@ -39,6 +29,7 @@ pub enum TypeError<'tcx> { RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>), RegionsInsufficientlyPolymorphic(BoundRegion, Region<'tcx>), RegionsOverlyPolymorphic(BoundRegion, Region<'tcx>), + RegionsPlaceholderMismatch, Sorts(ExpectedFound>), IntMismatch(ExpectedFound), @@ -80,6 +71,13 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { } } + let br_string = |br: ty::BoundRegion| { + match br { + ty::BrNamed(_, name) => format!(" {}", name), + _ => String::new(), + } + }; + match *self { CyclicTy(_) => write!(f, "cyclic type of infinite size"), Mismatch => write!(f, "types differ"), @@ -114,15 +112,16 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { } RegionsInsufficientlyPolymorphic(br, _) => { write!(f, - "expected bound lifetime parameter{}{}, found concrete lifetime", - if br.is_named() { " " } else { "" }, - br) + "expected bound lifetime parameter{}, found concrete lifetime", + br_string(br)) } RegionsOverlyPolymorphic(br, _) => { write!(f, - "expected concrete lifetime, found bound lifetime parameter{}{}", - if br.is_named() { " " } else { "" }, - br) + "expected concrete lifetime, found bound lifetime parameter{}", + br_string(br)) + } + RegionsPlaceholderMismatch => { + write!(f, "one type is more general than the other") } Sorts(values) => ty::tls::with(|tcx| { report_maybe_different(f, &values.expected.sort_string(tcx), @@ -131,9 +130,9 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { Traits(values) => ty::tls::with(|tcx| { report_maybe_different(f, &format!("trait `{}`", - tcx.item_path_str(values.expected)), + tcx.def_path_str(values.expected)), &format!("trait `{}`", - tcx.item_path_str(values.found))) + tcx.def_path_str(values.found))) }), IntMismatch(ref values) => { write!(f, "expected `{:?}`, found `{:?}`", @@ -152,8 
+151,8 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { } ProjectionMismatched(ref values) => ty::tls::with(|tcx| { write!(f, "expected {}, found {}", - tcx.item_path_str(values.expected), - tcx.item_path_str(values.found)) + tcx.def_path_str(values.expected), + tcx.def_path_str(values.found)) }), ProjectionBoundsLength(ref values) => { write!(f, "expected {} associated type bindings, found {}", @@ -175,13 +174,11 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => self.to_string().into(), ty::Tuple(ref tys) if tys.is_empty() => self.to_string().into(), - ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.item_path_str(def.did)).into(), - ty::Foreign(def_id) => format!("extern type `{}`", tcx.item_path_str(def_id)).into(), - ty::Array(_, n) => { - match n.assert_usize(tcx) { - Some(n) => format!("array of {} elements", n).into(), - None => "array".into(), - } + ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did)).into(), + ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(), + ty::Array(_, n) => match n.assert_usize(tcx) { + Some(n) => format!("array of {} elements", n).into(), + None => "array".into(), } ty::Slice(_) => "slice".into(), ty::RawPtr(_) => "*-ptr".into(), @@ -190,7 +187,7 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { let tymut_string = tymut.to_string(); if tymut_string == "_" || //unknown type name, tymut_string.len() > 10 || //name longer than saying "reference", - region.to_string() != "" //... or a complex type + region.to_string() != "'_" //... or a complex type { format!("{}reference", match mutbl { hir::Mutability::MutMutable => "mutable ", @@ -203,15 +200,19 @@ impl<'a, 'gcx, 'lcx, 'tcx> ty::TyS<'tcx> { ty::FnDef(..) => "fn item".into(), ty::FnPtr(_) => "fn pointer".into(), ty::Dynamic(ref inner, ..) => { - format!("trait {}", tcx.item_path_str(inner.principal().def_id())).into() + if let Some(principal) = inner.principal() { + format!("trait {}", tcx.def_path_str(principal.def_id())).into() + } else { + "trait".into() + } } ty::Closure(..) => "closure".into(), ty::Generator(..) => "generator".into(), ty::GeneratorWitness(..) => "generator witness".into(), ty::Tuple(..) => "tuple".into(), ty::Infer(ty::TyVar(_)) => "inferred type".into(), - ty::Infer(ty::IntVar(_)) => "integral variable".into(), - ty::Infer(ty::FloatVar(_)) => "floating-point variable".into(), + ty::Infer(ty::IntVar(_)) => "integer".into(), + ty::Infer(ty::FloatVar(_)) => "floating-point number".into(), ty::Placeholder(..) => "placeholder type".into(), ty::Bound(..) => "bound type".into(), ty::Infer(ty::FreshTy(_)) => "fresh type".into(), @@ -252,7 +253,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { { if let Ok(snippet) = self.sess.source_map().span_to_snippet(sp) { if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') { - db.span_suggestion_with_applicability( + db.span_suggestion( sp, "use a float literal", format!("{}.0", snippet), diff --git a/src/librustc/ty/fast_reject.rs b/src/librustc/ty/fast_reject.rs index bd01dd8cb0ce6..59ab4561f2c87 100644 --- a/src/librustc/ty/fast_reject.rs +++ b/src/librustc/ty/fast_reject.rs @@ -1,22 +1,12 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::DefId; -use ich::StableHashingContext; +use crate::hir::def_id::DefId; +use crate::ich::StableHashingContext; use rustc_data_structures::stable_hasher::{StableHasher, StableHasherResult, HashStable}; use std::fmt::Debug; use std::hash::Hash; use std::mem; use syntax::ast; -use ty::{self, Ty, TyCtxt}; +use crate::ty::{self, Ty, TyCtxt}; use self::SimplifiedTypeGen::*; @@ -81,11 +71,11 @@ pub fn simplify_type<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, ty::Array(..) | ty::Slice(_) => Some(ArraySimplifiedType), ty::RawPtr(_) => Some(PtrSimplifiedType), ty::Dynamic(ref trait_info, ..) => { - let principal_def_id = trait_info.principal().def_id(); - if tcx.trait_is_auto(principal_def_id) { - Some(MarkerTraitObjectSimplifiedType) - } else { - Some(TraitSimplifiedType(principal_def_id)) + match trait_info.principal_def_id() { + Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => { + Some(TraitSimplifiedType(principal_def_id)) + } + _ => Some(MarkerTraitObjectSimplifiedType) } } ty::Ref(_, ty, _) => { @@ -150,9 +140,9 @@ impl SimplifiedTypeGen { ArraySimplifiedType => ArraySimplifiedType, PtrSimplifiedType => PtrSimplifiedType, NeverSimplifiedType => NeverSimplifiedType, + MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType, TupleSimplifiedType(n) => TupleSimplifiedType(n), TraitSimplifiedType(d) => TraitSimplifiedType(map(d)), - MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType, ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)), GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)), GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n), diff --git a/src/librustc/ty/flags.rs b/src/librustc/ty/flags.rs index 1ea7e27c0dcdb..7aed2a4288c8e 100644 --- a/src/librustc/ty/flags.rs +++ b/src/librustc/ty/flags.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use mir::interpret::ConstValue; -use ty::subst::Substs; -use ty::{self, Ty, TypeFlags, TypeFoldable}; +use crate::ty::subst::{SubstsRef, UnpackedKind}; +use crate::ty::{self, Ty, TypeFlags, TypeFoldable, InferConst}; +use crate::mir::interpret::ConstValue; #[derive(Debug)] pub struct FlagComputation { @@ -34,6 +24,12 @@ impl FlagComputation { result } + pub fn for_const(c: &ty::Const<'_>) -> TypeFlags { + let mut result = FlagComputation::new(); + result.add_const(c); + result.flags + } + fn add_flags(&mut self, flags: TypeFlags) { self.flags = self.flags | (flags & TypeFlags::NOMINAL_FLAGS); } @@ -240,11 +236,25 @@ impl FlagComputation { } } - fn add_const(&mut self, constant: &ty::Const<'_>) { - self.add_ty(constant.ty); - if let ConstValue::Unevaluated(_, substs) = constant.val { - self.add_flags(TypeFlags::HAS_PROJECTION); - self.add_substs(substs); + fn add_const(&mut self, c: &ty::Const<'_>) { + self.add_ty(c.ty); + match c.val { + ConstValue::Unevaluated(_, substs) => { + self.add_substs(substs); + self.add_flags(TypeFlags::HAS_NORMALIZABLE_PROJECTION | TypeFlags::HAS_PROJECTION); + }, + ConstValue::Infer(infer) => { + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES | TypeFlags::HAS_CT_INFER); + match infer { + InferConst::Fresh(_) => {} + InferConst::Canonical(debruijn, _) => self.add_binder(debruijn), + InferConst::Var(_) => self.add_flags(TypeFlags::KEEP_IN_LOCAL_TCX), + } + } + ConstValue::Param(_) => { + self.add_flags(TypeFlags::HAS_FREE_LOCAL_NAMES | TypeFlags::HAS_PARAMS); + } + _ => {}, } } @@ -257,13 +267,13 @@ impl FlagComputation { self.add_substs(projection_ty.substs); } - fn add_substs(&mut self, substs: &Substs<'_>) { - for ty in substs.types() { - self.add_ty(ty); - } - - for r in substs.regions() { - self.add_region(r); + fn add_substs(&mut self, substs: SubstsRef<'_>) { + for kind in substs { + match kind.unpack() { + UnpackedKind::Type(ty) => self.add_ty(ty), + UnpackedKind::Lifetime(lt) => self.add_region(lt), + UnpackedKind::Const(ct) => self.add_const(ct), + } } } } diff --git a/src/librustc/ty/fold.rs b/src/librustc/ty/fold.rs index a40e1df14f8e8..321e55270c689 100644 --- a/src/librustc/ty/fold.rs +++ b/src/librustc/ty/fold.rs @@ -1,20 +1,10 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Generalized type folding mechanism. The setup is a bit convoluted //! but allows for convenient usage. Let T be an instance of some //! "foldable type" (one which implements `TypeFoldable`) and F be an //! instance of a "folder" (a type which implements `TypeFolder`). Then //! the setup is intended to be: //! -//! T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F) +//! T.fold_with(F) --calls--> F.fold_T(T) --calls--> T.super_fold_with(F) //! //! This way, when you define a new folder F, you can override //! `fold_T()` to customize the behavior, and invoke `T.super_fold_with()` @@ -35,17 +25,18 @@ //! proper thing. //! //! A `TypeFoldable` T can also be visited by a `TypeVisitor` V using similar setup: -//! T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V). -//! These methods return true to indicate that the visitor has found what it is looking for -//! and does not need to visit anything else. +//! +//! 
T.visit_with(V) --calls--> V.visit_T(T) --calls--> T.super_visit_with(V). +//! +//! These methods return true to indicate that the visitor has found what it is +//! looking for, and does not need to visit anything else. -use mir::interpret::ConstValue; -use hir::def_id::DefId; -use ty::{self, Binder, Ty, TyCtxt, TypeFlags}; +use crate::hir::def_id::DefId; +use crate::ty::{self, Binder, Ty, TyCtxt, TypeFlags, flags::FlagComputation}; use std::collections::BTreeMap; use std::fmt; -use util::nodemap::FxHashSet; +use crate::util::nodemap::FxHashSet; /// The TypeFoldable trait is implemented for every type that can be folded. /// Basically, every type that has a corresponding method in TypeFolder. @@ -63,7 +54,7 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.super_visit_with(visitor) } - /// True if `self` has any late-bound regions that are either + /// Returns `true` if `self` has any late-bound regions that are either /// bound by `binder` or bound by some binder outside of `binder`. /// If `binder` is `ty::INNERMOST`, this indicates whether /// there are any late-bound regions that appear free. @@ -71,7 +62,7 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder }) } - /// True if this `self` has any regions that escape `binder` (and + /// Returns `true` if this `self` has any regions that escape `binder` (and /// hence are not bound by it). fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool { self.has_vars_bound_at_or_above(binder.shifted_in(1)) @@ -100,7 +91,9 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.has_type_flags(TypeFlags::HAS_TY_INFER) } fn needs_infer(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER) + self.has_type_flags( + TypeFlags::HAS_TY_INFER | TypeFlags::HAS_RE_INFER | TypeFlags::HAS_CT_INFER + ) } fn has_placeholders(&self) -> bool { self.has_type_flags(TypeFlags::HAS_RE_PLACEHOLDER | TypeFlags::HAS_TY_PLACEHOLDER) @@ -120,13 +113,13 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { self.has_type_flags(TypeFlags::HAS_FREE_REGIONS) } - /// True if there any any un-erased free regions. + /// True if there are any un-erased free regions. fn has_erasable_regions(&self) -> bool { self.has_type_flags(TypeFlags::HAS_FREE_REGIONS) } /// Indicates whether this value references only 'global' - /// types/lifetimes that are the same regardless of what fn we are + /// generic parameters that are the same regardless of what fn we are /// in. This is used for caching. fn is_global(&self) -> bool { !self.has_type_flags(TypeFlags::HAS_FREE_LOCAL_NAMES) @@ -152,7 +145,7 @@ pub trait TypeFoldable<'tcx>: fmt::Debug + Clone { } } -/// The TypeFolder trait defines the actual *folding*. There is a +/// The `TypeFolder` trait defines the actual *folding*. There is a /// method defined for every foldable type. Each of these has a /// default implementation that does an "identity" fold. Within each /// identity fold, it should invoke `foo.fold_with(self)` to fold each @@ -273,7 +266,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }); } - /// True if `callback` returns true for every region appearing free in `value`. + /// Returns `true` if `callback` returns true for every region appearing free in `value`. 
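
The widened `needs_infer` above folds `HAS_CT_INFER` into the check because `FlagComputation` (see the `flags.rs` hunk) now records flags for const arguments when it unpacks substs. A toy sketch of that accumulate-flags-over-unpacked-kinds idea follows, using plain bitmasks rather than the real `TypeFlags`/`UnpackedKind`; all names are illustrative.

```rust
// Toy bitmask accumulator: the same "walk every generic argument and OR in
// what you find" shape, not the real FlagComputation.
const HAS_TY_INFER: u32 = 1 << 0;
const HAS_RE_INFER: u32 = 1 << 1;
const HAS_CT_INFER: u32 = 1 << 2;

// Stand-in for UnpackedKind: an argument is a type, a lifetime, or a const.
enum Kind {
    Type { is_infer: bool },
    Lifetime { is_infer: bool },
    Const { is_infer: bool },
}

#[derive(Default)]
struct Flags(u32);

impl Flags {
    fn add(&mut self, f: u32) {
        self.0 |= f;
    }

    // Mirrors the new add_substs: dispatch on the kind of each argument, so
    // const arguments contribute flags just like types and lifetimes.
    fn add_substs(&mut self, substs: &[Kind]) {
        for kind in substs {
            match kind {
                Kind::Type { is_infer: true } => self.add(HAS_TY_INFER),
                Kind::Lifetime { is_infer: true } => self.add(HAS_RE_INFER),
                Kind::Const { is_infer: true } => self.add(HAS_CT_INFER),
                _ => {}
            }
        }
    }

    // Mirrors the widened needs_infer(): any inference variable counts.
    fn needs_infer(&self) -> bool {
        self.0 & (HAS_TY_INFER | HAS_RE_INFER | HAS_CT_INFER) != 0
    }
}

fn main() {
    let mut flags = Flags::default();
    flags.add_substs(&[
        Kind::Type { is_infer: false },
        Kind::Lifetime { is_infer: false },
        Kind::Const { is_infer: true },
    ]);
    // Without the const case, the inference variable here would be invisible.
    assert!(flags.needs_infer());
    assert!(flags.0 & HAS_TY_INFER == 0);
}
```
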
pub fn all_free_regions_meet( self, value: &impl TypeFoldable<'tcx>, @@ -282,7 +275,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { !self.any_free_region_meets(value, |r| !callback(r)) } - /// True if `callback` returns true for some region appearing free in `value`. + /// Returns `true` if `callback` returns true for some region appearing free in `value`. pub fn any_free_region_meets( self, value: &impl TypeFoldable<'tcx>, @@ -303,8 +296,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// ^ ^ ^ ^ /// | | | | here, would be shifted in 1 /// | | | here, would be shifted in 2 - /// | | here, would be INNERMOST shifted in by 1 - /// | here, initially, binder would be INNERMOST + /// | | here, would be `INNERMOST` shifted in by 1 + /// | here, initially, binder would be `INNERMOST` /// ``` /// /// You see that, initially, *any* bound value is free, @@ -507,12 +500,12 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for BoundVarReplacer<'a, 'gcx, 'tcx> } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - /// Replace all regions bound by the given `Binder` with the + /// Replaces all regions bound by the given `Binder` with the /// results returned by the closure; the closure is expected to /// return a free region (relative to this binder), and hence the /// binder is removed in the return type. The closure is invoked /// once for each unique `BoundRegion`; multiple references to the - /// same `BoundRegion` will reuse the previous result. A map is + /// same `BoundRegion` will reuse the previous result. A map is /// returned at the end with each bound region and the free region /// that replaced it. /// @@ -531,7 +524,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t) } - /// Replace all escaping bound vars. The `fld_r` closure replaces escaping + /// Replaces all escaping bound vars. The `fld_r` closure replaces escaping /// bound regions while the `fld_t` closure replaces escaping bound types. pub fn replace_escaping_bound_vars( self, @@ -543,22 +536,29 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { G: FnMut(ty::BoundTy) -> ty::Ty<'tcx>, T: TypeFoldable<'tcx> { - let mut map = BTreeMap::new(); + use rustc_data_structures::fx::FxHashMap; + + let mut region_map = BTreeMap::new(); + let mut type_map = FxHashMap::default(); if !value.has_escaping_bound_vars() { - (value.clone(), map) + (value.clone(), region_map) } else { let mut real_fld_r = |br| { - *map.entry(br).or_insert_with(|| fld_r(br)) + *region_map.entry(br).or_insert_with(|| fld_r(br)) }; - let mut replacer = BoundVarReplacer::new(self, &mut real_fld_r, &mut fld_t); + let mut real_fld_t = |bound_ty| { + *type_map.entry(bound_ty).or_insert_with(|| fld_t(bound_ty)) + }; + + let mut replacer = BoundVarReplacer::new(self, &mut real_fld_r, &mut real_fld_t); let result = value.fold_with(&mut replacer); - (result, map) + (result, region_map) } } - /// Replace all types or regions bound by the given `Binder`. The `fld_r` + /// Replaces all types or regions bound by the given `Binder`. The `fld_r` /// closure replaces bound regions while the `fld_t` closure replaces bound /// types. pub fn replace_bound_vars( @@ -574,7 +574,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.replace_escaping_bound_vars(value.skip_binder(), fld_r, fld_t) } - /// Replace any late-bound regions bound in `value` with + /// Replaces any late-bound regions bound in `value` with /// free variants attached to `all_outlive_scope`. 
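
The `replace_escaping_bound_vars` hunk above memoizes the type-replacing closure the same way the region-replacing closure was already memoized, so each distinct bound variable is mapped exactly once. Here is a self-contained sketch of that `entry().or_insert_with()` pattern with ordinary std collections; the names are illustrative, and the real code keys on `ty::BoundRegion` / `ty::BoundTy`.

```rust
// Memoized replacement: each distinct "bound var" key is handed to the user
// closure once, and repeated occurrences reuse the cached result.
use std::collections::HashMap;

fn replace_all<F>(vars: &[u32], mut fresh: F) -> Vec<String>
where
    F: FnMut(u32) -> String,
{
    let mut cache: HashMap<u32, String> = HashMap::new();
    vars.iter()
        .map(|&v| {
            // entry().or_insert_with() only invokes `fresh` on a cache miss.
            cache.entry(v).or_insert_with(|| fresh(v)).clone()
        })
        .collect()
}

fn main() {
    let mut calls = 0;
    let replaced = replace_all(&[0, 1, 0], |v| {
        calls += 1;
        format!("'r{}", v)
    });
    assert_eq!(replaced, ["'r0", "'r1", "'r0"]);
    assert_eq!(calls, 2); // the second occurrence of `0` hit the cache
}
```
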
pub fn liberate_late_bound_regions( &self, @@ -590,31 +590,6 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }).0 } - /// Flattens multiple binding levels into one. So `for<'a> for<'b> Foo` - /// becomes `for<'a,'b> Foo`. - pub fn flatten_late_bound_regions(self, bound2_value: &Binder>) - -> Binder - where T: TypeFoldable<'tcx> - { - let bound0_value = bound2_value.skip_binder().skip_binder(); - let value = self.fold_regions(bound0_value, &mut false, |region, current_depth| { - match *region { - ty::ReLateBound(debruijn, br) => { - // We assume no regions bound *outside* of the - // binders in `bound2_value` (nmatsakis added in - // the course of this PR; seems like a reasonable - // sanity check though). - assert!(debruijn == current_depth); - self.mk_region(ty::ReLateBound(current_depth, br)) - } - _ => { - region - } - } - }); - Binder::bind(value) - } - /// Returns a set of all late-bound regions that are constrained /// by `value`, meaning that if we instantiate those LBR with /// variables and equate `value` with something else, those @@ -644,7 +619,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { collector.regions } - /// Replace any late-bound regions bound in `value` with `'erased`. Useful in codegen but also + /// Replaces any late-bound regions bound in `value` with `'erased`. Useful in codegen but also /// method lookup and a few other places where precise region relationships are not required. pub fn erase_late_bound_regions(self, value: &Binder) -> T where T : TypeFoldable<'tcx> @@ -652,13 +627,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.replace_late_bound_regions(value, |_| self.types.re_erased).0 } - /// Rewrite any late-bound regions so that they are anonymous. Region numbers are + /// Rewrite any late-bound regions so that they are anonymous. Region numbers are /// assigned starting at 1 and increasing monotonically in the order traversed /// by the fold operation. /// /// The chief purpose of this function is to canonicalize regions so that two /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become - /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and + /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization. pub fn anonymize_late_bound_regions(self, sig: &Binder) -> Binder where T : TypeFoldable<'tcx>, @@ -822,7 +797,7 @@ pub fn shift_out_vars<'a, 'gcx, 'tcx, T>( /// scope to which it is attached, etc. An escaping var represents /// a bound var for which this processing has not yet been done. struct HasEscapingVarsVisitor { - /// Anything bound by `outer_index` or "above" is escaping + /// Anything bound by `outer_index` or "above" is escaping. outer_index: ty::DebruijnIndex, } @@ -868,14 +843,9 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { - if let ConstValue::Unevaluated(..) 
= c.val { - let projection_flags = TypeFlags::HAS_NORMALIZABLE_PROJECTION | - TypeFlags::HAS_PROJECTION; - if projection_flags.intersects(self.flags) { - return true; - } - } - c.super_visit_with(self) + let flags = FlagComputation::for_const(c); + debug!("HasTypeFlagsVisitor: c={:?} c.flags={:?} self.flags={:?}", c, flags, self.flags); + flags.intersects(self.flags) || c.super_visit_with(self) } } @@ -885,10 +855,10 @@ struct LateBoundRegionsCollector { current_index: ty::DebruijnIndex, regions: FxHashSet, - /// If true, we only want regions that are known to be + /// `true` if we only want regions that are known to be /// "constrained" when you equate this type with another type. In /// particular, if you have e.g., `&'a u32` and `&'b u32`, equating - /// them constraints `'a == 'b`. But if you have `<&'a u32 as + /// them constraints `'a == 'b`. But if you have `<&'a u32 as /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those /// types may mean that `'a` and `'b` don't appear in the results, /// so they are not considered *constrained*. diff --git a/src/librustc/ty/inhabitedness/def_id_forest.rs b/src/librustc/ty/inhabitedness/def_id_forest.rs index 163263babf899..3b393c3ca15bb 100644 --- a/src/librustc/ty/inhabitedness/def_id_forest.rs +++ b/src/librustc/ty/inhabitedness/def_id_forest.rs @@ -1,18 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::mem; use smallvec::SmallVec; use syntax::ast::CRATE_NODE_ID; -use ty::context::TyCtxt; -use ty::{DefId, DefIdTree}; +use crate::ty::context::TyCtxt; +use crate::ty::{DefId, DefIdTree}; /// Represents a forest of DefIds closed under the ancestor relation. That is, /// if a DefId representing a module is contained in the forest then all @@ -32,14 +22,14 @@ pub struct DefIdForest { } impl<'a, 'gcx, 'tcx> DefIdForest { - /// Create an empty forest. + /// Creates an empty forest. pub fn empty() -> DefIdForest { DefIdForest { root_ids: SmallVec::new(), } } - /// Create a forest consisting of a single tree representing the entire + /// Creates a forest consisting of a single tree representing the entire /// crate. #[inline] pub fn full(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest { @@ -47,7 +37,7 @@ impl<'a, 'gcx, 'tcx> DefIdForest { DefIdForest::from_id(crate_id) } - /// Create a forest containing a DefId and all its descendants. + /// Creates a forest containing a DefId and all its descendants. pub fn from_id(id: DefId) -> DefIdForest { let mut root_ids = SmallVec::new(); root_ids.push(id); @@ -56,12 +46,12 @@ impl<'a, 'gcx, 'tcx> DefIdForest { } } - /// Test whether the forest is empty. + /// Tests whether the forest is empty. pub fn is_empty(&self) -> bool { self.root_ids.is_empty() } - /// Test whether the forest contains a given DefId. + /// Tests whether the forest contains a given DefId. 
pub fn contains(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, id: DefId) -> bool @@ -74,10 +64,21 @@ impl<'a, 'gcx, 'tcx> DefIdForest { iter: I) -> DefIdForest where I: IntoIterator { - let mut ret = DefIdForest::full(tcx); + let mut iter = iter.into_iter(); + let mut ret = if let Some(first) = iter.next() { + first + } else { + return DefIdForest::full(tcx); + }; + let mut next_ret = SmallVec::new(); let mut old_ret: SmallVec<[DefId; 1]> = SmallVec::new(); for next_forest in iter { + // No need to continue if the intersection is already empty. + if ret.is_empty() { + break; + } + for id in ret.root_ids.drain() { if next_forest.contains(tcx, id) { next_ret.push(id); diff --git a/src/librustc/ty/inhabitedness/mod.rs b/src/librustc/ty/inhabitedness/mod.rs index 721d5e14ccc63..963b4b439f84d 100644 --- a/src/librustc/ty/inhabitedness/mod.rs +++ b/src/librustc/ty/inhabitedness/mod.rs @@ -1,19 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use util::nodemap::{FxHashMap, FxHashSet}; -use ty::context::TyCtxt; -use ty::{AdtDef, VariantDef, FieldDef, Ty, TyS}; -use ty::{DefId, Substs}; -use ty::{AdtKind, Visibility}; -use ty::TyKind::*; +use crate::ty::context::TyCtxt; +use crate::ty::{AdtDef, VariantDef, FieldDef, Ty, TyS}; +use crate::ty::{DefId, SubstsRef}; +use crate::ty::{AdtKind, Visibility}; +use crate::ty::TyKind::*; pub use self::def_id_forest::DefIdForest; @@ -113,34 +102,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } fn ty_inhabitedness_forest(self, ty: Ty<'tcx>) -> DefIdForest { - ty.uninhabited_from(&mut FxHashMap::default(), self) - } - - pub fn is_enum_variant_uninhabited_from(self, - module: DefId, - variant: &'tcx VariantDef, - substs: &'tcx Substs<'tcx>) - -> bool - { - self.variant_inhabitedness_forest(variant, substs).contains(self, module) - } - - pub fn is_variant_uninhabited_from_all_modules(self, - variant: &'tcx VariantDef, - substs: &'tcx Substs<'tcx>) - -> bool - { - !self.variant_inhabitedness_forest(variant, substs).is_empty() - } - - fn variant_inhabitedness_forest(self, variant: &'tcx VariantDef, substs: &'tcx Substs<'tcx>) - -> DefIdForest { - // Determine the ADT kind: - let adt_def_id = self.adt_def_id_of_variant(variant); - let adt_kind = self.adt_def(adt_def_id).adt_kind(); - - // Compute inhabitedness forest: - variant.uninhabited_from(&mut FxHashMap::default(), self, substs, adt_kind) + ty.uninhabited_from(self) } } @@ -148,23 +110,21 @@ impl<'a, 'gcx, 'tcx> AdtDef { /// Calculate the forest of DefIds from which this adt is visibly uninhabited. fn uninhabited_from( &self, - visited: &mut FxHashMap>>, tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>) -> DefIdForest + substs: SubstsRef<'tcx>) -> DefIdForest { DefIdForest::intersection(tcx, self.variants.iter().map(|v| { - v.uninhabited_from(visited, tcx, substs, self.adt_kind()) + v.uninhabited_from(tcx, substs, self.adt_kind()) })) } } impl<'a, 'gcx, 'tcx> VariantDef { /// Calculate the forest of DefIds from which this variant is visibly uninhabited. 
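
The rewritten `DefIdForest::intersection` above seeds the result with the first forest instead of the full crate and bails out as soon as the running intersection is empty. The following standalone sketch shows the same shape, with `HashSet<u32>` standing in for `DefIdForest` and `None` playing the role of the implicit "full forest" identity; it is an illustration, not the compiler's data structure.

```rust
// Intersection that starts from the first operand and short-circuits once
// the running result is empty.
use std::collections::HashSet;

fn intersection<I>(forests: I) -> Option<HashSet<u32>>
where
    I: IntoIterator<Item = HashSet<u32>>,
{
    let mut iter = forests.into_iter();
    // `None` stands for "the full forest", the identity of intersection.
    let mut ret = match iter.next() {
        Some(first) => first,
        None => return None,
    };
    for next in iter {
        if ret.is_empty() {
            break; // no need to look at the remaining operands
        }
        ret.retain(|id| next.contains(id));
    }
    Some(ret)
}

fn main() {
    let a: HashSet<u32> = [1, 2, 3].into_iter().collect();
    let b: HashSet<u32> = [2, 3, 4].into_iter().collect();
    assert_eq!(intersection(vec![a, b]), Some([2, 3].into_iter().collect()));
    assert_eq!(intersection(Vec::new()), None); // empty input means "everything"
}
```
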
- fn uninhabited_from( + pub fn uninhabited_from( &self, - visited: &mut FxHashMap>>, tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, adt_kind: AdtKind) -> DefIdForest { let is_enum = match adt_kind { @@ -175,7 +135,7 @@ impl<'a, 'gcx, 'tcx> VariantDef { AdtKind::Struct => false, }; DefIdForest::union(tcx, self.fields.iter().map(|f| { - f.uninhabited_from(visited, tcx, substs, is_enum) + f.uninhabited_from(tcx, substs, is_enum) })) } } @@ -184,13 +144,12 @@ impl<'a, 'gcx, 'tcx> FieldDef { /// Calculate the forest of DefIds from which this field is visibly uninhabited. fn uninhabited_from( &self, - visited: &mut FxHashMap>>, tcx: TyCtxt<'a, 'gcx, 'tcx>, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, is_enum: bool, ) -> DefIdForest { - let mut data_uninhabitedness = move || { - self.ty(tcx, substs).uninhabited_from(visited, tcx) + let data_uninhabitedness = move || { + self.ty(tcx, substs).uninhabited_from(tcx) }; // FIXME(canndrew): Currently enum fields are (incorrectly) stored with // Visibility::Invisible so we need to override self.vis if we're @@ -213,57 +172,25 @@ impl<'a, 'gcx, 'tcx> FieldDef { impl<'a, 'gcx, 'tcx> TyS<'tcx> { /// Calculate the forest of DefIds from which this type is visibly uninhabited. - fn uninhabited_from( - &self, - visited: &mut FxHashMap>>, - tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest + fn uninhabited_from(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> DefIdForest { match self.sty { - Adt(def, substs) => { - { - let substs_set = visited.entry(def.did).or_default(); - if !substs_set.insert(substs) { - // We are already calculating the inhabitedness of this type. - // The type must contain a reference to itself. Break the - // infinite loop. - return DefIdForest::empty(); - } - if substs_set.len() >= tcx.sess.recursion_limit.get() / 4 { - // We have gone very deep, reinstantiating this ADT inside - // itself with different type arguments. We are probably - // hitting an infinite loop. For example, it's possible to write: - // a type Foo - // which contains a Foo<(T, T)> - // which contains a Foo<((T, T), (T, T))> - // which contains a Foo<(((T, T), (T, T)), ((T, T), (T, T)))> - // etc. - let error = format!("reached recursion limit while checking \ - inhabitedness of `{}`", self); - tcx.sess.fatal(&error); - } - } - let ret = def.uninhabited_from(visited, tcx, substs); - let substs_set = visited.get_mut(&def.did).unwrap(); - substs_set.remove(substs); - ret - } + Adt(def, substs) => def.uninhabited_from(tcx, substs), Never => DefIdForest::full(tcx), Tuple(ref tys) => { DefIdForest::union(tcx, tys.iter().map(|ty| { - ty.uninhabited_from(visited, tcx) + ty.uninhabited_from(tcx) })) } - Array(ty, len) => { - match len.assert_usize(tcx) { - // If the array is definitely non-empty, it's uninhabited if - // the type of its elements is uninhabited. - Some(n) if n != 0 => ty.uninhabited_from(visited, tcx), - _ => DefIdForest::empty() - } - } + Array(ty, len) => match len.assert_usize(tcx) { + // If the array is definitely non-empty, it's uninhabited if + // the type of its elements is uninhabited. 
+ Some(n) if n != 0 => ty.uninhabited_from(tcx), + _ => DefIdForest::empty() + }, // References to uninitialised memory is valid for any type, including // uninhabited types, in unsafe code, so we treat all references as diff --git a/src/librustc/ty/instance.rs b/src/librustc/ty/instance.rs index a24920da158e2..f54e69f352a4e 100644 --- a/src/librustc/ty/instance.rs +++ b/src/librustc/ty/instance.rs @@ -1,30 +1,22 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::Unsafety; -use hir::def_id::DefId; -use ty::{self, Ty, PolyFnSig, TypeFoldable, Substs, TyCtxt}; -use traits; +use crate::hir::Unsafety; +use crate::hir::def::Namespace; +use crate::hir::def_id::DefId; +use crate::ty::{self, Ty, PolyFnSig, TypeFoldable, SubstsRef, TyCtxt}; +use crate::ty::print::{FmtPrinter, Printer}; +use crate::traits; use rustc_target::spec::abi::Abi; -use util::ppaux; +use rustc_macros::HashStable; use std::fmt; use std::iter; -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct Instance<'tcx> { pub def: InstanceDef<'tcx>, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, } -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum InstanceDef<'tcx> { Item(DefId), Intrinsic(DefId), @@ -32,17 +24,17 @@ pub enum InstanceDef<'tcx> { /// `::method` where `method` receives unsizeable `self: Self`. VtableShim(DefId), - /// \::call_* - /// def-id is FnTrait::call_* + /// `::call_*` + /// `DefId` is `FnTrait::call_*` FnPtrShim(DefId, Ty<'tcx>), - /// ::fn + /// `::fn` Virtual(DefId, usize), - /// <[mut closure] as FnOnce>::call_once + /// `<[mut closure] as FnOnce>::call_once` ClosureOnceShim { call_once: DefId }, - /// drop_in_place::; None for empty drop glue. + /// `drop_in_place::; None` for empty drop glue. DropGlue(DefId, Option>), ///`::clone` shim. @@ -75,7 +67,7 @@ impl<'a, 'tcx> Instance<'tcx> { sig.map_bound(|sig| tcx.mk_fn_sig( iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), sig.output(), - sig.variadic, + sig.c_variadic, sig.unsafety, sig.abi )) @@ -86,6 +78,11 @@ impl<'a, 'tcx> Instance<'tcx> { let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + let pin_did = tcx.lang_items().pin_type().unwrap(); + let pin_adt_ref = tcx.adt_def(pin_did); + let pin_substs = tcx.intern_substs(&[env_ty.into()]); + let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs); + sig.map_bound(|sig| { let state_did = tcx.lang_items().gen_state().unwrap(); let state_adt_ref = tcx.adt_def(state_did); @@ -146,16 +143,14 @@ impl<'tcx> InstanceDef<'tcx> { &self, tcx: TyCtxt<'a, 'tcx, 'tcx> ) -> bool { - use hir::map::DefPathData; + use crate::hir::map::DefPathData; let def_id = match *self { ty::InstanceDef::Item(def_id) => def_id, ty::InstanceDef::DropGlue(_, Some(_)) => return false, _ => return true }; match tcx.def_key(def_id).disambiguated_data.data { - DefPathData::StructCtor | - DefPathData::EnumVariant(..) 
| - DefPathData::ClosureExpr => true, + DefPathData::Ctor | DefPathData::ClosureExpr => true, _ => false } } @@ -179,7 +174,13 @@ impl<'tcx> InstanceDef<'tcx> { impl<'tcx> fmt::Display for Instance<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ppaux::parameterized(f, self.substs, self.def_id(), &[])?; + ty::tls::with(|tcx| { + let substs = tcx.lift(&self.substs).expect("could not lift for printing"); + FmtPrinter::new(tcx, &mut *f, Namespace::ValueNS) + .print_def_path(self.def_id(), substs)?; + Ok(()) + })?; + match self.def { InstanceDef::Item(_) => Ok(()), InstanceDef::VtableShim(_) => { @@ -208,7 +209,7 @@ impl<'tcx> fmt::Display for Instance<'tcx> { } impl<'a, 'b, 'tcx> Instance<'tcx> { - pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) + pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> Instance<'tcx> { assert!(!substs.has_escaping_bound_vars(), "substs of instance {:?} not normalized for codegen: {:?}", @@ -225,7 +226,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { self.def.def_id() } - /// Resolve a (def_id, substs) pair to an (optional) instance -- most commonly, + /// Resolves a `(def_id, substs)` pair to an (optional) instance -- most commonly, /// this is used to find the precise code that will run for a trait method invocation, /// if known. /// @@ -246,7 +247,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { pub fn resolve(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, def_id: DefId, - substs: &'tcx Substs<'tcx>) -> Option> { + substs: SubstsRef<'tcx>) -> Option> { debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) { debug!(" => associated item, attempting to find impl in param_env {:#?}", param_env); @@ -298,7 +299,7 @@ impl<'a, 'b, 'tcx> Instance<'tcx> { pub fn resolve_for_vtable(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, def_id: DefId, - substs: &'tcx Substs<'tcx>) -> Option> { + substs: SubstsRef<'tcx>) -> Option> { debug!("resolve(def_id={:?}, substs={:?})", def_id, substs); let fn_sig = tcx.fn_sig(def_id); let is_vtable_shim = @@ -343,7 +344,7 @@ fn resolve_associated_item<'a, 'tcx>( trait_item: &ty::AssociatedItem, param_env: ty::ParamEnv<'tcx>, trait_id: DefId, - rcvr_substs: &'tcx Substs<'tcx>, + rcvr_substs: SubstsRef<'tcx>, ) -> Option> { let def_id = trait_item.def_id; debug!("resolve_associated_item(trait_item={:?}, \ diff --git a/src/librustc/ty/item_path.rs b/src/librustc/ty/item_path.rs deleted file mode 100644 index a39eb004fd786..0000000000000 --- a/src/librustc/ty/item_path.rs +++ /dev/null @@ -1,581 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::map::DefPathData; -use hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; -use ty::{self, DefIdTree, Ty, TyCtxt}; -use middle::cstore::{ExternCrate, ExternCrateSource}; -use syntax::ast; -use syntax::symbol::{keywords, LocalInternedString, Symbol}; - -use std::cell::Cell; -use std::fmt::Debug; - -thread_local! 
{ - static FORCE_ABSOLUTE: Cell = Cell::new(false); - static FORCE_IMPL_FILENAME_LINE: Cell = Cell::new(false); - static SHOULD_PREFIX_WITH_CRATE: Cell = Cell::new(false); -} - -/// Enforces that item_path_str always returns an absolute path and -/// also enables "type-based" impl paths. This is used when building -/// symbols that contain types, where we want the crate name to be -/// part of the symbol. -pub fn with_forced_absolute_paths R, R>(f: F) -> R { - FORCE_ABSOLUTE.with(|force| { - let old = force.get(); - force.set(true); - let result = f(); - force.set(old); - result - }) -} - -/// Force us to name impls with just the filename/line number. We -/// normally try to use types. But at some points, notably while printing -/// cycle errors, this can result in extra or suboptimal error output, -/// so this variable disables that check. -pub fn with_forced_impl_filename_line R, R>(f: F) -> R { - FORCE_IMPL_FILENAME_LINE.with(|force| { - let old = force.get(); - force.set(true); - let result = f(); - force.set(old); - result - }) -} - -/// Add the `crate::` prefix to paths where appropriate. -pub fn with_crate_prefix R, R>(f: F) -> R { - SHOULD_PREFIX_WITH_CRATE.with(|flag| { - let old = flag.get(); - flag.set(true); - let result = f(); - flag.set(old); - result - }) -} - -impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - /// Returns a string identifying this def-id. This string is - /// suitable for user output. It is relative to the current crate - /// root, unless with_forced_absolute_paths was used. - pub fn item_path_str(self, def_id: DefId) -> String { - let mode = FORCE_ABSOLUTE.with(|force| { - if force.get() { - RootMode::Absolute - } else { - RootMode::Local - } - }); - let mut buffer = LocalPathBuffer::new(mode); - debug!("item_path_str: buffer={:?} def_id={:?}", buffer, def_id); - self.push_item_path(&mut buffer, def_id, false); - buffer.into_string() - } - - /// Returns a string identifying this local node-id. - pub fn node_path_str(self, id: ast::NodeId) -> String { - self.item_path_str(self.hir().local_def_id(id)) - } - - /// Returns a string identifying this def-id. This string is - /// suitable for user output. It always begins with a crate identifier. - pub fn absolute_item_path_str(self, def_id: DefId) -> String { - let mut buffer = LocalPathBuffer::new(RootMode::Absolute); - debug!("absolute_item_path_str: buffer={:?} def_id={:?}", buffer, def_id); - self.push_item_path(&mut buffer, def_id, false); - buffer.into_string() - } - - /// Returns the "path" to a particular crate. This can proceed in - /// various ways, depending on the `root_mode` of the `buffer`. - /// (See `RootMode` enum for more details.) - /// - /// `pushed_prelude_crate` argument should be `true` when the buffer - /// has had a prelude crate pushed to it. If this is the case, then - /// we do not want to prepend `crate::` (as that would not be a valid - /// path). - pub fn push_krate_path(self, buffer: &mut T, cnum: CrateNum, pushed_prelude_crate: bool) - where T: ItemPathBuffer + Debug - { - debug!( - "push_krate_path: buffer={:?} cnum={:?} LOCAL_CRATE={:?}", - buffer, cnum, LOCAL_CRATE - ); - match *buffer.root_mode() { - RootMode::Local => { - // In local mode, when we encounter a crate other than - // LOCAL_CRATE, execution proceeds in one of two ways: - // - // 1. for a direct dependency, where user added an - // `extern crate` manually, we put the `extern - // crate` as the parent. So you wind up with - // something relative to the current crate. - // 2. 
for an extern inferred from a path or an indirect crate, - // where there is no explicit `extern crate`, we just prepend - // the crate name. - // - // Returns `None` for the local crate. - if cnum != LOCAL_CRATE { - let opt_extern_crate = self.extern_crate(cnum.as_def_id()); - if let Some(ExternCrate { - src: ExternCrateSource::Extern(def_id), - direct: true, - .. - }) = *opt_extern_crate - { - debug!("push_krate_path: def_id={:?}", def_id); - self.push_item_path(buffer, def_id, pushed_prelude_crate); - } else { - let name = self.crate_name(cnum).as_str(); - debug!("push_krate_path: name={:?}", name); - buffer.push(&name); - } - } else if self.sess.rust_2018() && !pushed_prelude_crate { - SHOULD_PREFIX_WITH_CRATE.with(|flag| { - // We only add the `crate::` keyword where appropriate. In particular, - // when we've not previously pushed a prelude crate to this path. - if flag.get() { - buffer.push(&keywords::Crate.name().as_str()) - } - }) - } - } - RootMode::Absolute => { - // In absolute mode, just write the crate name - // unconditionally. - let name = self.original_crate_name(cnum).as_str(); - debug!("push_krate_path: original_name={:?}", name); - buffer.push(&name); - } - } - } - - /// If possible, this pushes a global path resolving to `external_def_id` that is visible - /// from at least one local module and returns true. If the crate defining `external_def_id` is - /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`. - pub fn try_push_visible_item_path( - self, - buffer: &mut T, - external_def_id: DefId, - pushed_prelude_crate: bool, - ) -> bool - where T: ItemPathBuffer + Debug - { - debug!( - "try_push_visible_item_path: buffer={:?} external_def_id={:?}", - buffer, external_def_id - ); - let visible_parent_map = self.visible_parent_map(LOCAL_CRATE); - - let (mut cur_def, mut cur_path) = (external_def_id, Vec::::new()); - loop { - debug!( - "try_push_visible_item_path: cur_def={:?} cur_path={:?} CRATE_DEF_INDEX={:?}", - cur_def, cur_path, CRATE_DEF_INDEX, - ); - // If `cur_def` is a direct or injected extern crate, push the path to the crate - // followed by the path to the item within the crate and return. - if cur_def.index == CRATE_DEF_INDEX { - match *self.extern_crate(cur_def) { - Some(ExternCrate { - src: ExternCrateSource::Extern(def_id), - direct: true, - .. - }) => { - debug!("try_push_visible_item_path: def_id={:?}", def_id); - self.push_item_path(buffer, def_id, pushed_prelude_crate); - cur_path.iter().rev().for_each(|segment| buffer.push(&segment)); - return true; - } - None => { - buffer.push(&self.crate_name(cur_def.krate).as_str()); - cur_path.iter().rev().for_each(|segment| buffer.push(&segment)); - return true; - } - _ => {}, - } - } - - let mut cur_def_key = self.def_key(cur_def); - debug!("try_push_visible_item_path: cur_def_key={:?}", cur_def_key); - - // For a UnitStruct or TupleStruct we want the name of its parent rather than . 
- if let DefPathData::StructCtor = cur_def_key.disambiguated_data.data { - let parent = DefId { - krate: cur_def.krate, - index: cur_def_key.parent.expect("DefPathData::StructCtor missing a parent"), - }; - - cur_def_key = self.def_key(parent); - } - - let visible_parent = visible_parent_map.get(&cur_def).cloned(); - let actual_parent = self.parent(cur_def); - debug!( - "try_push_visible_item_path: visible_parent={:?} actual_parent={:?}", - visible_parent, actual_parent, - ); - - let data = cur_def_key.disambiguated_data.data; - let symbol = match data { - // In order to output a path that could actually be imported (valid and visible), - // we need to handle re-exports correctly. - // - // For example, take `std::os::unix::process::CommandExt`, this trait is actually - // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing). - // - // `std::os::unix` rexports the contents of `std::sys::unix::ext`. `std::sys` is - // private so the "true" path to `CommandExt` isn't accessible. - // - // In this case, the `visible_parent_map` will look something like this: - // - // (child) -> (parent) - // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process` - // `std::sys::unix::ext::process` -> `std::sys::unix::ext` - // `std::sys::unix::ext` -> `std::os` - // - // This is correct, as the visible parent of `std::sys::unix::ext` is in fact - // `std::os`. - // - // When printing the path to `CommandExt` and looking at the `cur_def_key` that - // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go - // to the parent - resulting in a mangled path like - // `std::os::ext::process::CommandExt`. - // - // Instead, we must detect that there was a re-export and instead print `unix` - // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To - // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with - // the visible parent (`std::os`). If these do not match, then we iterate over - // the children of the visible parent (as was done when computing - // `visible_parent_map`), looking for the specific child we currently have and then - // have access to the re-exported name. - DefPathData::Module(module_name) if visible_parent != actual_parent => { - let mut name: Option = None; - if let Some(visible_parent) = visible_parent { - for child in self.item_children(visible_parent).iter() { - if child.def.def_id() == cur_def { - name = Some(child.ident); - } - } - } - name.map(|n| n.as_str()).unwrap_or(module_name.as_str()) - }, - _ => { - data.get_opt_name().map(|n| n.as_str()).unwrap_or_else(|| { - // Re-exported `extern crate` (#43189). 
- if let DefPathData::CrateRoot = data { - self.original_crate_name(cur_def.krate).as_str() - } else { - Symbol::intern("").as_str() - } - }) - }, - }; - debug!("try_push_visible_item_path: symbol={:?}", symbol); - cur_path.push(symbol); - - match visible_parent { - Some(def) => cur_def = def, - None => return false, - }; - } - } - - pub fn push_item_path(self, buffer: &mut T, def_id: DefId, pushed_prelude_crate: bool) - where T: ItemPathBuffer + Debug - { - debug!( - "push_item_path: buffer={:?} def_id={:?} pushed_prelude_crate={:?}", - buffer, def_id, pushed_prelude_crate - ); - match *buffer.root_mode() { - RootMode::Local if !def_id.is_local() => - if self.try_push_visible_item_path(buffer, def_id, pushed_prelude_crate) { return }, - _ => {} - } - - let key = self.def_key(def_id); - debug!("push_item_path: key={:?}", key); - match key.disambiguated_data.data { - DefPathData::CrateRoot => { - assert!(key.parent.is_none()); - self.push_krate_path(buffer, def_id.krate, pushed_prelude_crate); - } - - DefPathData::Impl => { - self.push_impl_path(buffer, def_id, pushed_prelude_crate); - } - - // Unclear if there is any value in distinguishing these. - // Probably eventually (and maybe we would even want - // finer-grained distinctions, e.g., between enum/struct). - data @ DefPathData::Misc | - data @ DefPathData::TypeNs(..) | - data @ DefPathData::Trait(..) | - data @ DefPathData::AssocTypeInTrait(..) | - data @ DefPathData::AssocTypeInImpl(..) | - data @ DefPathData::AssocExistentialInImpl(..) | - data @ DefPathData::ValueNs(..) | - data @ DefPathData::Module(..) | - data @ DefPathData::TypeParam(..) | - data @ DefPathData::LifetimeParam(..) | - data @ DefPathData::EnumVariant(..) | - data @ DefPathData::Field(..) | - data @ DefPathData::AnonConst | - data @ DefPathData::MacroDef(..) | - data @ DefPathData::ClosureExpr | - data @ DefPathData::ImplTrait | - data @ DefPathData::GlobalMetaData(..) => { - let parent_did = self.parent_def_id(def_id).unwrap(); - - // Keep track of whether we are one recursion away from the `CrateRoot` and - // pushing the name of a prelude crate. If we are, we'll want to know this when - // printing the `CrateRoot` so we don't prepend a `crate::` to paths. - let mut is_prelude_crate = false; - if let DefPathData::CrateRoot = self.def_key(parent_did).disambiguated_data.data { - if self.extern_prelude.contains_key(&data.as_interned_str().as_symbol()) { - is_prelude_crate = true; - } - } - - self.push_item_path( - buffer, parent_did, pushed_prelude_crate || is_prelude_crate - ); - buffer.push(&data.as_interned_str().as_symbol().as_str()); - }, - - DefPathData::StructCtor => { // present `X` instead of `X::{{constructor}}` - let parent_def_id = self.parent_def_id(def_id).unwrap(); - self.push_item_path(buffer, parent_def_id, pushed_prelude_crate); - } - } - } - - fn push_impl_path( - self, - buffer: &mut T, - impl_def_id: DefId, - pushed_prelude_crate: bool, - ) - where T: ItemPathBuffer + Debug - { - debug!("push_impl_path: buffer={:?} impl_def_id={:?}", buffer, impl_def_id); - let parent_def_id = self.parent_def_id(impl_def_id).unwrap(); - - // Always use types for non-local impls, where types are always - // available, and filename/line-number is mostly uninteresting. - let use_types = !impl_def_id.is_local() || { - // Otherwise, use filename/line-number if forced. 
- let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get()); - !force_no_types - }; - - if !use_types { - return self.push_impl_path_fallback(buffer, impl_def_id, pushed_prelude_crate); - } - - // Decide whether to print the parent path for the impl. - // Logically, since impls are global, it's never needed, but - // users may find it useful. Currently, we omit the parent if - // the impl is either in the same module as the self-type or - // as the trait. - let self_ty = self.type_of(impl_def_id); - let in_self_mod = match characteristic_def_id_of_type(self_ty) { - None => false, - Some(ty_def_id) => self.parent_def_id(ty_def_id) == Some(parent_def_id), - }; - - let impl_trait_ref = self.impl_trait_ref(impl_def_id); - let in_trait_mod = match impl_trait_ref { - None => false, - Some(trait_ref) => self.parent_def_id(trait_ref.def_id) == Some(parent_def_id), - }; - - if !in_self_mod && !in_trait_mod { - // If the impl is not co-located with either self-type or - // trait-type, then fallback to a format that identifies - // the module more clearly. - self.push_item_path(buffer, parent_def_id, pushed_prelude_crate); - if let Some(trait_ref) = impl_trait_ref { - buffer.push(&format!("", trait_ref, self_ty)); - } else { - buffer.push(&format!("", self_ty)); - } - return; - } - - // Otherwise, try to give a good form that would be valid language - // syntax. Preferably using associated item notation. - - if let Some(trait_ref) = impl_trait_ref { - // Trait impls. - buffer.push(&format!("<{} as {}>", self_ty, trait_ref)); - return; - } - - // Inherent impls. Try to print `Foo::bar` for an inherent - // impl on `Foo`, but fallback to `::bar` if self-type is - // anything other than a simple path. - match self_ty.sty { - ty::Adt(adt_def, substs) => { - if substs.types().next().is_none() { // ignore regions - self.push_item_path(buffer, adt_def.did, pushed_prelude_crate); - } else { - buffer.push(&format!("<{}>", self_ty)); - } - } - - ty::Foreign(did) => self.push_item_path(buffer, did, pushed_prelude_crate), - - ty::Bool | - ty::Char | - ty::Int(_) | - ty::Uint(_) | - ty::Float(_) | - ty::Str => { - buffer.push(&self_ty.to_string()); - } - - _ => { - buffer.push(&format!("<{}>", self_ty)); - } - } - } - - fn push_impl_path_fallback( - self, - buffer: &mut T, - impl_def_id: DefId, - pushed_prelude_crate: bool, - ) - where T: ItemPathBuffer + Debug - { - // If no type info is available, fall back to - // pretty printing some span information. This should - // only occur very early in the compiler pipeline. - let parent_def_id = self.parent_def_id(impl_def_id).unwrap(); - self.push_item_path(buffer, parent_def_id, pushed_prelude_crate); - let node_id = self.hir().as_local_node_id(impl_def_id).unwrap(); - let item = self.hir().expect_item(node_id); - let span_str = self.sess.source_map().span_to_string(item.span); - buffer.push(&format!("", span_str)); - } - - /// Returns the def-id of `def_id`'s parent in the def tree. If - /// this returns `None`, then `def_id` represents a crate root or - /// inlined root. - pub fn parent_def_id(self, def_id: DefId) -> Option { - let key = self.def_key(def_id); - key.parent.map(|index| DefId { krate: def_id.krate, index: index }) - } -} - -/// As a heuristic, when we see an impl, if we see that the -/// 'self-type' is a type defined in the same module as the impl, -/// we can omit including the path to the impl itself. This -/// function tries to find a "characteristic def-id" for a -/// type. 
It's just a heuristic so it makes some questionable -/// decisions and we may want to adjust it later. -pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option { - match ty.sty { - ty::Adt(adt_def, _) => Some(adt_def.did), - - ty::Dynamic(data, ..) => Some(data.principal().def_id()), - - ty::Array(subty, _) | - ty::Slice(subty) => characteristic_def_id_of_type(subty), - - ty::RawPtr(mt) => characteristic_def_id_of_type(mt.ty), - - ty::Ref(_, ty, _) => characteristic_def_id_of_type(ty), - - ty::Tuple(ref tys) => tys.iter() - .filter_map(|ty| characteristic_def_id_of_type(ty)) - .next(), - - ty::FnDef(def_id, _) | - ty::Closure(def_id, _) | - ty::Generator(def_id, _, _) | - ty::Foreign(def_id) => Some(def_id), - - ty::Bool | - ty::Char | - ty::Int(_) | - ty::Uint(_) | - ty::Str | - ty::FnPtr(_) | - ty::Projection(_) | - ty::Placeholder(..) | - ty::UnnormalizedProjection(..) | - ty::Param(_) | - ty::Opaque(..) | - ty::Infer(_) | - ty::Bound(..) | - ty::Error | - ty::GeneratorWitness(..) | - ty::Never | - ty::Float(_) => None, - } -} - -/// Unifying Trait for different kinds of item paths we might -/// construct. The basic interface is that components get pushed: the -/// instance can also customize how we handle the root of a crate. -pub trait ItemPathBuffer { - fn root_mode(&self) -> &RootMode; - fn push(&mut self, text: &str); -} - -#[derive(Debug)] -pub enum RootMode { - /// Try to make a path relative to the local crate. In - /// particular, local paths have no prefix, and if the path comes - /// from an extern crate, start with the path to the `extern - /// crate` declaration. - Local, - - /// Always prepend the crate name to the path, forming an absolute - /// path from within a given set of crates. - Absolute, -} - -#[derive(Debug)] -struct LocalPathBuffer { - root_mode: RootMode, - str: String, -} - -impl LocalPathBuffer { - fn new(root_mode: RootMode) -> LocalPathBuffer { - LocalPathBuffer { - root_mode, - str: String::new(), - } - } - - fn into_string(self) -> String { - self.str - } -} - -impl ItemPathBuffer for LocalPathBuffer { - fn root_mode(&self) -> &RootMode { - &self.root_mode - } - - fn push(&mut self, text: &str) { - if !self.str.is_empty() { - self.str.push_str("::"); - } - self.str.push_str(text); - } -} diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 87d745e5cea77..fd1d3a91ede1f 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -1,17 +1,7 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use session::{self, DataTypeKind}; -use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions}; - -use syntax::ast::{self, IntTy, UintTy}; +use crate::session::{self, DataTypeKind}; +use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions}; + +use syntax::ast::{self, Ident, IntTy, UintTy}; use syntax::attr; use syntax_pos::DUMMY_SP; @@ -22,7 +12,7 @@ use std::iter; use std::mem; use std::ops::Bound; -use ich::StableHashingContext; +use crate::ich::StableHashingContext; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; @@ -56,7 +46,7 @@ impl IntegerExt for Integer { } } - /// Get the Integer type from an attr::IntType. 
+ /// Gets the Integer type from an attr::IntType. fn from_attr(cx: &C, ity: attr::IntType) -> Integer { let dl = cx.data_layout(); @@ -72,7 +62,7 @@ impl IntegerExt for Integer { } } - /// Find the appropriate Integer type and signedness for the given + /// Finds the appropriate Integer type and signedness for the given /// signed discriminant range and #[repr] attribute. /// N.B.: u128 values above i128::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. @@ -191,7 +181,14 @@ fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty::tls::enter_context(&icx, |_| { let cx = LayoutCx { tcx, param_env }; - cx.layout_raw_uncached(ty) + let layout = cx.layout_raw_uncached(ty); + // Type-level uninhabitedness should always imply ABI uninhabitedness. + if let Ok(layout) = layout { + if ty.conservative_is_privately_uninhabited(tcx) { + assert!(layout.abi.is_uninhabited()); + } + } + layout }) }) } @@ -205,12 +202,11 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { pub struct LayoutCx<'tcx, C> { pub tcx: C, - pub param_env: ty::ParamEnv<'tcx> + pub param_env: ty::ParamEnv<'tcx>, } impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { - fn layout_raw_uncached(&self, ty: Ty<'tcx>) - -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { + fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> { let tcx = self.tcx; let param_env = self.param_env; let dl = self.data_layout(); @@ -551,13 +547,19 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let size = element.size.checked_mul(count, dl) .ok_or(LayoutError::SizeOverflow(ty))?; + let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) { + Abi::Uninhabited + } else { + Abi::Aggregate { sized: true } + }; + tcx.intern_layout(LayoutDetails { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldPlacement::Array { stride: element.size, count }, - abi: Abi::Aggregate { sized: true }, + abi, align: element.align, size }) @@ -911,11 +913,14 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } return Ok(tcx.intern_layout(LayoutDetails { - variants: Variants::NicheFilling { - dataful_variant: i, - niche_variants, - niche: niche_scalar, - niche_start, + variants: Variants::Multiple { + discr: niche_scalar, + discr_kind: DiscriminantKind::Niche { + dataful_variant: i, + niche_variants, + niche_start, + }, + discr_index: 0, variants: st, }, fields: FieldPlacement::Arbitrary { @@ -1135,8 +1140,10 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } tcx.intern_layout(LayoutDetails { - variants: Variants::Tagged { - tag, + variants: Variants::Multiple { + discr: tag, + discr_kind: DiscriminantKind::Tag, + discr_index: 0, variants: layout_variants, }, fields: FieldPlacement::Arbitrary { @@ -1174,14 +1181,20 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { /// This is invoked by the `layout_raw` query to record the final /// layout of each type. - #[inline] + #[inline(always)] fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) { - // If we are running with `-Zprint-type-sizes`, record layouts for - // dumping later. Ignore layouts that are done with non-empty - // environments or non-monomorphic layouts, as the user only wants - // to see the stuff resulting from the final codegen session. + // If we are running with `-Zprint-type-sizes`, maybe record layouts + // for dumping later. 
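
The layout hunks above fold `Variants::Tagged` and `Variants::NicheFilling` into a single `Variants::Multiple`, with the tagging strategy carried separately by a `DiscriminantKind`. The sketch below uses simplified stand-in types, not the real layout structures, to show what consumers gain: one arm handles every multi-variant layout, and `discr_kind` is inspected only where the strategy matters.

```rust
// Toy versions of the consolidated layout enums.
#[derive(Debug)]
enum DiscriminantKind {
    Tag,
    Niche { dataful_variant: usize, niche_start: u128 },
}

#[derive(Debug)]
enum Variants {
    Single { index: usize },
    Multiple { discr_kind: DiscriminantKind, discr_index: usize, variant_count: usize },
}

// A consumer that previously needed two arms (Tagged and NicheFilling) now
// matches Multiple once and looks at discr_kind only when it has to.
fn describe(v: &Variants) -> String {
    match v {
        Variants::Single { index } => format!("single variant #{}", index),
        Variants::Multiple { discr_kind: DiscriminantKind::Tag, variant_count, .. } => {
            format!("{} variants with an explicit tag", variant_count)
        }
        Variants::Multiple { discr_kind: DiscriminantKind::Niche { dataful_variant, .. }, .. } => {
            format!("niche-filled, data lives in variant #{}", dataful_variant)
        }
    }
}

fn main() {
    let layouts = [
        Variants::Single { index: 0 },
        Variants::Multiple {
            discr_kind: DiscriminantKind::Tag,
            discr_index: 0,
            variant_count: 3,
        },
        Variants::Multiple {
            discr_kind: DiscriminantKind::Niche { dataful_variant: 1, niche_start: 0 },
            discr_index: 0,
            variant_count: 2,
        },
    ];
    for v in &layouts {
        println!("{:?} -> {}", v, describe(v));
    }
}
```
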
+ if self.tcx.sess.opts.debugging_opts.print_type_sizes { + self.record_layout_for_printing_outlined(layout) + } + } + + fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) { + // Ignore layouts that are done with non-empty environments or + // non-monomorphic layouts, as the user only wants to see the stuff + // resulting from the final codegen session. if - !self.tcx.sess.opts.debugging_opts.print_type_sizes || layout.ty.has_param_types() || layout.ty.has_self_ty() || !self.param_env.caller_bounds.is_empty() @@ -1189,10 +1202,6 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { return; } - self.record_layout_for_printing_outlined(layout) - } - - fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) { // (delay format until we actually need it) let record = |kind, packed, opt_discr_size, variants| { let type_desc = format!("{:?}", layout.ty); @@ -1226,7 +1235,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { let adt_kind = adt_def.adt_kind(); let adt_packed = adt_def.repr.packed(); - let build_variant_info = |n: Option, + let build_variant_info = |n: Option, flds: &[ast::Name], layout: TyLayout<'tcx>| { let mut min_size = Size::ZERO; @@ -1271,7 +1280,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { match layout.variants { Variants::Single { index } => { debug!("print-type-size `{:#?}` variant {}", - layout, adt_def.variants[index].name); + layout, adt_def.variants[index].ident); if !adt_def.variants.is_empty() { let variant_def = &adt_def.variants[index]; let fields: Vec<_> = @@ -1279,7 +1288,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { record(adt_kind.into(), adt_packed, None, - vec![build_variant_info(Some(variant_def.name), + vec![build_variant_info(Some(variant_def.ident), &fields, layout)]); } else { @@ -1289,21 +1298,20 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { } } - Variants::NicheFilling { .. } | - Variants::Tagged { .. } => { + Variants::Multiple { ref discr, ref discr_kind, .. } => { debug!("print-type-size `{:#?}` adt general variants def {}", layout.ty, adt_def.variants.len()); let variant_infos: Vec<_> = adt_def.variants.iter_enumerated().map(|(i, variant_def)| { let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect(); - build_variant_info(Some(variant_def.name), + build_variant_info(Some(variant_def.ident), &fields, layout.for_variant(self, i)) }) .collect(); - record(adt_kind.into(), adt_packed, match layout.variants { - Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)), + record(adt_kind.into(), adt_packed, match discr_kind { + DiscriminantKind::Tag => Some(discr.value.size(self)), _ => None }, variant_infos); } @@ -1623,8 +1631,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> }) } - Variants::NicheFilling { ref variants, .. } | - Variants::Tagged { ref variants, .. } => { + Variants::Multiple { ref variants, .. } => { &variants[variant_index] } }; @@ -1684,7 +1691,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> tcx.types.re_static, tcx.mk_array(tcx.types.usize, 3), ) - /* FIXME use actual fn pointers + /* FIXME: use actual fn pointers Warning: naively computing the number of entries in the vtable by counting the methods on the trait + methods on all parent traits does not work, because some methods can @@ -1731,8 +1738,7 @@ impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx> } // Discriminant field for enums (where applicable). - Variants::Tagged { tag: ref discr, .. 
} | - Variants::NicheFilling { niche: ref discr, .. } => { + Variants::Multiple { ref discr, .. } => { assert_eq!(i, 0); let layout = LayoutDetails::scalar(cx, discr.clone()); return MaybeResult::from_ok(TyLayout { @@ -1840,7 +1846,11 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> { return Ok(None); } } - if let FieldPlacement::Array { .. } = layout.fields { + if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields { + // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big + if original_64_bit_count > usize::max_value() as u64 { + return Err(LayoutError::SizeOverflow(layout.ty)); + } if layout.fields.count() > 0 { return self.find_niche(layout.field(self, 0)?); } else { @@ -1866,33 +1876,46 @@ impl<'a> HashStable> for Variants { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - use ty::layout::Variants::*; + use crate::ty::layout::Variants::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { Single { index } => { index.hash_stable(hcx, hasher); } - Tagged { - ref tag, + Multiple { + ref discr, + ref discr_kind, + discr_index, ref variants, } => { - tag.hash_stable(hcx, hasher); + discr.hash_stable(hcx, hasher); + discr_kind.hash_stable(hcx, hasher); + discr_index.hash_stable(hcx, hasher); variants.hash_stable(hcx, hasher); } - NicheFilling { + } + } +} + +impl<'a> HashStable> for DiscriminantKind { + fn hash_stable(&self, + hcx: &mut StableHashingContext<'a>, + hasher: &mut StableHasher) { + use crate::ty::layout::DiscriminantKind::*; + mem::discriminant(self).hash_stable(hcx, hasher); + + match *self { + Tag => {} + Niche { dataful_variant, ref niche_variants, - ref niche, niche_start, - ref variants, } => { dataful_variant.hash_stable(hcx, hasher); niche_variants.start().hash_stable(hcx, hasher); niche_variants.end().hash_stable(hcx, hasher); - niche.hash_stable(hcx, hasher); niche_start.hash_stable(hcx, hasher); - variants.hash_stable(hcx, hasher); } } } @@ -1902,7 +1925,7 @@ impl<'a> HashStable> for FieldPlacement { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - use ty::layout::FieldPlacement::*; + use crate::ty::layout::FieldPlacement::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -1935,7 +1958,7 @@ impl<'a> HashStable> for Abi { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - use ty::layout::Abi::*; + use crate::ty::layout::Abi::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { @@ -1969,7 +1992,7 @@ impl<'a> HashStable> for Scalar { } } -impl_stable_hash_for!(struct ::ty::layout::LayoutDetails { +impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails { variants, fields, abi, @@ -1977,7 +2000,7 @@ impl_stable_hash_for!(struct ::ty::layout::LayoutDetails { align }); -impl_stable_hash_for!(enum ::ty::layout::Integer { +impl_stable_hash_for!(enum crate::ty::layout::Integer { I8, I16, I32, @@ -1985,13 +2008,13 @@ impl_stable_hash_for!(enum ::ty::layout::Integer { I128 }); -impl_stable_hash_for!(enum ::ty::layout::Primitive { +impl_stable_hash_for!(enum crate::ty::layout::Primitive { Int(integer, signed), Float(fty), Pointer }); -impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign { +impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign { abi, pref }); @@ -2017,7 +2040,7 @@ impl<'a, 'gcx> HashStable> for LayoutError<'gcx> fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - use 
ty::layout::LayoutError::*; + use crate::ty::layout::LayoutError::*; mem::discriminant(self).hash_stable(hcx, hasher); match *self { diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs index 429b7f03af8e4..7d47867cea125 100644 --- a/src/librustc/ty/mod.rs +++ b/src/librustc/ty/mod.rs @@ -1,12 +1,4 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +#![cfg_attr(not(stage0), allow(usage_of_ty_tykind))] pub use self::Variance::*; pub use self::AssociatedItemContainer::*; @@ -14,32 +6,31 @@ pub use self::BorrowKind::*; pub use self::IntVarValue::*; pub use self::fold::TypeFoldable; -use hir::{map as hir_map, FreevarMap, TraitMap}; -use hir::Node; -use hir::def::{Def, CtorKind, ExportMap}; -use hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; -use hir::map::DefPathData; +use crate::hir::{map as hir_map, FreevarMap, GlobMap, TraitMap}; +use crate::hir::{HirId, Node}; +use crate::hir::def::{Def, CtorOf, CtorKind, ExportMap}; +use crate::hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_INDEX, LOCAL_CRATE}; use rustc_data_structures::svh::Svh; -use ich::Fingerprint; -use ich::StableHashingContext; -use infer::canonical::Canonical; -use middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; -use middle::privacy::AccessLevels; -use middle::resolve_lifetime::ObjectLifetimeDefault; -use mir::Mir; -use mir::interpret::{GlobalId, ErrorHandled}; -use mir::GeneratorLayout; -use session::CrateDisambiguator; -use traits::{self, Reveal}; -use ty; -use ty::layout::VariantIdx; -use ty::subst::{Subst, Substs}; -use ty::util::{IntTypeExt, Discr}; -use ty::walk::TypeWalker; -use util::captures::Captures; -use util::nodemap::{NodeSet, DefIdMap, FxHashMap}; +use rustc_macros::HashStable; +use crate::ich::Fingerprint; +use crate::ich::StableHashingContext; +use crate::infer::canonical::Canonical; +use crate::middle::lang_items::{FnTraitLangItem, FnMutTraitLangItem, FnOnceTraitLangItem}; +use crate::middle::resolve_lifetime::ObjectLifetimeDefault; +use crate::mir::Mir; +use crate::mir::interpret::{GlobalId, ErrorHandled}; +use crate::mir::GeneratorLayout; +use crate::session::CrateDisambiguator; +use crate::traits::{self, Reveal}; +use crate::ty; +use crate::ty::layout::VariantIdx; +use crate::ty::subst::{Subst, InternalSubsts, SubstsRef}; +use crate::ty::util::{IntTypeExt, Discr}; +use crate::ty::walk::TypeWalker; +use crate::util::captures::Captures; +use crate::util::nodemap::{NodeSet, DefIdMap, FxHashMap}; use arena::SyncDroplessArena; -use session::DataTypeKind; +use crate::session::DataTypeKind; use serialize::{self, Encodable, Encoder}; use std::cell::RefCell; @@ -50,29 +41,29 @@ use std::ops::Deref; use rustc_data_structures::sync::{self, Lrc, ParallelIterator, par_iter}; use std::slice; use std::{mem, ptr}; -use syntax::ast::{self, DUMMY_NODE_ID, Name, Ident, NodeId}; +use syntax::ast::{self, Name, Ident, NodeId}; use syntax::attr; use syntax::ext::hygiene::Mark; use syntax::symbol::{keywords, Symbol, LocalInternedString, InternedString}; -use syntax_pos::{DUMMY_SP, Span}; +use syntax_pos::Span; use smallvec; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::stable_hasher::{StableHasher, 
StableHasherResult, HashStable}; -use hir; +use crate::hir; pub use self::sty::{Binder, BoundTy, BoundTyKind, BoundVar, DebruijnIndex, INNERMOST}; pub use self::sty::{FnSig, GenSig, CanonicalPolyFnSig, PolyFnSig, PolyGenSig}; -pub use self::sty::{InferTy, ParamTy, ProjectionTy, ExistentialPredicate}; +pub use self::sty::{InferTy, ParamTy, ParamConst, InferConst, ProjectionTy, ExistentialPredicate}; pub use self::sty::{ClosureSubsts, GeneratorSubsts, UpvarSubsts, TypeAndMut}; pub use self::sty::{TraitRef, TyKind, PolyTraitRef}; pub use self::sty::{ExistentialTraitRef, PolyExistentialTraitRef}; pub use self::sty::{ExistentialProjection, PolyExistentialProjection, Const}; pub use self::sty::{BoundRegion, EarlyBoundRegion, FreeRegion, Region}; pub use self::sty::RegionKind; -pub use self::sty::{TyVid, IntVid, FloatVid, RegionVid}; +pub use self::sty::{TyVid, IntVid, FloatVid, ConstVid, RegionVid}; pub use self::sty::BoundRegion::*; pub use self::sty::InferTy::*; pub use self::sty::RegionKind::*; @@ -82,7 +73,11 @@ pub use self::binding::BindingMode; pub use self::binding::BindingMode::*; pub use self::context::{TyCtxt, FreeRegionInfo, GlobalArenas, AllArenas, tls, keep_local}; -pub use self::context::{Lift, TypeckTables}; +pub use self::context::{Lift, TypeckTables, CtxtInterners, GlobalCtxt}; +pub use self::context::{ + UserTypeAnnotationIndex, UserType, CanonicalUserType, + CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, ResolvedOpaqueTy, +}; pub use self::instance::{Instance, InstanceDef}; @@ -101,10 +96,10 @@ mod erase_regions; pub mod fast_reject; pub mod fold; pub mod inhabitedness; -pub mod item_path; pub mod layout; pub mod _match; pub mod outlives; +pub mod print; pub mod query; pub mod relate; pub mod steal; @@ -122,18 +117,6 @@ mod sty; // Data types -/// The complete set of all analyses described in this module. This is -/// produced by the driver and fed to codegen and later passes. -/// -/// N.B., these contents are being migrated into queries using the -/// *on-demand* infrastructure. -#[derive(Clone)] -pub struct CrateAnalysis { - pub access_levels: Lrc, - pub name: String, - pub glob_map: Option, -} - #[derive(Clone)] pub struct Resolutions { pub freevars: FreevarMap, @@ -141,20 +124,21 @@ pub struct Resolutions { pub maybe_unused_trait_imports: NodeSet, pub maybe_unused_extern_crates: Vec<(NodeId, Span)>, pub export_map: ExportMap, + pub glob_map: GlobMap, /// Extern prelude entries. The value is `true` if the entry was introduced /// via `extern crate` item and not `--extern` option or compiler built-in. pub extern_prelude: FxHashMap, } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)] pub enum AssociatedItemContainer { TraitContainer(DefId), ImplContainer(DefId), } impl AssociatedItemContainer { - /// Asserts that this is the def-id of an associated item declared - /// in a trait, and returns the trait def-id. + /// Asserts that this is the `DefId` of an associated item declared + /// in a trait, and returns the trait `DefId`. pub fn assert_trait(&self) -> DefId { match *self { TraitContainer(id) => id, @@ -172,7 +156,7 @@ impl AssociatedItemContainer { /// The "header" of an impl is everything outside the body: a Self type, a trait /// ref (in the case of a trait impl), and a set of predicates (from the -/// bounds/where clauses). +/// bounds / where-clauses). 
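The `ImplHeader` described above collects everything outside an impl's body. As a purely illustrative, self-contained sketch (ordinary Rust, not compiler-internal code), the pieces line up like this:

```rust
use std::fmt::Debug;

trait Summarize {
    fn summary(&self) -> String;
}

struct Wrapper<T>(T);

// The "header" of this impl: Self type `Wrapper<T>`, trait ref `Summarize`,
// and the predicate coming from the bounds, `T: Debug`.
impl<T: Debug> Summarize for Wrapper<T> {
    // Everything inside the braces is the body, which the header excludes.
    fn summary(&self) -> String {
        format!("{:?}", self.0)
    }
}

fn main() {
    println!("{}", Wrapper(42).summary());
}
```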
#[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct ImplHeader<'tcx> { pub impl_def_id: DefId, @@ -181,9 +165,10 @@ pub struct ImplHeader<'tcx> { pub predicates: Vec>, } -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq, HashStable)] pub struct AssociatedItem { pub def_id: DefId, + #[stable_hasher(project(name))] pub ident: Ident, pub kind: AssociatedKind, pub vis: Visibility, @@ -195,7 +180,7 @@ pub struct AssociatedItem { pub method_has_self_argument: bool, } -#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash, RustcEncodable, RustcDecodable, HashStable)] pub enum AssociatedKind { Const, Method, @@ -243,7 +228,7 @@ impl AssociatedItem { } } -#[derive(Clone, Debug, PartialEq, Eq, Copy, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, PartialEq, Eq, Copy, RustcEncodable, RustcDecodable, HashStable)] pub enum Visibility { /// Visible everywhere (including in other crates). Public, @@ -278,7 +263,7 @@ impl<'a, 'gcx, 'tcx> DefIdTree for TyCtxt<'a, 'gcx, 'tcx> { } impl Visibility { - pub fn from_hir(visibility: &hir::Visibility, id: NodeId, tcx: TyCtxt<'_, '_, '_>) -> Self { + pub fn from_hir(visibility: &hir::Visibility, id: hir::HirId, tcx: TyCtxt<'_, '_, '_>) -> Self { match visibility.node { hir::VisibilityKind::Public => Visibility::Public, hir::VisibilityKind::Crate(_) => Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)), @@ -289,7 +274,7 @@ impl Visibility { def => Visibility::Restricted(def.def_id()), }, hir::VisibilityKind::Inherited => { - Visibility::Restricted(tcx.hir().get_module_parent(id)) + Visibility::Restricted(tcx.hir().get_module_parent_by_hir_id(id)) } } } @@ -330,7 +315,7 @@ impl Visibility { } } -#[derive(Copy, Clone, PartialEq, Eq, RustcDecodable, RustcEncodable, Hash)] +#[derive(Copy, Clone, PartialEq, Eq, RustcDecodable, RustcEncodable, Hash, HashStable)] pub enum Variance { Covariant, // T <: T iff A <: B -- e.g., function return type Invariant, // T <: T iff B == A -- e.g., type of mutable cell @@ -344,19 +329,21 @@ pub enum Variance { /// HIR of every item in the local crate. Instead, use /// `tcx.variances_of()` to get the variance for a *particular* /// item. +#[derive(HashStable)] pub struct CrateVariancesMap { /// For each item with generics, maps to a vector of the variance - /// of its generics. If an item has no generics, it will have no + /// of its generics. If an item has no generics, it will have no /// entry. pub variances: FxHashMap>>, /// An empty vector, useful for cloning. + #[stable_hasher(ignore)] pub empty_variance: Lrc>, } impl Variance { /// `a.xform(b)` combines the variance of a context with the - /// variance of a type with the following meaning. If we are in a + /// variance of a type with the following meaning. If we are in a /// context with variance `a`, and we encounter a type argument in /// a position with variance `b`, then `a.xform(b)` is the new /// variance with which the argument appears. @@ -380,10 +367,10 @@ impl Variance { /// The ambient variance is covariant. A `fn` type is /// contravariant with respect to its parameters, so the variance /// within which both pointer types appear is - /// `Covariant.xform(Contravariant)`, or `Contravariant`. `*const + /// `Covariant.xform(Contravariant)`, or `Contravariant`. `*const /// T` is covariant with respect to `T`, so the variance within /// which the first `Vec` appears is - /// `Contravariant.xform(Covariant)` or `Contravariant`. 
The same + /// `Contravariant.xform(Covariant)` or `Contravariant`. The same /// is true for its `i32` argument. In the `*mut T` case, the /// variance of `Vec` is `Contravariant.xform(Invariant)`, /// and hence the outermost type is `Invariant` with respect to @@ -469,6 +456,8 @@ bitflags! { const HAS_TY_PLACEHOLDER = 1 << 14; + const HAS_CT_INFER = 1 << 15; + const NEEDS_SUBST = TypeFlags::HAS_PARAMS.bits | TypeFlags::HAS_SELF.bits | TypeFlags::HAS_RE_EARLY_BOUND.bits; @@ -480,6 +469,7 @@ bitflags! { TypeFlags::HAS_SELF.bits | TypeFlags::HAS_TY_INFER.bits | TypeFlags::HAS_RE_INFER.bits | + TypeFlags::HAS_CT_INFER.bits | TypeFlags::HAS_RE_PLACEHOLDER.bits | TypeFlags::HAS_RE_EARLY_BOUND.bits | TypeFlags::HAS_FREE_REGIONS.bits | @@ -507,12 +497,12 @@ pub struct TyS<'tcx> { /// So, for a type without any late-bound things, like `u32`, this /// will be *innermost*, because that is the innermost binder that /// captures nothing. But for a type `&'D u32`, where `'D` is a - /// late-bound region with debruijn index `D`, this would be `D + 1` + /// late-bound region with De Bruijn index `D`, this would be `D + 1` /// -- the binder itself does not capture `D`, but `D` is captured /// by an inner binder. /// /// We call this concept an "exclusive" binder `D` because all - /// debruijn indices within the type are contained within `0..D` + /// De Bruijn indices within the type are contained within `0..D` /// (exclusive). outer_exclusive_binder: ty::DebruijnIndex, } @@ -733,26 +723,26 @@ impl List { } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UpvarPath { pub hir_id: hir::HirId, } -/// Upvars do not get their own node-id. Instead, we use the pair of -/// the original var id (that is, the root variable that is referenced -/// by the upvar) and the id of the closure expression. -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +/// Upvars do not get their own `NodeId`. Instead, we use the pair of +/// the original var ID (that is, the root variable that is referenced +/// by the upvar) and the ID of the closure expression. +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UpvarId { pub var_path: UpvarPath, pub closure_expr_id: LocalDefId, } -#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy)] +#[derive(Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, Copy, HashStable)] pub enum BorrowKind { /// Data must be immutable and is aliasable. ImmBorrow, - /// Data must be immutable but not aliasable. This kind of borrow + /// Data must be immutable but not aliasable. This kind of borrow /// cannot currently be expressed by the user and is used only in /// implicit closure bindings. It is needed when the closure /// is borrowing or mutating a mutable referent, e.g.: @@ -795,7 +785,7 @@ pub enum BorrowKind { /// Information describing the capture of an upvar. This is computed /// during `typeck`, specifically by `regionck`. -#[derive(PartialEq, Clone, Debug, Copy, RustcEncodable, RustcDecodable)] +#[derive(PartialEq, Clone, Debug, Copy, RustcEncodable, RustcDecodable, HashStable)] pub enum UpvarCapture<'tcx> { /// Upvar is captured by value. 
This is always true when the /// closure is labeled `move`, but can also be true in other cases @@ -806,7 +796,7 @@ pub enum UpvarCapture<'tcx> { ByRef(UpvarBorrow<'tcx>), } -#[derive(PartialEq, Clone, Copy, RustcEncodable, RustcDecodable)] +#[derive(PartialEq, Clone, Copy, RustcEncodable, RustcDecodable, HashStable)] pub struct UpvarBorrow<'tcx> { /// The kind of borrow: by-ref upvars have access to shared /// immutable borrows, which are not part of the normal language @@ -817,6 +807,7 @@ pub struct UpvarBorrow<'tcx> { pub region: ty::Region<'tcx>, } +pub type UpvarListMap = FxHashMap>; pub type UpvarCaptureMap<'tcx> = FxHashMap>; #[derive(Copy, Clone)] @@ -847,17 +838,18 @@ impl ty::EarlyBoundRegion { } } -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub enum GenericParamDefKind { Lifetime, Type { has_default: bool, object_lifetime_default: ObjectLifetimeDefault, synthetic: Option, - } + }, + Const, } -#[derive(Clone, RustcEncodable, RustcDecodable)] +#[derive(Clone, RustcEncodable, RustcDecodable, HashStable)] pub struct GenericParamDef { pub name: InternedString, pub def_id: DefId, @@ -897,6 +889,7 @@ impl GenericParamDef { pub struct GenericParamCount { pub lifetimes: usize, pub types: usize, + pub consts: usize, } /// Information about the formal type/lifetime parameters associated @@ -904,13 +897,14 @@ pub struct GenericParamCount { /// /// The ordering of parameters is the same as in `Subst` (excluding child generics): /// `Self` (optionally), `Lifetime` params..., `Type` params... -#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct Generics { pub parent: Option, pub parent_count: usize, pub params: Vec, /// Reverse map to the `index` field of each `GenericParamDef` + #[stable_hasher(ignore)] pub param_def_id_to_index: FxHashMap, pub has_self: bool, @@ -932,6 +926,7 @@ impl<'a, 'gcx, 'tcx> Generics { match param.kind { GenericParamDefKind::Lifetime => own_counts.lifetimes += 1, GenericParamDefKind::Type { .. } => own_counts.types += 1, + GenericParamDefKind::Const => own_counts.consts += 1, }; } @@ -941,7 +936,7 @@ impl<'a, 'gcx, 'tcx> Generics { pub fn requires_monomorphization(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { for param in &self.params { match param.kind { - GenericParamDefKind::Type { .. } => return true, + GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => return true, GenericParamDefKind::Lifetime => {} } } @@ -961,7 +956,7 @@ impl<'a, 'gcx, 'tcx> Generics { if let Some(index) = param.index.checked_sub(self.parent_count as u32) { let param = &self.params[index as usize]; match param.kind { - ty::GenericParamDefKind::Lifetime => param, + GenericParamDefKind::Lifetime => param, _ => bug!("expected lifetime parameter, but found another generic parameter") } } else { @@ -978,7 +973,7 @@ impl<'a, 'gcx, 'tcx> Generics { if let Some(index) = param.idx.checked_sub(self.parent_count as u32) { let param = &self.params[index as usize]; match param.kind { - ty::GenericParamDefKind::Type {..} => param, + GenericParamDefKind::Type { .. } => param, _ => bug!("expected type parameter, but found another generic parameter") } } else { @@ -986,10 +981,27 @@ impl<'a, 'gcx, 'tcx> Generics { .type_param(param, tcx) } } + + /// Returns the `ConstParameterDef` associated with this `ParamConst`. 
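The hunks above add a `Const` kind of generic parameter and teach `own_counts` to tally it alongside lifetimes and types. The following is a minimal, self-contained sketch of that classification using simplified stand-ins for the compiler's types (the type and function names here are illustrative, not rustc's):

```rust
#[allow(dead_code)]
#[derive(Debug)]
enum GenericParamDefKind {
    Lifetime,
    Type { has_default: bool },
    Const,
}

#[derive(Debug, Default)]
struct GenericParamCount {
    lifetimes: usize,
    types: usize,
    consts: usize,
}

fn own_counts(params: &[GenericParamDefKind]) -> GenericParamCount {
    let mut counts = GenericParamCount::default();
    for param in params {
        match param {
            GenericParamDefKind::Lifetime => counts.lifetimes += 1,
            GenericParamDefKind::Type { .. } => counts.types += 1,
            GenericParamDefKind::Const => counts.consts += 1,
        }
    }
    counts
}

fn main() {
    // Roughly what `fn f<'a, T, const N: usize>()` would contribute.
    let params = [
        GenericParamDefKind::Lifetime,
        GenericParamDefKind::Type { has_default: false },
        GenericParamDefKind::Const,
    ];
    println!("{:?}", own_counts(&params));
}
```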
+ pub fn const_param(&'tcx self, + param: &ParamConst, + tcx: TyCtxt<'a, 'gcx, 'tcx>) + -> &GenericParamDef { + if let Some(index) = param.index.checked_sub(self.parent_count as u32) { + let param = &self.params[index as usize]; + match param.kind { + GenericParamDefKind::Const => param, + _ => bug!("expected const parameter, but found another generic parameter") + } + } else { + tcx.generics_of(self.parent.expect("parent_count>0 but no parent?")) + .const_param(param, tcx) + } + } } /// Bounds on generics. -#[derive(Clone, Default)] +#[derive(Clone, Default, Debug, HashStable)] pub struct GenericPredicates<'tcx> { pub parent: Option, pub predicates: Vec<(Predicate<'tcx>, Span)>, @@ -999,14 +1011,14 @@ impl<'tcx> serialize::UseSpecializedEncodable for GenericPredicates<'tcx> {} impl<'tcx> serialize::UseSpecializedDecodable for GenericPredicates<'tcx> {} impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { - pub fn instantiate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) + pub fn instantiate(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: SubstsRef<'tcx>) -> InstantiatedPredicates<'tcx> { let mut instantiated = InstantiatedPredicates::empty(); self.instantiate_into(tcx, &mut instantiated, substs); instantiated } - pub fn instantiate_own(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: &Substs<'tcx>) + pub fn instantiate_own(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, substs: SubstsRef<'tcx>) -> InstantiatedPredicates<'tcx> { InstantiatedPredicates { predicates: self.predicates.iter().map(|(p, _)| p.subst(tcx, substs)).collect(), @@ -1015,7 +1027,7 @@ impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { fn instantiate_into(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, instantiated: &mut InstantiatedPredicates<'tcx>, - substs: &Substs<'tcx>) { + substs: SubstsRef<'tcx>) { if let Some(def_id) = self.parent { tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs); } @@ -1052,7 +1064,7 @@ impl<'a, 'gcx, 'tcx> GenericPredicates<'tcx> { } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub enum Predicate<'tcx> { /// Corresponds to `where Foo: Bar`. `Foo` here would be /// the `Self` type of the trait reference and `A`, `B`, and `C` @@ -1084,7 +1096,7 @@ pub enum Predicate<'tcx> { Subtype(PolySubtypePredicate<'tcx>), /// Constant initializer must evaluate successfully. - ConstEvaluatable(DefId, &'tcx Substs<'tcx>), + ConstEvaluatable(DefId, SubstsRef<'tcx>), } /// The crate outlives map is computed during typeck and contains the @@ -1093,6 +1105,7 @@ pub enum Predicate<'tcx> { /// HIR of every item in the local crate. Instead, use /// `tcx.inferred_outlives_of()` to get the outlives for a *particular* /// item. +#[derive(HashStable)] pub struct CratePredicatesMap<'tcx> { /// For each struct with outlive bounds, maps to a vector of the /// predicate of its outlive bounds. If an item has no outlives @@ -1100,6 +1113,7 @@ pub struct CratePredicatesMap<'tcx> { pub predicates: FxHashMap>>>, /// An empty vector, useful for cloning. + #[stable_hasher(ignore)] pub empty_predicate: Lrc>>, } @@ -1113,7 +1127,7 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { /// Performs a substitution suitable for going from a /// poly-trait-ref to supertraits that must hold if that /// poly-trait-ref holds. This is slightly different from a normal - /// substitution in terms of what happens with bound regions. See + /// substitution in terms of what happens with bound regions. See /// lengthy comment below for details. 
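For orientation, the `Predicate` variants above map onto familiar surface syntax; the small self-contained function below exercises several of them (the variant names in the comments follow the enum above, while the function itself is purely illustrative):

```rust
use std::fmt::Debug;

fn predicates_demo<'a, 'b: 'a, T, I>(_x: &'a T, _iter: I)
where
    T: Debug,               // a trait predicate (`Predicate::Trait`)
    T: 'a,                  // a type-outlives predicate (`Predicate::TypeOutlives`)
    I: Iterator<Item = u8>, // a trait predicate plus a projection predicate
                            // (`Predicate::Projection`: `I::Item == u8`)
{
    // `'b: 'a` in the generics is a region-outlives predicate
    // (`Predicate::RegionOutlives`).
}

fn main() {
    predicates_demo(&1u32, std::iter::empty::<u8>());
}
```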
pub fn subst_supertrait(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_ref: &ty::PolyTraitRef<'tcx>) @@ -1203,7 +1217,7 @@ impl<'a, 'gcx, 'tcx> Predicate<'tcx> { } } -#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct TraitPredicate<'tcx> { pub trait_ref: TraitRef<'tcx> } @@ -1231,7 +1245,8 @@ impl<'tcx> PolyTraitPredicate<'tcx> { } } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, + Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct OutlivesPredicate(pub A, pub B); // `A: B` pub type PolyOutlivesPredicate = ty::Binder>; pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate, @@ -1241,7 +1256,7 @@ pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate, pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder>; pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder>; -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct SubtypePredicate<'tcx> { pub a_is_expected: bool, pub a: Ty<'tcx>, @@ -1252,7 +1267,7 @@ pub type PolySubtypePredicate<'tcx> = ty::Binder>; /// This kind of predicate has no *direct* correspondent in the /// syntax, but it roughly corresponds to the syntactic forms: /// -/// 1. `T: TraitRef<..., Item=Type>` +/// 1. `T: TraitRef<..., Item = Type>` /// 2. `>::Item == Type` (NYI) /// /// In particular, form #1 is "desugared" to the combination of a @@ -1261,7 +1276,7 @@ pub type PolySubtypePredicate<'tcx> = ty::Binder>; /// equality between arbitrary types. Processing an instance of /// Form #2 eventually yields one of these `ProjectionPredicate` /// instances to normalize the LHS. -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct ProjectionPredicate<'tcx> { pub projection_ty: ProjectionTy<'tcx>, pub ty: Ty<'tcx>, @@ -1473,8 +1488,8 @@ impl<'tcx> Predicate<'tcx> { } /// Represents the bounds declared on a particular set of type -/// parameters. Should eventually be generalized into a flag list of -/// where clauses. You can obtain a `InstantiatedPredicates` list from a +/// parameters. Should eventually be generalized into a flag list of +/// where-clauses. You can obtain a `InstantiatedPredicates` list from a /// `GenericPredicates` by using the `instantiate` method. Note that this method /// reflects an important semantic invariant of `InstantiatedPredicates`: while /// the `GenericPredicates` are expressed in terms of the bound type @@ -1488,10 +1503,10 @@ impl<'tcx> Predicate<'tcx> { /// struct Foo> { ... } /// /// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like -/// `[[], [U:Bar]]`. Now if there were some particular reference +/// `[[], [U:Bar]]`. Now if there were some particular reference /// like `Foo`, then the `InstantiatedPredicates` would be `[[], /// [usize:Bar]]`. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct InstantiatedPredicates<'tcx> { pub predicates: Vec>, } @@ -1506,42 +1521,42 @@ impl<'tcx> InstantiatedPredicates<'tcx> { } } -/// "Universes" are used during type- and trait-checking in the -/// presence of `for<..>` binders to control what sets of names are -/// visible. 
Universes are arranged into a tree: the root universe -/// contains names that are always visible. Each child then adds a new -/// set of names that are visible, in addition to those of its parent. -/// We say that the child universe "extends" the parent universe with -/// new names. -/// -/// To make this more concrete, consider this program: -/// -/// ``` -/// struct Foo { } -/// fn bar(x: T) { -/// let y: for<'a> fn(&'a u8, Foo) = ...; -/// } -/// ``` -/// -/// The struct name `Foo` is in the root universe U0. But the type -/// parameter `T`, introduced on `bar`, is in an extended universe U1 -/// -- i.e., within `bar`, we can name both `T` and `Foo`, but outside -/// of `bar`, we cannot name `T`. Then, within the type of `y`, the -/// region `'a` is in a universe U2 that extends U1, because we can -/// name it inside the fn type but not outside. -/// -/// Universes are used to do type- and trait-checking around these -/// "forall" binders (also called **universal quantification**). The -/// idea is that when, in the body of `bar`, we refer to `T` as a -/// type, we aren't referring to any type in particular, but rather a -/// kind of "fresh" type that is distinct from all other types we have -/// actually declared. This is called a **placeholder** type, and we -/// use universes to talk about this. In other words, a type name in -/// universe 0 always corresponds to some "ground" type that the user -/// declared, but a type name in a non-zero universe is a placeholder -/// type -- an idealized representative of "types in general" that we -/// use for checking generic functions. newtype_index! { + /// "Universes" are used during type- and trait-checking in the + /// presence of `for<..>` binders to control what sets of names are + /// visible. Universes are arranged into a tree: the root universe + /// contains names that are always visible. Each child then adds a new + /// set of names that are visible, in addition to those of its parent. + /// We say that the child universe "extends" the parent universe with + /// new names. + /// + /// To make this more concrete, consider this program: + /// + /// ``` + /// struct Foo { } + /// fn bar(x: T) { + /// let y: for<'a> fn(&'a u8, Foo) = ...; + /// } + /// ``` + /// + /// The struct name `Foo` is in the root universe U0. But the type + /// parameter `T`, introduced on `bar`, is in an extended universe U1 + /// -- i.e., within `bar`, we can name both `T` and `Foo`, but outside + /// of `bar`, we cannot name `T`. Then, within the type of `y`, the + /// region `'a` is in a universe U2 that extends U1, because we can + /// name it inside the fn type but not outside. + /// + /// Universes are used to do type- and trait-checking around these + /// "forall" binders (also called **universal quantification**). The + /// idea is that when, in the body of `bar`, we refer to `T` as a + /// type, we aren't referring to any type in particular, but rather a + /// kind of "fresh" type that is distinct from all other types we have + /// actually declared. This is called a **placeholder** type, and we + /// use universes to talk about this. In other words, a type name in + /// universe 0 always corresponds to some "ground" type that the user + /// declared, but a type name in a non-zero universe is a placeholder + /// type -- an idealized representative of "types in general" that we + /// use for checking generic functions. 
pub struct UniverseIndex { DEBUG_FORMAT = "U{}", } @@ -1554,7 +1569,7 @@ impl UniverseIndex { /// Returns the "next" universe index in order -- this new index /// is considered to extend all previous universes. This - /// corresponds to entering a `forall` quantifier. So, for + /// corresponds to entering a `forall` quantifier. So, for /// example, suppose we have this type in universe `U`: /// /// ``` @@ -1616,7 +1631,7 @@ pub type PlaceholderType = Placeholder; /// When type checking, we use the `ParamEnv` to track /// details about the set of where-clauses that are in scope at this /// particular point. -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)] pub struct ParamEnv<'tcx> { /// Obligations that the caller must satisfy. This is basically /// the set of bounds on the in-scope type parameters, translated @@ -1627,41 +1642,48 @@ pub struct ParamEnv<'tcx> { /// want `Reveal::All` -- note that this is always paired with an /// empty environment. To get that, use `ParamEnv::reveal()`. pub reveal: traits::Reveal, + + /// If this `ParamEnv` comes from a call to `tcx.param_env(def_id)`, + /// register that `def_id` (useful for transitioning to the chalk trait + /// solver). + pub def_id: Option, } impl<'tcx> ParamEnv<'tcx> { /// Construct a trait environment suitable for contexts where - /// there are no where clauses in scope. Hidden types (like `impl + /// there are no where-clauses in scope. Hidden types (like `impl /// Trait`) are left hidden, so this is suitable for ordinary /// type-checking. #[inline] pub fn empty() -> Self { - Self::new(List::empty(), Reveal::UserFacing) + Self::new(List::empty(), Reveal::UserFacing, None) } - /// Construct a trait environment with no where clauses in scope + /// Construct a trait environment with no where-clauses in scope /// where the values of all `impl Trait` and other hidden types /// are revealed. This is suitable for monomorphized, post-typeck /// environments like codegen or doing optimizations. /// - /// N.B. If you want to have predicates in scope, use `ParamEnv::new`, + /// N.B., if you want to have predicates in scope, use `ParamEnv::new`, /// or invoke `param_env.with_reveal_all()`. #[inline] pub fn reveal_all() -> Self { - Self::new(List::empty(), Reveal::All) + Self::new(List::empty(), Reveal::All, None) } /// Construct a trait environment with the given set of predicates. #[inline] - pub fn new(caller_bounds: &'tcx List>, - reveal: Reveal) - -> Self { - ty::ParamEnv { caller_bounds, reveal } + pub fn new( + caller_bounds: &'tcx List>, + reveal: Reveal, + def_id: Option + ) -> Self { + ty::ParamEnv { caller_bounds, reveal, def_id } } /// Returns a new parameter environment with the same clauses, but /// which "reveals" the true results of projections in all cases - /// (even for associated types that are specializable). This is + /// (even for associated types that are specializable). This is /// the desired behavior during codegen and certain other special /// contexts; normally though we want to use `Reveal::UserFacing`, /// which is the default. @@ -1744,21 +1766,29 @@ impl<'a, 'gcx, T> HashStable> for ParamEnvAnd<'gcx, T> } } -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, HashStable)] pub struct Destructor { - /// The def-id of the destructor method + /// The `DefId` of the destructor method pub did: DefId, } bitflags! { + #[derive(HashStable)] pub struct AdtFlags: u32 { const NO_ADT_FLAGS = 0; + /// Indicates whether the ADT is an enum. 
const IS_ENUM = 1 << 0; + /// Indicates whether the ADT is a union. const IS_UNION = 1 << 1; + /// Indicates whether the ADT is a struct. const IS_STRUCT = 1 << 2; + /// Indicates whether the ADT is a struct and has a constructor. const HAS_CTOR = 1 << 3; + /// Indicates whether the type is a `PhantomData`. const IS_PHANTOM_DATA = 1 << 4; + /// Indicates whether the type has a `#[fundamental]` attribute. const IS_FUNDAMENTAL = 1 << 5; + /// Indicates whether the type is a `Box`. const IS_BOX = 1 << 6; /// Indicates whether the type is an `Arc`. const IS_ARC = 1 << 7; @@ -1771,6 +1801,7 @@ bitflags! { } bitflags! { + #[derive(HashStable)] pub struct VariantFlags: u32 { const NO_VARIANT_FLAGS = 0; /// Indicates whether the field list of this variant is `#[non_exhaustive]`. @@ -1778,60 +1809,88 @@ bitflags! { } } +/// Definition of a variant -- a struct's fields or a enum variant. #[derive(Debug)] pub struct VariantDef { - /// The variant's `DefId`. If this is a tuple-like struct, - /// this is the `DefId` of the struct's ctor. - pub did: DefId, - pub name: Name, // struct's name if this is a struct + /// `DefId` that identifies the variant itself. + /// If this variant belongs to a struct or union, then this is a copy of its `DefId`. + pub def_id: DefId, + /// `DefId` that identifies the variant's constructor. + /// If this variant is a struct variant, then this is `None`. + pub ctor_def_id: Option, + /// Variant or struct name. + pub ident: Ident, + /// Discriminant of this variant. pub discr: VariantDiscr, + /// Fields of this variant. pub fields: Vec, + /// Type of constructor of variant. pub ctor_kind: CtorKind, + /// Flags of the variant (e.g. is field list non-exhaustive)? flags: VariantFlags, + /// Recovered? + pub recovered: bool, } impl<'a, 'gcx, 'tcx> VariantDef { - /// Create a new `VariantDef`. + /// Creates a new `VariantDef`. + /// + /// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef` + /// represents an enum variant). /// - /// - `did` is the DefId used for the variant - for tuple-structs, it is the constructor DefId, - /// and for everything else, it is the variant DefId. - /// - `attribute_def_id` is the DefId that has the variant's attributes. - /// this is the struct DefId for structs, and the variant DefId for variants. + /// `ctor_did` is the `DefId` that identifies the constructor of unit or + /// tuple-variants/structs. If this is a `struct`-variant then this should be `None`. /// - /// Note that we *could* use the constructor DefId, because the constructor attributes - /// redirect to the base attributes, but compiling a small crate requires - /// loading the AdtDefs for all the structs in the universe (e.g., coherence for any + /// `parent_did` is the `DefId` of the `AdtDef` representing the enum or struct that + /// owns this variant. It is used for checking if a struct has `#[non_exhaustive]` w/out having + /// to go through the redirect of checking the ctor's attributes - but compiling a small crate + /// requires loading the `AdtDef`s for all the structs in the universe (e.g., coherence for any /// built-in trait), and we do not want to load attributes twice. /// /// If someone speeds up attribute loading to not be a performance concern, they can - /// remove this hack and use the constructor DefId everywhere. 
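The `IS_FIELD_LIST_NON_EXHAUSTIVE` flag above, which the rewritten constructor below derives from `#[non_exhaustive]` on either the parent type or the individual variant, corresponds to the following language feature (still feature-gated when this patch landed; shown here in plain form as a self-contained illustration):

```rust
// Inside the defining crate these behave like ordinary types; the
// restrictions (no literal construction, no exhaustive matching) only
// apply to downstream crates.
#[non_exhaustive]
pub struct Config {
    pub verbose: bool,
}

pub enum Error {
    NotFound,
    #[non_exhaustive]
    Parse { line: u32 },
}

fn main() {
    let cfg = Config { verbose: true };  // allowed: same crate
    let err = Error::Parse { line: 7 };  // allowed: same crate
    if let Error::Parse { line, .. } = err {
        println!("verbose={}, line={}", cfg.verbose, line);
    }
}
```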
- pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, - did: DefId, - name: Name, - discr: VariantDiscr, - fields: Vec, - adt_kind: AdtKind, - ctor_kind: CtorKind, - attribute_def_id: DefId) - -> Self - { - debug!("VariantDef::new({:?}, {:?}, {:?}, {:?}, {:?}, {:?}, {:?})", did, name, discr, - fields, adt_kind, ctor_kind, attribute_def_id); + /// remove this hack and use the constructor `DefId` everywhere. + pub fn new( + tcx: TyCtxt<'a, 'gcx, 'tcx>, + ident: Ident, + variant_did: Option, + ctor_def_id: Option, + discr: VariantDiscr, + fields: Vec, + ctor_kind: CtorKind, + adt_kind: AdtKind, + parent_did: DefId, + recovered: bool, + ) -> Self { + debug!( + "VariantDef::new(ident = {:?}, variant_did = {:?}, ctor_def_id = {:?}, discr = {:?}, + fields = {:?}, ctor_kind = {:?}, adt_kind = {:?}, parent_did = {:?})", + ident, variant_did, ctor_def_id, discr, fields, ctor_kind, adt_kind, parent_did, + ); + let mut flags = VariantFlags::NO_VARIANT_FLAGS; - if adt_kind == AdtKind::Struct && tcx.has_attr(attribute_def_id, "non_exhaustive") { - debug!("found non-exhaustive field list for {:?}", did); + if adt_kind == AdtKind::Struct && tcx.has_attr(parent_did, "non_exhaustive") { + debug!("found non-exhaustive field list for {:?}", parent_did); flags = flags | VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE; + } else if let Some(variant_did) = variant_did { + if tcx.has_attr(variant_did, "non_exhaustive") { + debug!("found non-exhaustive field list for {:?}", variant_did); + flags = flags | VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE; + } } + VariantDef { - did, - name, + def_id: variant_did.unwrap_or(parent_did), + ctor_def_id, + ident, discr, fields, ctor_kind, - flags + flags, + recovered, } } + /// Is this field list non-exhaustive? #[inline] pub fn is_field_list_non_exhaustive(&self) -> bool { self.flags.intersects(VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE) @@ -1839,15 +1898,17 @@ impl<'a, 'gcx, 'tcx> VariantDef { } impl_stable_hash_for!(struct VariantDef { - did, - name, + def_id, + ctor_def_id, + ident -> (ident.name), discr, fields, ctor_kind, - flags + flags, + recovered }); -#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable, HashStable)] pub enum VariantDiscr { /// Explicit value for this variant, i.e., `X = 123`. /// The `DefId` corresponds to the embedded constant. @@ -1860,21 +1921,25 @@ pub enum VariantDiscr { Relative(u32), } -#[derive(Debug)] +#[derive(Debug, HashStable)] pub struct FieldDef { pub did: DefId, + #[stable_hasher(project(name))] pub ident: Ident, pub vis: Visibility, } /// The definition of an abstract data type -- a struct or enum. /// -/// These are all interned (by `intern_adt_def`) into the `adt_defs` -/// table. +/// These are all interned (by `intern_adt_def`) into the `adt_defs` table. pub struct AdtDef { + /// `DefId` of the struct, enum or union item. pub did: DefId, + /// Variants of the ADT. If this is a struct or enum, then there will be a single variant. pub variants: IndexVec, + /// Flags of the ADT (e.g. is this a struct? is this non-exhaustive?) flags: AdtFlags, + /// Repr options provided by the user. 
pub repr: ReprOptions, } @@ -1981,8 +2046,6 @@ impl_stable_hash_for!(struct ReprFlags { bits }); - - /// Represents the repr options provided by the user, #[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Default)] pub struct ReprOptions { @@ -2032,7 +2095,7 @@ impl ReprOptions { } // This is here instead of layout because the choice must make it into metadata. - if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) { + if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) { flags.insert(ReprFlags::IS_LINEAR); } ReprOptions { int: size, align: max_align, pack: min_pack, flags: flags } @@ -2061,12 +2124,13 @@ impl ReprOptions { } /// Returns `true` if this `#[repr()]` should inhibit struct field reordering - /// optimizations, such as with repr(C) or repr(packed(1)). + /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr()`. pub fn inhibit_struct_field_reordering_opt(&self) -> bool { - !(self.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty() || (self.pack == 1) + self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.pack == 1 || + self.int.is_some() } - /// Returns true if this `#[repr()]` should inhibit union abi optimisations + /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations. pub fn inhibit_union_abi_opt(&self) -> bool { self.c() } @@ -2074,11 +2138,14 @@ impl ReprOptions { } impl<'a, 'gcx, 'tcx> AdtDef { - fn new(tcx: TyCtxt<'_, '_, '_>, - did: DefId, - kind: AdtKind, - variants: IndexVec, - repr: ReprOptions) -> Self { + /// Creates a new `AdtDef`. + fn new( + tcx: TyCtxt<'_, '_, '_>, + did: DefId, + kind: AdtKind, + variants: IndexVec, + repr: ReprOptions + ) -> Self { debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr); let mut flags = AdtFlags::NO_ADT_FLAGS; @@ -2086,19 +2153,15 @@ impl<'a, 'gcx, 'tcx> AdtDef { debug!("found non-exhaustive variant list for {:?}", did); flags = flags | AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE; } + flags |= match kind { AdtKind::Enum => AdtFlags::IS_ENUM, AdtKind::Union => AdtFlags::IS_UNION, AdtKind::Struct => AdtFlags::IS_STRUCT, }; - if let AdtKind::Struct = kind { - let variant_def = &variants[VariantIdx::new(0)]; - let def_key = tcx.def_key(variant_def.did); - match def_key.disambiguated_data.data { - DefPathData::StructCtor => flags |= AdtFlags::HAS_CTOR, - _ => (), - } + if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() { + flags |= AdtFlags::HAS_CTOR; } let attrs = tcx.get_attrs(did); @@ -2126,21 +2189,25 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } + /// Returns `true` if this is a struct. #[inline] pub fn is_struct(&self) -> bool { self.flags.contains(AdtFlags::IS_STRUCT) } + /// Returns `true` if this is a union. #[inline] pub fn is_union(&self) -> bool { self.flags.contains(AdtFlags::IS_UNION) } + /// Returns `true` if this is a enum. #[inline] pub fn is_enum(&self) -> bool { self.flags.contains(AdtFlags::IS_ENUM) } + /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`. #[inline] pub fn is_variant_list_non_exhaustive(&self) -> bool { self.flags.contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE) @@ -2158,6 +2225,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } + /// Returns a description of this abstract data type. pub fn descr(&self) -> &'static str { match self.adt_kind() { AdtKind::Struct => "struct", @@ -2166,6 +2234,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { } } + /// Returns a description of a variant of this abstract data type. 
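The change to `inhibit_struct_field_reordering_opt` above adds an explicit integer `repr` to the set of representations that must keep declaration order, alongside `repr(C)` and `repr(packed(1))`. Below is a small self-contained comparison of an order-preserving layout with the default one (the concrete sizes in the comments are typical for common targets, not guaranteed by this sketch):

```rust
use std::mem::size_of;

// `repr(C)`: field order is kept, with C-style padding.
#[allow(dead_code)]
#[repr(C)]
struct KeepOrder {
    a: u8,
    b: u32,
    c: u8,
}

// Default repr: the compiler is free to reorder fields to shrink the type.
#[allow(dead_code)]
struct MayReorder {
    a: u8,
    b: u32,
    c: u8,
}

fn main() {
    println!("repr(C): {} bytes", size_of::<KeepOrder>());  // typically 12
    println!("default: {} bytes", size_of::<MayReorder>()); // typically 8
}
```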
#[inline] pub fn variant_descr(&self) -> &'static str { match self.adt_kind() { @@ -2181,14 +2250,14 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.flags.contains(AdtFlags::HAS_CTOR) } - /// Returns whether this type is `#[fundamental]` for the purposes + /// Returns `true` if this type is `#[fundamental]` for the purposes /// of coherence checking. #[inline] pub fn is_fundamental(&self) -> bool { self.flags.contains(AdtFlags::IS_FUNDAMENTAL) } - /// Returns `true` if this is PhantomData. + /// Returns `true` if this is `PhantomData`. #[inline] pub fn is_phantom_data(&self) -> bool { self.flags.contains(AdtFlags::IS_PHANTOM_DATA) @@ -2210,7 +2279,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.flags.contains(AdtFlags::IS_BOX) } - /// Returns whether this type has a destructor. + /// Returns `true` if this type has a destructor. pub fn has_dtor(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { self.destructor(tcx).is_some() } @@ -2238,25 +2307,35 @@ impl<'a, 'gcx, 'tcx> AdtDef { self.variants.iter().all(|v| v.fields.is_empty()) } + /// Return a `VariantDef` given a variant id. pub fn variant_with_id(&self, vid: DefId) -> &VariantDef { - self.variants - .iter() - .find(|v| v.did == vid) + self.variants.iter().find(|v| v.def_id == vid) .expect("variant_with_id: unknown variant") } + /// Return a `VariantDef` given a constructor id. + pub fn variant_with_ctor_id(&self, cid: DefId) -> &VariantDef { + self.variants.iter().find(|v| v.ctor_def_id == Some(cid)) + .expect("variant_with_ctor_id: unknown variant") + } + + /// Return the index of `VariantDef` given a variant id. pub fn variant_index_with_id(&self, vid: DefId) -> VariantIdx { - self.variants - .iter_enumerated() - .find(|(_, v)| v.did == vid) - .expect("variant_index_with_id: unknown variant") - .0 + self.variants.iter_enumerated().find(|(_, v)| v.def_id == vid) + .expect("variant_index_with_id: unknown variant").0 + } + + /// Return the index of `VariantDef` given a constructor id. + pub fn variant_index_with_ctor_id(&self, cid: DefId) -> VariantIdx { + self.variants.iter_enumerated().find(|(_, v)| v.ctor_def_id == Some(cid)) + .expect("variant_index_with_ctor_id: unknown variant").0 } pub fn variant_of_def(&self, def: Def) -> &VariantDef { match def { - Def::Variant(vid) | Def::VariantCtor(vid, ..) => self.variant_with_id(vid), - Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | + Def::Variant(vid) => self.variant_with_id(vid), + Def::Ctor(cid, ..) => self.variant_with_ctor_id(cid), + Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) | Def::SelfCtor(..) => self.non_enum_variant(), _ => bug!("unexpected def {:?} in variant_of_def", def) @@ -2271,7 +2350,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { ) -> Option> { let param_env = ParamEnv::empty(); let repr_type = self.repr.discr_type(); - let substs = Substs::identity_for_item(tcx.global_tcx(), expr_did); + let substs = InternalSubsts::identity_for_item(tcx.global_tcx(), expr_did); let instance = ty::Instance::new(expr_did, substs); let cid = GlobalId { instance, @@ -2288,7 +2367,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { }) } else { info!("invalid enum discriminant: {:#?}", val); - ::mir::interpret::struct_error( + crate::mir::interpret::struct_error( tcx.at(tcx.def_span(expr_did)), "constant evaluation of enum discriminant resulted in non-integer", ).emit(); @@ -2331,7 +2410,7 @@ impl<'a, 'gcx, 'tcx> AdtDef { }) } - /// Compute the discriminant value used by a specific variant. + /// Computes the discriminant value used by a specific variant. 
/// Unlike `discriminants`, this is (amortized) constant-time, /// only doing at most one query for evaluating an explicit /// discriminant (the last one before the requested variant), @@ -2347,9 +2426,9 @@ impl<'a, 'gcx, 'tcx> AdtDef { explicit_value.checked_add(tcx, offset as u128).0 } - /// Yields a DefId for the discriminant and an offset to add to it + /// Yields a `DefId` for the discriminant and an offset to add to it /// Alternatively, if there is no explicit discriminant, returns the - /// inferred discriminant directly + /// inferred discriminant directly. pub fn discriminant_def_for_variant( &self, variant_index: VariantIdx, @@ -2379,30 +2458,17 @@ impl<'a, 'gcx, 'tcx> AdtDef { } /// Returns a list of types such that `Self: Sized` if and only - /// if that type is Sized, or `TyErr` if this type is recursive. + /// if that type is `Sized`, or `TyErr` if this type is recursive. /// - /// Oddly enough, checking that the sized-constraint is Sized is + /// Oddly enough, checking that the sized-constraint is `Sized` is /// actually more expressive than checking all members: - /// the Sized trait is inductive, so an associated type that references - /// Self would prevent its containing ADT from being Sized. + /// the `Sized` trait is inductive, so an associated type that references + /// `Self` would prevent its containing ADT from being `Sized`. /// /// Due to normalization being eager, this applies even if - /// the associated type is behind a pointer, e.g., issue #31299. + /// the associated type is behind a pointer (e.g., issue #31299). pub fn sized_constraint(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> &'tcx [Ty<'tcx>] { - match tcx.try_adt_sized_constraint(DUMMY_SP, self.did) { - Ok(tys) => tys, - Err(mut bug) => { - debug!("adt_sized_constraint: {:?} is recursive", self); - // This should be reported as an error by `check_representable`. - // - // Consider the type as Sized in the meanwhile to avoid - // further errors. Delay our `bug` diagnostic here to get - // emitted later as well in case we accidentally otherwise don't - // emit an error. - bug.delay_as_bug(); - tcx.intern_type_list(&[tcx.types.err]) - } - } + tcx.adt_sized_constraint(self.did).0 } fn sized_constraint_for_ty(&self, @@ -2486,18 +2552,19 @@ impl<'a, 'gcx, 'tcx> AdtDef { } impl<'a, 'gcx, 'tcx> FieldDef { - pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: &Substs<'tcx>) -> Ty<'tcx> { + pub fn ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, subst: SubstsRef<'tcx>) -> Ty<'tcx> { tcx.type_of(self.did).subst(tcx, subst) } } -/// Represents the various closure traits in the Rust language. This +/// Represents the various closure traits in the language. This /// will determine the type of the environment (`self`, in the /// desugaring) argument that the closure expects. /// /// You can get the environment type of a closure using /// `tcx.closure_env_ty()`. -#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, + RustcEncodable, RustcDecodable, HashStable)] pub enum ClosureKind { // Warning: Ordering is significant here! The ordering is chosen // because the trait Fn is a subtrait of FnMut and so in turn, and @@ -2563,7 +2630,7 @@ impl<'tcx> TyS<'tcx> { TypeWalker::new(self) } - /// Iterator that walks the immediate children of `self`. Hence + /// Iterator that walks the immediate children of `self`. Hence /// `Foo, u32>` yields the sequence `[Bar, u32]` /// (but not `i32`, like `walk`). 
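The ordering warning on `ClosureKind` above relies on `Fn` being a subtrait of `FnMut`, and `FnMut` of `FnOnce`; a self-contained illustration of that chain:

```rust
fn call<F: Fn()>(f: F) { f() }
fn call_mut<F: FnMut()>(mut f: F) { f() }
fn call_once<F: FnOnce()>(f: F) { f() }

fn main() {
    let msg = String::from("hi");
    // Only reads `msg`, so this closure implements `Fn`; it is also `Copy`
    // because its only capture is a shared reference.
    let greet = || println!("{}", msg);

    call(greet);      // works where `Fn` is required...
    call_mut(greet);  // ...where `FnMut` is required...
    call_once(greet); // ...and where `FnOnce` is required.
}
```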
pub fn walk_shallow(&'tcx self) -> smallvec::IntoIter> { @@ -2571,7 +2638,7 @@ impl<'tcx> TyS<'tcx> { } /// Walks `ty` and any types appearing within `ty`, invoking the - /// callback `f` on each type. If the callback returns false, then the + /// callback `f` on each type. If the callback returns `false`, then the /// children of the current type are ignored. /// /// Note: prefer `ty.walk()` where possible. @@ -2637,12 +2704,51 @@ impl<'gcx> ::std::ops::Deref for Attributes<'gcx> { } } +#[derive(Debug, PartialEq, Eq)] +pub enum ImplOverlapKind { + /// These impls are always allowed to overlap. + Permitted, + /// These impls are allowed to overlap, but that raises + /// an issue #33140 future-compatibility warning. + /// + /// Some background: in Rust 1.0, the trait-object types `Send + Sync` (today's + /// `dyn Send + Sync`) and `Sync + Send` (now `dyn Sync + Send`) were different. + /// + /// The widely-used version 0.1.0 of the crate `traitobject` had accidentally relied + /// that difference, making what reduces to the following set of impls: + /// + /// ``` + /// trait Trait {} + /// impl Trait for dyn Send + Sync {} + /// impl Trait for dyn Sync + Send {} + /// ``` + /// + /// Obviously, once we made these types be identical, that code causes a coherence + /// error and a fairly big headache for us. However, luckily for us, the trait + /// `Trait` used in this case is basically a marker trait, and therefore having + /// overlapping impls for it is sound. + /// + /// To handle this, we basically regard the trait as a marker trait, with an additional + /// future-compatibility warning. To avoid accidentally "stabilizing" this feature, + /// it has the following restrictions: + /// + /// 1. The trait must indeed be a marker-like trait (i.e., no items), and must be + /// positive impls. + /// 2. The trait-ref of both impls must be equal. + /// 3. The trait-ref of both impls must be a trait object type consisting only of + /// marker traits. + /// 4. Neither of the impls can have any where-clauses. + /// + /// Once `traitobject` 0.1.0 is no longer an active concern, this hack can be removed. + Issue33140 +} + impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub fn body_tables(self, body: hir::BodyId) -> &'gcx TypeckTables<'gcx> { self.typeck_tables_of(self.hir().body_owner_def_id(body)) } - /// Returns an iterator of the def-ids for all body-owners in this + /// Returns an iterator of the `DefId`s for all body-owners in this /// crate. If you would prefer to iterate over the bodies /// themselves, you can do `self.hir().krate().body_ids.iter()`. 
pub fn body_owners( @@ -2687,8 +2793,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } pub fn opt_associated_item(self, def_id: DefId) -> Option { - let is_associated_item = if let Some(node_id) = self.hir().as_local_node_id(def_id) { - match self.hir().get(node_id) { + let is_associated_item = if let Some(hir_id) = self.hir().as_local_hir_id(def_id) { + match self.hir().get_by_hir_id(hir_id) { Node::TraitItem(_) | Node::ImplItem(_) => true, _ => false, } @@ -2711,7 +2817,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { parent_vis: &hir::Visibility, trait_item_ref: &hir::TraitItemRef) -> AssociatedItem { - let def_id = self.hir().local_def_id(trait_item_ref.id.node_id); + let def_id = self.hir().local_def_id_from_hir_id(trait_item_ref.id.hir_id); let (kind, has_self) = match trait_item_ref.kind { hir::AssociatedItemKind::Const => (ty::AssociatedKind::Const, false), hir::AssociatedItemKind::Method { has_self } => { @@ -2725,7 +2831,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ident: trait_item_ref.ident, kind, // Visibility of trait items is inherited from their traits. - vis: Visibility::from_hir(parent_vis, trait_item_ref.id.node_id, self), + vis: Visibility::from_hir(parent_vis, trait_item_ref.id.hir_id, self), defaultness: trait_item_ref.defaultness, def_id, container: TraitContainer(parent_def_id), @@ -2737,7 +2843,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { parent_def_id: DefId, impl_item_ref: &hir::ImplItemRef) -> AssociatedItem { - let def_id = self.hir().local_def_id(impl_item_ref.id.node_id); + let def_id = self.hir().local_def_id_from_hir_id(impl_item_ref.id.hir_id); let (kind, has_self) = match impl_item_ref.kind { hir::AssociatedItemKind::Const => (ty::AssociatedKind::Const, false), hir::AssociatedItemKind::Method { has_self } => { @@ -2751,7 +2857,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ident: impl_item_ref.ident, kind, // Visibility of trait impl items doesn't matter. - vis: ty::Visibility::from_hir(&impl_item_ref.vis, impl_item_ref.id.node_id, self), + vis: ty::Visibility::from_hir(&impl_item_ref.vis, impl_item_ref.id.hir_id, self), defaultness: impl_item_ref.defaultness, def_id, container: ImplContainer(parent_def_id), @@ -2759,14 +2865,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - pub fn field_index(self, node_id: NodeId, tables: &TypeckTables<'_>) -> usize { - let hir_id = self.hir().node_to_hir_id(node_id); + pub fn field_index(self, hir_id: hir::HirId, tables: &TypeckTables<'_>) -> usize { tables.field_indices().get(hir_id).cloned().expect("no index for a field") } pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option { variant.fields.iter().position(|field| { - self.adjust_ident(ident, variant.did, DUMMY_NODE_ID).0 == field.ident.modern() + self.adjust_ident(ident, variant.def_id, hir::DUMMY_HIR_ID).0 == field.ident.modern() }) } @@ -2788,8 +2893,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// Returns `true` if the impls are the same polarity and the trait either /// has no items or is annotated #[marker] and prevents item overrides. 
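The checks above (and the `ImplOverlapKind` documentation earlier) concern marker traits: item-less traits for which overlapping impls are sound. Here is a minimal sketch of the feature being gated, using the unstable `marker_trait_attr` feature (nightly-only; whether a given pair of impls is accepted is decided by the logic below, so treat this as an illustration rather than a specification):

```rust
// Requires a nightly toolchain with `marker_trait_attr`.
#![feature(marker_trait_attr)]

#[marker]
trait Marked {}

// These blanket impls overlap (plenty of types are both `Send` and `Sync`);
// that is accepted only because `Marked` is a marker trait with no items.
impl<T: Send> Marked for T {}
impl<T: Sync> Marked for T {}

fn assert_marked<T: Marked>() {}

fn main() {
    assert_marked::<u32>();
}
```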
- pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) -> bool { - if self.features().overlapping_marker_traits { + pub fn impls_are_allowed_to_overlap(self, def_id1: DefId, def_id2: DefId) + -> Option + { + let is_legit = if self.features().overlapping_marker_traits { let trait1_is_empty = self.impl_trait_ref(def_id1) .map_or(false, |trait_ref| { self.associated_item_def_ids(trait_ref.def_id).is_empty() @@ -2801,7 +2908,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.impl_polarity(def_id1) == self.impl_polarity(def_id2) && trait1_is_empty && trait2_is_empty - } else if self.features().marker_trait_attr { + } else { let is_marker_impl = |def_id: DefId| -> bool { let trait_ref = self.impl_trait_ref(def_id); trait_ref.map_or(false, |tr| self.trait_def(tr.def_id).is_marker) @@ -2809,64 +2916,76 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.impl_polarity(def_id1) == self.impl_polarity(def_id2) && is_marker_impl(def_id1) && is_marker_impl(def_id2) + }; + + if is_legit { + debug!("impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted)", + def_id1, def_id2); + Some(ImplOverlapKind::Permitted) } else { - false + if let Some(self_ty1) = self.issue33140_self_ty(def_id1) { + if let Some(self_ty2) = self.issue33140_self_ty(def_id2) { + if self_ty1 == self_ty2 { + debug!("impls_are_allowed_to_overlap({:?}, {:?}) - issue #33140 HACK", + def_id1, def_id2); + return Some(ImplOverlapKind::Issue33140); + } else { + debug!("impls_are_allowed_to_overlap({:?}, {:?}) - found {:?} != {:?}", + def_id1, def_id2, self_ty1, self_ty2); + } + } + } + + debug!("impls_are_allowed_to_overlap({:?}, {:?}) = None", + def_id1, def_id2); + None } } - // Returns `ty::VariantDef` if `def` refers to a struct, - // or variant or their constructors, panics otherwise. + /// Returns `ty::VariantDef` if `def` refers to a struct, + /// or variant or their constructors, panics otherwise. pub fn expect_variant_def(self, def: Def) -> &'tcx VariantDef { match def { - Def::Variant(did) | Def::VariantCtor(did, ..) => { - let enum_did = self.parent_def_id(did).unwrap(); + Def::Variant(did) => { + let enum_did = self.parent(did).unwrap(); self.adt_def(enum_did).variant_with_id(did) } Def::Struct(did) | Def::Union(did) => { self.adt_def(did).non_enum_variant() } - Def::StructCtor(ctor_did, ..) => { - let did = self.parent_def_id(ctor_did).expect("struct ctor has no parent"); - self.adt_def(did).non_enum_variant() + Def::Ctor(variant_ctor_did, CtorOf::Variant, ..) => { + let variant_did = self.parent(variant_ctor_did).unwrap(); + let enum_did = self.parent(variant_did).unwrap(); + self.adt_def(enum_did).variant_with_ctor_id(variant_ctor_did) + } + Def::Ctor(ctor_did, CtorOf::Struct, ..) => { + let struct_did = self.parent(ctor_did).expect("struct ctor has no parent"); + self.adt_def(struct_did).non_enum_variant() } _ => bug!("expect_variant_def used with unexpected def {:?}", def) } } - /// Given a `VariantDef`, returns the def-id of the `AdtDef` of which it is a part. - pub fn adt_def_id_of_variant(self, variant_def: &'tcx VariantDef) -> DefId { - let def_key = self.def_key(variant_def.did); - match def_key.disambiguated_data.data { - // for enum variants and tuple structs, the def-id of the ADT itself - // is the *parent* of the variant - DefPathData::EnumVariant(..) 
| DefPathData::StructCtor => - DefId { krate: variant_def.did.krate, index: def_key.parent.unwrap() }, - - // otherwise, for structs and unions, they share a def-id - _ => variant_def.did, - } - } - pub fn item_name(self, id: DefId) -> InternedString { if id.index == CRATE_DEF_INDEX { self.original_crate_name(id.krate).as_interned_str() } else { let def_key = self.def_key(id); - // The name of a StructCtor is that of its struct parent. - if let hir_map::DefPathData::StructCtor = def_key.disambiguated_data.data { - self.item_name(DefId { - krate: id.krate, - index: def_key.parent.unwrap() - }) - } else { - def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| { + match def_key.disambiguated_data.data { + // The name of a constructor is that of its parent. + hir_map::DefPathData::Ctor => + self.item_name(DefId { + krate: id.krate, + index: def_key.parent.unwrap() + }), + _ => def_key.disambiguated_data.data.get_opt_name().unwrap_or_else(|| { bug!("item_name: no name for {:?}", self.def_path(id)); - }) + }), } } } - /// Return the possibly-auto-generated MIR of a (DefId, Subst) pair. + /// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair. pub fn instance_mir(self, instance: ty::InstanceDef<'gcx>) -> &'gcx Mir<'gcx> { @@ -2886,26 +3005,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Given the DefId of an item, returns its MIR, borrowed immutably. - /// Returns None if there is no MIR for the DefId - pub fn maybe_optimized_mir(self, did: DefId) -> Option<&'gcx Mir<'gcx>> { - if self.is_mir_available(did) { - Some(self.optimized_mir(did)) - } else { - None - } - } - - /// Get the attributes of a definition. + /// Gets the attributes of a definition. pub fn get_attrs(self, did: DefId) -> Attributes<'gcx> { - if let Some(id) = self.hir().as_local_node_id(did) { - Attributes::Borrowed(self.hir().attrs(id)) + if let Some(id) = self.hir().as_local_hir_id(did) { + Attributes::Borrowed(self.hir().attrs_by_hir_id(id)) } else { Attributes::Owned(self.item_attrs(did)) } } - /// Determine whether an item is annotated with an attribute. + /// Determines whether an item is annotated with an attribute. pub fn has_attr(self, did: DefId, attr: &str) -> bool { attr::contains_name(&self.get_attrs(did), attr) } @@ -2919,14 +3028,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { self.optimized_mir(def_id).generator_layout.as_ref().unwrap() } - /// Given the def-id of an impl, return the def_id of the trait it implements. - /// If it implements no trait, return `None`. + /// Given the `DefId` of an impl, returns the `DefId` of the trait it implements. + /// If it implements no trait, returns `None`. pub fn trait_id_of_impl(self, def_id: DefId) -> Option { self.impl_trait_ref(def_id).map(|tr| tr.def_id) } - /// If the given defid describes a method belonging to an impl, return the - /// def-id of the impl that the method belongs to. Otherwise, return `None`. + /// If the given defid describes a method belonging to an impl, returns the + /// `DefId` of the impl that the method belongs to; otherwise, returns `None`. pub fn impl_of_method(self, def_id: DefId) -> Option { let item = if def_id.krate != LOCAL_CRATE { if let Some(Def::Method(_)) = self.describe_def(def_id) { @@ -2950,21 +3059,21 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// with the name of the crate containing the impl. 
pub fn span_of_impl(self, impl_did: DefId) -> Result { if impl_did.is_local() { - let node_id = self.hir().as_local_node_id(impl_did).unwrap(); - Ok(self.hir().span(node_id)) + let hir_id = self.hir().as_local_hir_id(impl_did).unwrap(); + Ok(self.hir().span_by_hir_id(hir_id)) } else { Err(self.crate_name(impl_did.krate)) } } - // Hygienically compare a use-site name (`use_name`) for a field or an associated item with its - // supposed definition name (`def_name`). The method also needs `DefId` of the supposed - // definition's parent/scope to perform comparison. + /// Hygienically compares a use-site name (`use_name`) for a field or an associated item with + /// its supposed definition name (`def_name`). The method also needs `DefId` of the supposed + /// definition's parent/scope to perform comparison. pub fn hygienic_eq(self, use_name: Ident, def_name: Ident, def_parent_def_id: DefId) -> bool { - self.adjust_ident(use_name, def_parent_def_id, DUMMY_NODE_ID).0 == def_name.modern() + self.adjust_ident(use_name, def_parent_def_id, hir::DUMMY_HIR_ID).0 == def_name.modern() } - pub fn adjust_ident(self, mut ident: Ident, scope: DefId, block: NodeId) -> (Ident, DefId) { + pub fn adjust_ident(self, mut ident: Ident, scope: DefId, block: hir::HirId) -> (Ident, DefId) { ident = ident.modern(); let target_expansion = match scope.krate { LOCAL_CRATE => self.hir().definitions().expansion_that_defined(scope.index), @@ -2973,8 +3082,8 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { let scope = match ident.span.adjust(target_expansion) { Some(actual_expansion) => self.hir().definitions().parent_module_of_macro_def(actual_expansion), - None if block == DUMMY_NODE_ID => DefId::local(CRATE_DEF_INDEX), // Dummy DefId - None => self.hir().get_module_parent(block), + None if block == hir::DUMMY_HIR_ID => DefId::local(CRATE_DEF_INDEX), // Dummy DefId + None => self.hir().get_module_parent_by_hir_id(block), }; (ident, scope) } @@ -2997,10 +3106,10 @@ impl Iterator for AssociatedItemsIterator<'_, '_, '_> { } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub fn with_freevars(self, fid: NodeId, f: F) -> T where + pub fn with_freevars(self, fid: HirId, f: F) -> T where F: FnOnce(&[hir::Freevar]) -> T, { - let def_id = self.hir().local_def_id(fid); + let def_id = self.hir().local_def_id_from_hir_id(fid); match self.freevars(def_id) { None => f(&[]), Some(d) => f(&d), @@ -3009,13 +3118,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> AssociatedItem { - let id = tcx.hir().as_local_node_id(def_id).unwrap(); - let parent_id = tcx.hir().get_parent(id); - let parent_def_id = tcx.hir().local_def_id(parent_id); - let parent_item = tcx.hir().expect_item(parent_id); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let parent_id = tcx.hir().get_parent_item(id); + let parent_def_id = tcx.hir().local_def_id_from_hir_id(parent_id); + let parent_item = tcx.hir().expect_item_by_hir_id(parent_id); match parent_item.node { hir::ItemKind::Impl(.., ref impl_item_refs) => { - if let Some(impl_item_ref) = impl_item_refs.iter().find(|i| i.id.node_id == id) { + if let Some(impl_item_ref) = impl_item_refs.iter().find(|i| i.id.hir_id == id) { let assoc_item = tcx.associated_item_from_impl_item_ref(parent_def_id, impl_item_ref); debug_assert_eq!(assoc_item.def_id, def_id); @@ -3024,7 +3133,7 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Asso } hir::ItemKind::Trait(.., ref trait_item_refs) => { - if let 
Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.node_id == id) { + if let Some(trait_item_ref) = trait_item_refs.iter().find(|i| i.id.hir_id == id) { let assoc_item = tcx.associated_item_from_trait_item_ref(parent_def_id, &parent_item.vis, trait_item_ref); @@ -3041,7 +3150,10 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Asso parent_item.node) } -/// Calculates the Sized-constraint. +#[derive(Clone, HashStable)] +pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]); + +/// Calculates the `Sized` constraint. /// /// In fact, there are only a few options for the types in the constraint: /// - an obviously-unsized type @@ -3052,7 +3164,7 @@ fn associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Asso /// check should catch this case. fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> &'tcx [Ty<'tcx>] { + -> AdtSizedConstraint<'tcx> { let def = tcx.adt_def(def_id); let result = tcx.mk_type_list(def.variants.iter().flat_map(|v| { @@ -3063,25 +3175,25 @@ fn adt_sized_constraint<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("adt_sized_constraint: {:?} => {:?}", def, result); - result + AdtSizedConstraint(result) } fn associated_item_def_ids<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Lrc> { - let id = tcx.hir().as_local_node_id(def_id).unwrap(); - let item = tcx.hir().expect_item(id); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let item = tcx.hir().expect_item_by_hir_id(id); let vec: Vec<_> = match item.node { hir::ItemKind::Trait(.., ref trait_item_refs) => { trait_item_refs.iter() .map(|trait_item_ref| trait_item_ref.id) - .map(|id| tcx.hir().local_def_id(id.node_id)) + .map(|id| tcx.hir().local_def_id_from_hir_id(id.hir_id)) .collect() } hir::ItemKind::Impl(.., ref impl_item_refs) => { impl_item_refs.iter() .map(|impl_item_ref| impl_item_ref.id) - .map(|id| tcx.hir().local_def_id(id.node_id)) + .map(|id| tcx.hir().local_def_id_from_hir_id(id.hir_id)) .collect() } hir::ItemKind::TraitAlias(..) => vec![], @@ -3094,9 +3206,9 @@ fn def_span<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Span { tcx.hir().span_if_local(def_id).unwrap() } -/// If the given def ID describes an item belonging to a trait, -/// return the ID of the trait that the trait item belongs to. -/// Otherwise, return `None`. +/// If the given `DefId` describes an item belonging to a trait, +/// returns the `DefId` of the trait that the trait item belongs to; +/// otherwise, returns `None`. fn trait_of_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option { tcx.opt_associated_item(def_id) .and_then(|associated_item| { @@ -3109,8 +3221,8 @@ fn trait_of_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Option /// Yields the parent function's `DefId` if `def_id` is an `impl Trait` definition. pub fn is_impl_trait_defn(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option { - if let Some(node_id) = tcx.hir().as_local_node_id(def_id) { - if let Node::Item(item) = tcx.hir().get(node_id) { + if let Some(hir_id) = tcx.hir().as_local_hir_id(def_id) { + if let Node::Item(item) = tcx.hir().get_by_hir_id(hir_id) { if let hir::ItemKind::Existential(ref exist_ty) = item.node { return exist_ty.impl_trait_fn; } @@ -3119,18 +3231,6 @@ pub fn is_impl_trait_defn(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option, def_id: DefId) -> bool { - if let Some(node_id) = tcx.hir().as_local_node_id(def_id) { - if let Node::Item(item) = tcx.hir().get(node_id) { - if let hir::ItemKind::TraitAlias(..) 
= item.node { - return true; - } - } - } - false -} - /// See `ParamEnv` struct definition for details. fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) @@ -3157,11 +3257,14 @@ fn param_env<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // are any errors at that point, so after type checking you can be // sure that this will succeed without errors anyway. - let unnormalized_env = ty::ParamEnv::new(tcx.intern_predicates(&predicates), - traits::Reveal::UserFacing); + let unnormalized_env = ty::ParamEnv::new( + tcx.intern_predicates(&predicates), + traits::Reveal::UserFacing, + if tcx.sess.opts.debugging_opts.chalk { Some(def_id) } else { None } + ); - let body_id = tcx.hir().as_local_node_id(def_id).map_or(DUMMY_NODE_ID, |id| { - tcx.hir().maybe_body_owned_by(id).map_or(id, |body| body.node_id) + let body_id = tcx.hir().as_local_hir_id(def_id).map_or(hir::DUMMY_HIR_ID, |id| { + tcx.hir().maybe_body_owned_by_by_hir_id(id).map_or(id, |body| body.hir_id) }); let cause = traits::ObligationCause::misc(tcx.def_span(def_id), body_id); traits::normalize_param_env_or_error(tcx, def_id, unnormalized_env, cause) @@ -3200,6 +3303,58 @@ fn instance_def_size_estimate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } +/// If `def_id` is an issue 33140 hack impl, returns its self type; otherwise, returns `None`. +/// +/// See [`ImplOverlapKind::Issue33140`] for more details. +fn issue33140_self_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId) + -> Option> +{ + debug!("issue33140_self_ty({:?})", def_id); + + let trait_ref = tcx.impl_trait_ref(def_id).unwrap_or_else(|| { + bug!("issue33140_self_ty called on inherent impl {:?}", def_id) + }); + + debug!("issue33140_self_ty({:?}), trait-ref={:?}", def_id, trait_ref); + + let is_marker_like = + tcx.impl_polarity(def_id) == hir::ImplPolarity::Positive && + tcx.associated_item_def_ids(trait_ref.def_id).is_empty(); + + // Check whether these impls would be ok for a marker trait. + if !is_marker_like { + debug!("issue33140_self_ty - not marker-like!"); + return None; + } + + // impl must be `impl Trait for dyn Marker1 + Marker2 + ...` + if trait_ref.substs.len() != 1 { + debug!("issue33140_self_ty - impl has substs!"); + return None; + } + + let predicates = tcx.predicates_of(def_id); + if predicates.parent.is_some() || !predicates.predicates.is_empty() { + debug!("issue33140_self_ty - impl has predicates {:?}!", predicates); + return None; + } + + let self_ty = trait_ref.self_ty(); + let self_ty_matches = match self_ty.sty { + ty::Dynamic(ref data, ty::ReStatic) => data.principal().is_none(), + _ => false + }; + + if self_ty_matches { + debug!("issue33140_self_ty - MATCHES!"); + Some(self_ty) + } else { + debug!("issue33140_self_ty - non-matching self type"); + None + } +} + pub fn provide(providers: &mut ty::query::Providers<'_>) { context::provide(providers); erase_regions::provide(providers); @@ -3218,6 +3373,7 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { crate_hash, trait_impls_of: trait_def::trait_impls_of_provider, instance_def_size_estimate, + issue33140_self_ty, ..*providers }; } @@ -3227,7 +3383,7 @@ pub fn provide(providers: &mut ty::query::Providers<'_>) { /// rather, you should request the vector for a specific type via /// `tcx.inherent_impls(def_id)` so as to minimize your dependencies /// (constructing this map requires touching the entire crate). 
-#[derive(Clone, Debug, Default)] +#[derive(Clone, Debug, Default, HashStable)] pub struct CrateInherentImpls { pub inherent_impls: DefIdMap>>, } diff --git a/src/librustc/ty/outlives.rs b/src/librustc/ty/outlives.rs index 0e3fc62e4ca9a..5b21ed5abd77b 100644 --- a/src/librustc/ty/outlives.rs +++ b/src/librustc/ty/outlives.rs @@ -1,19 +1,9 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // The outlines relation `T: 'a` or `'a: 'b`. This code frequently // refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that // RFC for reference. use smallvec::SmallVec; -use ty::{self, Ty, TyCtxt, TypeFoldable}; +use crate::ty::{self, Ty, TyCtxt, TypeFoldable}; #[derive(Debug)] pub enum Component<'tcx> { diff --git a/src/librustc/ty/print/mod.rs b/src/librustc/ty/print/mod.rs new file mode 100644 index 0000000000000..ef30a4032d8fa --- /dev/null +++ b/src/librustc/ty/print/mod.rs @@ -0,0 +1,327 @@ +use crate::hir::map::{DefPathData, DisambiguatedDefPathData}; +use crate::hir::def_id::{CrateNum, DefId}; +use crate::ty::{self, DefIdTree, Ty, TyCtxt}; +use crate::ty::subst::{Kind, Subst}; + +use rustc_data_structures::fx::FxHashSet; + +// `pretty` is a separate module only for organization. +mod pretty; +pub use self::pretty::*; + +pub trait Print<'gcx, 'tcx, P> { + type Output; + type Error; + + fn print(&self, cx: P) -> Result; +} + +/// Interface for outputting user-facing "type-system entities" +/// (paths, types, lifetimes, constants, etc.) as a side-effect +/// (e.g. formatting, like `PrettyPrinter` implementors do) or by +/// constructing some alternative representation (e.g. an AST), +/// which the associated types allow passing through the methods. +/// +/// For pretty-printing/formatting in particular, see `PrettyPrinter`. +// FIXME(eddyb) find a better name, this is more general than "printing". 
+pub trait Printer<'gcx: 'tcx, 'tcx>: Sized { + type Error; + + type Path; + type Region; + type Type; + type DynExistential; + + fn tcx(&'a self) -> TyCtxt<'a, 'gcx, 'tcx>; + + fn print_def_path( + self, + def_id: DefId, + substs: &'tcx [Kind<'tcx>], + ) -> Result { + self.default_print_def_path(def_id, substs) + } + fn print_impl_path( + self, + impl_def_id: DefId, + substs: &'tcx [Kind<'tcx>], + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + self.default_print_impl_path(impl_def_id, substs, self_ty, trait_ref) + } + + fn print_region( + self, + region: ty::Region<'_>, + ) -> Result; + + fn print_type( + self, + ty: Ty<'tcx>, + ) -> Result; + + fn print_dyn_existential( + self, + predicates: &'tcx ty::List>, + ) -> Result; + + fn path_crate( + self, + cnum: CrateNum, + ) -> Result; + fn path_qualified( + self, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result; + + fn path_append_impl( + self, + print_prefix: impl FnOnce(Self) -> Result, + disambiguated_data: &DisambiguatedDefPathData, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result; + fn path_append( + self, + print_prefix: impl FnOnce(Self) -> Result, + disambiguated_data: &DisambiguatedDefPathData, + ) -> Result; + fn path_generic_args( + self, + print_prefix: impl FnOnce(Self) -> Result, + args: &[Kind<'tcx>], + ) -> Result; + + // Defaults (should not be overriden): + + fn default_print_def_path( + self, + def_id: DefId, + substs: &'tcx [Kind<'tcx>], + ) -> Result { + debug!("default_print_def_path: def_id={:?}, substs={:?}", def_id, substs); + let key = self.tcx().def_key(def_id); + debug!("default_print_def_path: key={:?}", key); + + match key.disambiguated_data.data { + DefPathData::CrateRoot => { + assert!(key.parent.is_none()); + self.path_crate(def_id.krate) + } + + DefPathData::Impl => { + let generics = self.tcx().generics_of(def_id); + let mut self_ty = self.tcx().type_of(def_id); + let mut impl_trait_ref = self.tcx().impl_trait_ref(def_id); + if substs.len() >= generics.count() { + self_ty = self_ty.subst(self.tcx(), substs); + impl_trait_ref = impl_trait_ref.subst(self.tcx(), substs); + } + self.print_impl_path(def_id, substs, self_ty, impl_trait_ref) + } + + _ => { + let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id }; + + let mut parent_substs = substs; + let mut trait_qualify_parent = false; + if !substs.is_empty() { + let generics = self.tcx().generics_of(def_id); + parent_substs = &substs[..generics.parent_count.min(substs.len())]; + + match key.disambiguated_data.data { + // Closures' own generics are only captures, don't print them. + DefPathData::ClosureExpr => {} + + // If we have any generic arguments to print, we do that + // on top of the same path, but without its own generics. + _ => if !generics.params.is_empty() && substs.len() >= generics.count() { + let args = self.generic_args_to_print(generics, substs); + return self.path_generic_args( + |cx| cx.print_def_path(def_id, parent_substs), + args, + ); + } + } + + // FIXME(eddyb) try to move this into the parent's printing + // logic, instead of doing it when printing the child. 
+ trait_qualify_parent = + generics.has_self && + generics.parent == Some(parent_def_id) && + parent_substs.len() == generics.parent_count && + self.tcx().generics_of(parent_def_id).parent_count == 0; + } + + self.path_append( + |cx: Self| if trait_qualify_parent { + let trait_ref = ty::TraitRef::new( + parent_def_id, + cx.tcx().intern_substs(parent_substs), + ); + cx.path_qualified(trait_ref.self_ty(), Some(trait_ref)) + } else { + cx.print_def_path(parent_def_id, parent_substs) + }, + &key.disambiguated_data, + ) + } + } + } + + fn generic_args_to_print( + &self, + generics: &'tcx ty::Generics, + substs: &'tcx [Kind<'tcx>], + ) -> &'tcx [Kind<'tcx>] { + let mut own_params = generics.parent_count..generics.count(); + + // Don't print args for `Self` parameters (of traits). + if generics.has_self && own_params.start == 0 { + own_params.start = 1; + } + + // Don't print args that are the defaults of their respective parameters. + own_params.end -= generics.params.iter().rev().take_while(|param| { + match param.kind { + ty::GenericParamDefKind::Lifetime => false, + ty::GenericParamDefKind::Type { has_default, .. } => { + has_default && substs[param.index as usize] == Kind::from( + self.tcx().type_of(param.def_id).subst(self.tcx(), substs) + ) + } + ty::GenericParamDefKind::Const => false, // FIXME(const_generics:defaults) + } + }).count(); + + &substs[own_params] + } + + fn default_print_impl_path( + self, + impl_def_id: DefId, + _substs: &'tcx [Kind<'tcx>], + self_ty: Ty<'tcx>, + impl_trait_ref: Option>, + ) -> Result { + debug!("default_print_impl_path: impl_def_id={:?}, self_ty={}, impl_trait_ref={:?}", + impl_def_id, self_ty, impl_trait_ref); + + let key = self.tcx().def_key(impl_def_id); + let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id }; + + // Decide whether to print the parent path for the impl. + // Logically, since impls are global, it's never needed, but + // users may find it useful. Currently, we omit the parent if + // the impl is either in the same module as the self-type or + // as the trait. + let in_self_mod = match characteristic_def_id_of_type(self_ty) { + None => false, + Some(ty_def_id) => self.tcx().parent(ty_def_id) == Some(parent_def_id), + }; + let in_trait_mod = match impl_trait_ref { + None => false, + Some(trait_ref) => self.tcx().parent(trait_ref.def_id) == Some(parent_def_id), + }; + + if !in_self_mod && !in_trait_mod { + // If the impl is not co-located with either self-type or + // trait-type, then fallback to a format that identifies + // the module more clearly. + self.path_append_impl( + |cx| cx.print_def_path(parent_def_id, &[]), + &key.disambiguated_data, + self_ty, + impl_trait_ref, + ) + } else { + // Otherwise, try to give a good form that would be valid language + // syntax. Preferably using associated item notation. + self.path_qualified(self_ty, impl_trait_ref) + } + } +} + +/// As a heuristic, when we see an impl, if we see that the +/// 'self type' is a type defined in the same module as the impl, +/// we can omit including the path to the impl itself. This +/// function tries to find a "characteristic `DefId`" for a +/// type. It's just a heuristic so it makes some questionable +/// decisions and we may want to adjust it later. +pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option { + match ty.sty { + ty::Adt(adt_def, _) => Some(adt_def.did), + + ty::Dynamic(data, ..) 
=> data.principal_def_id(), + + ty::Array(subty, _) | + ty::Slice(subty) => characteristic_def_id_of_type(subty), + + ty::RawPtr(mt) => characteristic_def_id_of_type(mt.ty), + + ty::Ref(_, ty, _) => characteristic_def_id_of_type(ty), + + ty::Tuple(ref tys) => tys.iter() + .filter_map(|ty| characteristic_def_id_of_type(ty)) + .next(), + + ty::FnDef(def_id, _) | + ty::Closure(def_id, _) | + ty::Generator(def_id, _, _) | + ty::Foreign(def_id) => Some(def_id), + + ty::Bool | + ty::Char | + ty::Int(_) | + ty::Uint(_) | + ty::Str | + ty::FnPtr(_) | + ty::Projection(_) | + ty::Placeholder(..) | + ty::UnnormalizedProjection(..) | + ty::Param(_) | + ty::Opaque(..) | + ty::Infer(_) | + ty::Bound(..) | + ty::Error | + ty::GeneratorWitness(..) | + ty::Never | + ty::Float(_) => None, + } +} + +impl<'gcx: 'tcx, 'tcx, P: Printer<'gcx, 'tcx>> Print<'gcx, 'tcx, P> for ty::RegionKind { + type Output = P::Region; + type Error = P::Error; + fn print(&self, cx: P) -> Result { + cx.print_region(self) + } +} + +impl<'gcx: 'tcx, 'tcx, P: Printer<'gcx, 'tcx>> Print<'gcx, 'tcx, P> for ty::Region<'_> { + type Output = P::Region; + type Error = P::Error; + fn print(&self, cx: P) -> Result { + cx.print_region(self) + } +} + +impl<'gcx: 'tcx, 'tcx, P: Printer<'gcx, 'tcx>> Print<'gcx, 'tcx, P> for Ty<'tcx> { + type Output = P::Type; + type Error = P::Error; + fn print(&self, cx: P) -> Result { + cx.print_type(self) + } +} + +impl<'gcx: 'tcx, 'tcx, P: Printer<'gcx, 'tcx>> Print<'gcx, 'tcx, P> + for &'tcx ty::List> +{ + type Output = P::DynExistential; + type Error = P::Error; + fn print(&self, cx: P) -> Result { + cx.print_dyn_existential(self) + } +} diff --git a/src/librustc/ty/print/pretty.rs b/src/librustc/ty/print/pretty.rs new file mode 100644 index 0000000000000..c9a4961a8e044 --- /dev/null +++ b/src/librustc/ty/print/pretty.rs @@ -0,0 +1,1608 @@ +use crate::hir; +use crate::hir::def::Namespace; +use crate::hir::map::{DefPathData, DisambiguatedDefPathData}; +use crate::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; +use crate::middle::cstore::{ExternCrate, ExternCrateSource}; +use crate::middle::region; +use crate::ty::{self, DefIdTree, ParamConst, Ty, TyCtxt, TypeFoldable}; +use crate::ty::subst::{Kind, Subst, UnpackedKind}; +use crate::mir::interpret::ConstValue; +use syntax::symbol::{keywords, Symbol}; + +use rustc_target::spec::abi::Abi; +use syntax::symbol::InternedString; + +use std::cell::Cell; +use std::fmt::{self, Write as _}; +use std::ops::{Deref, DerefMut}; + +// `pretty` is a separate module only for organization. +use super::*; + +macro_rules! p { + (@write($($data:expr),+)) => { + write!(scoped_cx!(), $($data),+)? + }; + (@print($x:expr)) => { + scoped_cx!() = $x.print(scoped_cx!())? + }; + (@$method:ident($($arg:expr),*)) => { + scoped_cx!() = scoped_cx!().$method($($arg),*)? + }; + ($($kind:ident $data:tt),+) => {{ + $(p!(@$kind $data);)+ + }}; +} +macro_rules! define_scoped_cx { + ($cx:ident) => { + #[allow(unused_macros)] + macro_rules! scoped_cx { + () => ($cx) + } + }; +} + +thread_local! { + static FORCE_IMPL_FILENAME_LINE: Cell = Cell::new(false); + static SHOULD_PREFIX_WITH_CRATE: Cell = Cell::new(false); +} + +/// Force us to name impls with just the filename/line number. We +/// normally try to use types. But at some points, notably while printing +/// cycle errors, this can result in extra or suboptimal error output, +/// so this variable disables that check. 
+pub fn with_forced_impl_filename_line R, R>(f: F) -> R { + FORCE_IMPL_FILENAME_LINE.with(|force| { + let old = force.get(); + force.set(true); + let result = f(); + force.set(old); + result + }) +} + +/// Adds the `crate::` prefix to paths where appropriate. +pub fn with_crate_prefix R, R>(f: F) -> R { + SHOULD_PREFIX_WITH_CRATE.with(|flag| { + let old = flag.get(); + flag.set(true); + let result = f(); + flag.set(old); + result + }) +} + +/// The "region highlights" are used to control region printing during +/// specific error messages. When a "region highlight" is enabled, it +/// gives an alternate way to print specific regions. For now, we +/// always print those regions using a number, so something like "`'0`". +/// +/// Regions not selected by the region highlight mode are presently +/// unaffected. +#[derive(Copy, Clone, Default)] +pub struct RegionHighlightMode { + /// If enabled, when we see the selected region, use "`'N`" + /// instead of the ordinary behavior. + highlight_regions: [Option<(ty::RegionKind, usize)>; 3], + + /// If enabled, when printing a "free region" that originated from + /// the given `ty::BoundRegion`, print it as "`'1`". Free regions that would ordinarily + /// have names print as normal. + /// + /// This is used when you have a signature like `fn foo(x: &u32, + /// y: &'a u32)` and we want to give a name to the region of the + /// reference `x`. + highlight_bound_region: Option<(ty::BoundRegion, usize)>, +} + +impl RegionHighlightMode { + /// If `region` and `number` are both `Some`, invokes + /// `highlighting_region`. + pub fn maybe_highlighting_region( + &mut self, + region: Option>, + number: Option, + ) { + if let Some(k) = region { + if let Some(n) = number { + self.highlighting_region(k, n); + } + } + } + + /// Highlights the region inference variable `vid` as `'N`. + pub fn highlighting_region( + &mut self, + region: ty::Region<'_>, + number: usize, + ) { + let num_slots = self.highlight_regions.len(); + let first_avail_slot = self.highlight_regions.iter_mut() + .filter(|s| s.is_none()) + .next() + .unwrap_or_else(|| { + bug!( + "can only highlight {} placeholders at a time", + num_slots, + ) + }); + *first_avail_slot = Some((*region, number)); + } + + /// Convenience wrapper for `highlighting_region`. + pub fn highlighting_region_vid( + &mut self, + vid: ty::RegionVid, + number: usize, + ) { + self.highlighting_region(&ty::ReVar(vid), number) + } + + /// Returns `Some(n)` with the number to use for the given region, if any. + fn region_highlighted(&self, region: ty::Region<'_>) -> Option { + self + .highlight_regions + .iter() + .filter_map(|h| match h { + Some((r, n)) if r == region => Some(*n), + _ => None, + }) + .next() + } + + /// Highlight the given bound region. + /// We can only highlight one bound region at a time. See + /// the field `highlight_bound_region` for more detailed notes. + pub fn highlighting_bound_region( + &mut self, + br: ty::BoundRegion, + number: usize, + ) { + assert!(self.highlight_bound_region.is_none()); + self.highlight_bound_region = Some((br, number)); + } +} + +/// Trait for printers that pretty-print using `fmt::Write` to the printer. +pub trait PrettyPrinter<'gcx: 'tcx, 'tcx>: + Printer<'gcx, 'tcx, + Error = fmt::Error, + Path = Self, + Region = Self, + Type = Self, + DynExistential = Self, + > + + fmt::Write +{ + /// Like `print_def_path` but for value paths. 
+ fn print_value_path( + self, + def_id: DefId, + substs: &'tcx [Kind<'tcx>], + ) -> Result { + self.print_def_path(def_id, substs) + } + + fn in_binder( + self, + value: &ty::Binder, + ) -> Result + where T: Print<'gcx, 'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx> + { + value.skip_binder().print(self) + } + + /// Print comma-separated elements. + fn comma_sep( + mut self, + mut elems: impl Iterator, + ) -> Result + where T: Print<'gcx, 'tcx, Self, Output = Self, Error = Self::Error> + { + if let Some(first) = elems.next() { + self = first.print(self)?; + for elem in elems { + self.write_str(", ")?; + self = elem.print(self)?; + } + } + Ok(self) + } + + /// Print `<...>` around what `f` prints. + fn generic_delimiters( + self, + f: impl FnOnce(Self) -> Result, + ) -> Result; + + /// Return `true` if the region should be printed in + /// optional positions, e.g. `&'a T` or `dyn Tr + 'b`. + /// This is typically the case for all non-`'_` regions. + fn region_should_not_be_omitted( + &self, + region: ty::Region<'_>, + ) -> bool; + + // Defaults (should not be overriden): + + /// If possible, this returns a global path resolving to `def_id` that is visible + /// from at least one local module and returns true. If the crate defining `def_id` is + /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`. + fn try_print_visible_def_path( + mut self, + def_id: DefId, + ) -> Result<(Self, bool), Self::Error> { + define_scoped_cx!(self); + + debug!("try_print_visible_def_path: def_id={:?}", def_id); + + // If `def_id` is a direct or injected extern crate, return the + // path to the crate followed by the path to the item within the crate. + if def_id.index == CRATE_DEF_INDEX { + let cnum = def_id.krate; + + if cnum == LOCAL_CRATE { + return Ok((self.path_crate(cnum)?, true)); + } + + // In local mode, when we encounter a crate other than + // LOCAL_CRATE, execution proceeds in one of two ways: + // + // 1. for a direct dependency, where user added an + // `extern crate` manually, we put the `extern + // crate` as the parent. So you wind up with + // something relative to the current crate. + // 2. for an extern inferred from a path or an indirect crate, + // where there is no explicit `extern crate`, we just prepend + // the crate name. + match *self.tcx().extern_crate(def_id) { + Some(ExternCrate { + src: ExternCrateSource::Extern(def_id), + direct: true, + span, + .. + }) => { + debug!("try_print_visible_def_path: def_id={:?}", def_id); + return Ok((if !span.is_dummy() { + self.print_def_path(def_id, &[])? + } else { + self.path_crate(cnum)? + }, true)); + } + None => { + return Ok((self.path_crate(cnum)?, true)); + } + _ => {}, + } + } + + if def_id.is_local() { + return Ok((self, false)); + } + + let visible_parent_map = self.tcx().visible_parent_map(LOCAL_CRATE); + + let mut cur_def_key = self.tcx().def_key(def_id); + debug!("try_print_visible_def_path: cur_def_key={:?}", cur_def_key); + + // For a constructor we want the name of its parent rather than . 
+ match cur_def_key.disambiguated_data.data { + DefPathData::Ctor => { + let parent = DefId { + krate: def_id.krate, + index: cur_def_key.parent + .expect("DefPathData::Ctor/VariantData missing a parent"), + }; + + cur_def_key = self.tcx().def_key(parent); + }, + _ => {}, + } + + let visible_parent = match visible_parent_map.get(&def_id).cloned() { + Some(parent) => parent, + None => return Ok((self, false)), + }; + // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid + // knowing ahead of time whether the entire path will succeed or not. + // To support printers that do not implement `PrettyPrinter`, a `Vec` or + // linked list on the stack would need to be built, before any printing. + match self.try_print_visible_def_path(visible_parent)? { + (cx, false) => return Ok((cx, false)), + (cx, true) => self = cx, + } + let actual_parent = self.tcx().parent(def_id); + debug!( + "try_print_visible_def_path: visible_parent={:?} actual_parent={:?}", + visible_parent, actual_parent, + ); + + let mut data = cur_def_key.disambiguated_data.data; + debug!( + "try_print_visible_def_path: data={:?} visible_parent={:?} actual_parent={:?}", + data, visible_parent, actual_parent, + ); + + match data { + // In order to output a path that could actually be imported (valid and visible), + // we need to handle re-exports correctly. + // + // For example, take `std::os::unix::process::CommandExt`, this trait is actually + // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing). + // + // `std::os::unix` rexports the contents of `std::sys::unix::ext`. `std::sys` is + // private so the "true" path to `CommandExt` isn't accessible. + // + // In this case, the `visible_parent_map` will look something like this: + // + // (child) -> (parent) + // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process` + // `std::sys::unix::ext::process` -> `std::sys::unix::ext` + // `std::sys::unix::ext` -> `std::os` + // + // This is correct, as the visible parent of `std::sys::unix::ext` is in fact + // `std::os`. + // + // When printing the path to `CommandExt` and looking at the `cur_def_key` that + // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go + // to the parent - resulting in a mangled path like + // `std::os::ext::process::CommandExt`. + // + // Instead, we must detect that there was a re-export and instead print `unix` + // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To + // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with + // the visible parent (`std::os`). If these do not match, then we iterate over + // the children of the visible parent (as was done when computing + // `visible_parent_map`), looking for the specific child we currently have and then + // have access to the re-exported name. + DefPathData::Module(ref mut name) | + DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => { + let reexport = self.tcx().item_children(visible_parent) + .iter() + .find(|child| child.def.def_id() == def_id) + .map(|child| child.ident.as_interned_str()); + if let Some(reexport) = reexport { + *name = reexport; + } + } + // Re-exported `extern crate` (#43189). 
+ DefPathData::CrateRoot => { + data = DefPathData::Module( + self.tcx().original_crate_name(def_id.krate).as_interned_str(), + ); + } + _ => {} + } + debug!("try_print_visible_def_path: data={:?}", data); + + Ok((self.path_append(Ok, &DisambiguatedDefPathData { + data, + disambiguator: 0, + })?, true)) + } + + fn pretty_path_qualified( + self, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + if trait_ref.is_none() { + // Inherent impls. Try to print `Foo::bar` for an inherent + // impl on `Foo`, but fallback to `::bar` if self-type is + // anything other than a simple path. + match self_ty.sty { + ty::Adt(..) | ty::Foreign(_) | + ty::Bool | ty::Char | ty::Str | + ty::Int(_) | ty::Uint(_) | ty::Float(_) => { + return self_ty.print(self); + } + + _ => {} + } + } + + self.generic_delimiters(|mut cx| { + define_scoped_cx!(cx); + + p!(print(self_ty)); + if let Some(trait_ref) = trait_ref { + p!(write(" as "), print(trait_ref)); + } + Ok(cx) + }) + } + + fn pretty_path_append_impl( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + self = print_prefix(self)?; + + self.generic_delimiters(|mut cx| { + define_scoped_cx!(cx); + + p!(write("impl ")); + if let Some(trait_ref) = trait_ref { + p!(print(trait_ref), write(" for ")); + } + p!(print(self_ty)); + + Ok(cx) + }) + } + + fn pretty_print_type( + mut self, + ty: Ty<'tcx>, + ) -> Result { + define_scoped_cx!(self); + + match ty.sty { + ty::Bool => p!(write("bool")), + ty::Char => p!(write("char")), + ty::Int(t) => p!(write("{}", t.ty_to_string())), + ty::Uint(t) => p!(write("{}", t.ty_to_string())), + ty::Float(t) => p!(write("{}", t.ty_to_string())), + ty::RawPtr(ref tm) => { + p!(write("*{} ", match tm.mutbl { + hir::MutMutable => "mut", + hir::MutImmutable => "const", + })); + p!(print(tm.ty)) + } + ty::Ref(r, ty, mutbl) => { + p!(write("&")); + if self.region_should_not_be_omitted(r) { + p!(print(r), write(" ")); + } + p!(print(ty::TypeAndMut { ty, mutbl })) + } + ty::Never => p!(write("!")), + ty::Tuple(ref tys) => { + p!(write("(")); + let mut tys = tys.iter(); + if let Some(&ty) = tys.next() { + p!(print(ty), write(",")); + if let Some(&ty) = tys.next() { + p!(write(" "), print(ty)); + for &ty in tys { + p!(write(", "), print(ty)); + } + } + } + p!(write(")")) + } + ty::FnDef(def_id, substs) => { + let sig = self.tcx().fn_sig(def_id).subst(self.tcx(), substs); + p!(print(sig), write(" {{"), print_value_path(def_id, substs), write("}}")); + } + ty::FnPtr(ref bare_fn) => { + p!(print(bare_fn)) + } + ty::Infer(infer_ty) => p!(write("{}", infer_ty)), + ty::Error => p!(write("[type error]")), + ty::Param(ref param_ty) => p!(write("{}", param_ty)), + ty::Bound(debruijn, bound_ty) => { + match bound_ty.kind { + ty::BoundTyKind::Anon => { + if debruijn == ty::INNERMOST { + p!(write("^{}", bound_ty.var.index())) + } else { + p!(write("^{}_{}", debruijn.index(), bound_ty.var.index())) + } + } + + ty::BoundTyKind::Param(p) => p!(write("{}", p)), + } + } + ty::Adt(def, substs) => { + p!(print_def_path(def.did, substs)); + } + ty::Dynamic(data, r) => { + let print_r = self.region_should_not_be_omitted(r); + if print_r { + p!(write("(")); + } + p!(write("dyn "), print(data)); + if print_r { + p!(write(" + "), print(r), write(")")); + } + } + ty::Foreign(def_id) => { + p!(print_def_path(def_id, &[])); + } + ty::Projection(ref data) => p!(print(data)), + ty::UnnormalizedProjection(ref data) => { + p!(write("Unnormalized("), print(data), write(")")) + } + 
ty::Placeholder(placeholder) => { + p!(write("Placeholder({:?})", placeholder)) + } + ty::Opaque(def_id, substs) => { + // FIXME(eddyb) print this with `print_def_path`. + if self.tcx().sess.verbose() { + p!(write("Opaque({:?}, {:?})", def_id, substs)); + return Ok(self); + } + + let def_key = self.tcx().def_key(def_id); + if let Some(name) = def_key.disambiguated_data.data.get_opt_name() { + p!(write("{}", name)); + let mut substs = substs.iter(); + // FIXME(eddyb) print this with `print_def_path`. + if let Some(first) = substs.next() { + p!(write("::<")); + p!(print(first)); + for subst in substs { + p!(write(", "), print(subst)); + } + p!(write(">")); + } + return Ok(self); + } + // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, + // by looking up the projections associated with the def_id. + let bounds = self.tcx().predicates_of(def_id).instantiate(self.tcx(), substs); + + let mut first = true; + let mut is_sized = false; + p!(write("impl")); + for predicate in bounds.predicates { + if let Some(trait_ref) = predicate.to_opt_poly_trait_ref() { + // Don't print +Sized, but rather +?Sized if absent. + if Some(trait_ref.def_id()) == self.tcx().lang_items().sized_trait() { + is_sized = true; + continue; + } + + p!( + write("{}", if first { " " } else { "+" }), + print(trait_ref)); + first = false; + } + } + if !is_sized { + p!(write("{}?Sized", if first { " " } else { "+" })); + } else if first { + p!(write(" Sized")); + } + } + ty::Str => p!(write("str")), + ty::Generator(did, substs, movability) => { + let upvar_tys = substs.upvar_tys(did, self.tcx()); + let witness = substs.witness(did, self.tcx()); + if movability == hir::GeneratorMovability::Movable { + p!(write("[generator")); + } else { + p!(write("[static generator")); + } + + // FIXME(eddyb) should use `def_span`. + if let Some(hir_id) = self.tcx().hir().as_local_hir_id(did) { + p!(write("@{:?}", self.tcx().hir().span_by_hir_id(hir_id))); + let mut sep = " "; + for (freevar, upvar_ty) in self.tcx().freevars(did) + .as_ref() + .map_or(&[][..], |fv| &fv[..]) + .iter() + .zip(upvar_tys) + { + p!( + write("{}{}:", + sep, + self.tcx().hir().name(freevar.var_id())), + print(upvar_ty)); + sep = ", "; + } + } else { + // cross-crate closure types should only be + // visible in codegen bug reports, I imagine. + p!(write("@{:?}", did)); + let mut sep = " "; + for (index, upvar_ty) in upvar_tys.enumerate() { + p!( + write("{}{}:", sep, index), + print(upvar_ty)); + sep = ", "; + } + } + + p!(write(" "), print(witness), write("]")) + }, + ty::GeneratorWitness(types) => { + p!(in_binder(&types)); + } + ty::Closure(did, substs) => { + let upvar_tys = substs.upvar_tys(did, self.tcx()); + p!(write("[closure")); + + // FIXME(eddyb) should use `def_span`. + if let Some(hir_id) = self.tcx().hir().as_local_hir_id(did) { + if self.tcx().sess.opts.debugging_opts.span_free_formats { + p!(write("@{:?}", hir_id)); + } else { + p!(write("@{:?}", self.tcx().hir().span_by_hir_id(hir_id))); + } + let mut sep = " "; + for (freevar, upvar_ty) in self.tcx().freevars(did) + .as_ref() + .map_or(&[][..], |fv| &fv[..]) + .iter() + .zip(upvar_tys) + { + p!( + write("{}{}:", + sep, + self.tcx().hir().name(freevar.var_id())), + print(upvar_ty)); + sep = ", "; + } + } else { + // cross-crate closure types should only be + // visible in codegen bug reports, I imagine. 
+ p!(write("@{:?}", did)); + let mut sep = " "; + for (index, upvar_ty) in upvar_tys.enumerate() { + p!( + write("{}{}:", sep, index), + print(upvar_ty)); + sep = ", "; + } + } + + if self.tcx().sess.verbose() { + p!(write( + " closure_kind_ty={:?} closure_sig_ty={:?}", + substs.closure_kind_ty(did, self.tcx()), + substs.closure_sig_ty(did, self.tcx()) + )); + } + + p!(write("]")) + }, + ty::Array(ty, sz) => { + p!(write("["), print(ty), write("; ")); + match sz.val { + ConstValue::Unevaluated(..) | + ConstValue::Infer(..) => p!(write("_")), + ConstValue::Param(ParamConst { name, .. }) => + p!(write("{}", name)), + _ => p!(write("{}", sz.unwrap_usize(self.tcx()))), + } + p!(write("]")) + } + ty::Slice(ty) => { + p!(write("["), print(ty), write("]")) + } + } + + Ok(self) + } + + fn pretty_print_dyn_existential( + mut self, + predicates: &'tcx ty::List>, + ) -> Result { + define_scoped_cx!(self); + + // Generate the main trait ref, including associated types. + let mut first = true; + + if let Some(principal) = predicates.principal() { + p!(print_def_path(principal.def_id, &[])); + + let mut resugared = false; + + // Special-case `Fn(...) -> ...` and resugar it. + let fn_trait_kind = self.tcx().lang_items().fn_trait_kind(principal.def_id); + if !self.tcx().sess.verbose() && fn_trait_kind.is_some() { + if let ty::Tuple(ref args) = principal.substs.type_at(0).sty { + let mut projections = predicates.projection_bounds(); + if let (Some(proj), None) = (projections.next(), projections.next()) { + p!(pretty_fn_sig(args, false, proj.ty)); + resugared = true; + } + } + } + + // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`, + // in order to place the projections inside the `<...>`. + if !resugared { + // Use a type that can't appear in defaults of type parameters. + let dummy_self = self.tcx().mk_infer(ty::FreshTy(0)); + let principal = principal.with_self_ty(self.tcx(), dummy_self); + + let args = self.generic_args_to_print( + self.tcx().generics_of(principal.def_id), + principal.substs, + ); + + // Don't print `'_` if there's no unerased regions. + let print_regions = args.iter().any(|arg| { + match arg.unpack() { + UnpackedKind::Lifetime(r) => *r != ty::ReErased, + _ => false, + } + }); + let mut args = args.iter().cloned().filter(|arg| { + match arg.unpack() { + UnpackedKind::Lifetime(_) => print_regions, + _ => true, + } + }); + let mut projections = predicates.projection_bounds(); + + let arg0 = args.next(); + let projection0 = projections.next(); + if arg0.is_some() || projection0.is_some() { + let args = arg0.into_iter().chain(args); + let projections = projection0.into_iter().chain(projections); + + p!(generic_delimiters(|mut cx| { + cx = cx.comma_sep(args)?; + if arg0.is_some() && projection0.is_some() { + write!(cx, ", ")?; + } + cx.comma_sep(projections) + })); + } + } + first = false; + } + + // Builtin bounds. + // FIXME(eddyb) avoid printing twice (needed to ensure + // that the auto traits are sorted *and* printed via cx). + let mut auto_traits: Vec<_> = predicates.auto_traits().map(|did| { + (self.tcx().def_path_str(did), did) + }).collect(); + + // The auto traits come ordered by `DefPathHash`. While + // `DefPathHash` is *stable* in the sense that it depends on + // neither the host nor the phase of the moon, it depends + // "pseudorandomly" on the compiler version and the target. + // + // To avoid that causing instabilities in compiletest + // output, sort the auto-traits alphabetically. 
+ auto_traits.sort(); + + for (_, def_id) in auto_traits { + if !first { + p!(write(" + ")); + } + first = false; + + p!(print_def_path(def_id, &[])); + } + + Ok(self) + } + + fn pretty_fn_sig( + mut self, + inputs: &[Ty<'tcx>], + c_variadic: bool, + output: Ty<'tcx>, + ) -> Result { + define_scoped_cx!(self); + + p!(write("(")); + let mut inputs = inputs.iter(); + if let Some(&ty) = inputs.next() { + p!(print(ty)); + for &ty in inputs { + p!(write(", "), print(ty)); + } + if c_variadic { + p!(write(", ...")); + } + } + p!(write(")")); + if !output.is_unit() { + p!(write(" -> "), print(output)); + } + + Ok(self) + } +} + +// HACK(eddyb) boxed to avoid moving around a large struct by-value. +pub struct FmtPrinter<'a, 'gcx, 'tcx, F>(Box>); + +pub struct FmtPrinterData<'a, 'gcx, 'tcx, F> { + tcx: TyCtxt<'a, 'gcx, 'tcx>, + fmt: F, + + empty_path: bool, + in_value: bool, + + used_region_names: FxHashSet, + region_index: usize, + binder_depth: usize, + + pub region_highlight_mode: RegionHighlightMode, +} + +impl Deref for FmtPrinter<'a, 'gcx, 'tcx, F> { + type Target = FmtPrinterData<'a, 'gcx, 'tcx, F>; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for FmtPrinter<'_, '_, '_, F> { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl FmtPrinter<'a, 'gcx, 'tcx, F> { + pub fn new(tcx: TyCtxt<'a, 'gcx, 'tcx>, fmt: F, ns: Namespace) -> Self { + FmtPrinter(Box::new(FmtPrinterData { + tcx, + fmt, + empty_path: false, + in_value: ns == Namespace::ValueNS, + used_region_names: Default::default(), + region_index: 0, + binder_depth: 0, + region_highlight_mode: RegionHighlightMode::default(), + })) + } +} + +impl TyCtxt<'_, '_, '_> { + // HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always + // (but also some things just print a `DefId` generally so maybe we need this?) + fn guess_def_namespace(self, def_id: DefId) -> Namespace { + match self.def_key(def_id).disambiguated_data.data { + DefPathData::ValueNs(..) | + DefPathData::EnumVariant(..) | + DefPathData::Field(..) | + DefPathData::AnonConst | + DefPathData::ConstParam(..) | + DefPathData::ClosureExpr | + DefPathData::Ctor => Namespace::ValueNS, + + DefPathData::MacroDef(..) => Namespace::MacroNS, + + _ => Namespace::TypeNS, + } + } + + /// Returns a string identifying this `DefId`. This string is + /// suitable for user output. + pub fn def_path_str(self, def_id: DefId) -> String { + let ns = self.guess_def_namespace(def_id); + debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns); + let mut s = String::new(); + let _ = FmtPrinter::new(self, &mut s, ns) + .print_def_path(def_id, &[]); + s + } +} + +impl fmt::Write for FmtPrinter<'_, '_, '_, F> { + fn write_str(&mut self, s: &str) -> fmt::Result { + self.fmt.write_str(s) + } +} + +impl Printer<'gcx, 'tcx> for FmtPrinter<'_, 'gcx, 'tcx, F> { + type Error = fmt::Error; + + type Path = Self; + type Region = Self; + type Type = Self; + type DynExistential = Self; + + fn tcx(&'a self) -> TyCtxt<'a, 'gcx, 'tcx> { + self.tcx + } + + fn print_def_path( + mut self, + def_id: DefId, + substs: &'tcx [Kind<'tcx>], + ) -> Result { + define_scoped_cx!(self); + + if substs.is_empty() { + match self.try_print_visible_def_path(def_id)? { + (cx, true) => return Ok(cx), + (cx, false) => self = cx, + } + } + + let key = self.tcx.def_key(def_id); + if let DefPathData::Impl = key.disambiguated_data.data { + // Always use types for non-local impls, where types are always + // available, and filename/line-number is mostly uninteresting. 
+ let use_types = + !def_id.is_local() || { + // Otherwise, use filename/line-number if forced. + let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get()); + !force_no_types + }; + + if !use_types { + // If no type info is available, fall back to + // pretty printing some span information. This should + // only occur very early in the compiler pipeline. + let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id }; + let span = self.tcx.def_span(def_id); + + self = self.print_def_path(parent_def_id, &[])?; + + // HACK(eddyb) copy of `path_append` to avoid + // constructing a `DisambiguatedDefPathData`. + if !self.empty_path { + write!(self, "::")?; + } + write!(self, "", span)?; + self.empty_path = false; + + return Ok(self); + } + } + + self.default_print_def_path(def_id, substs) + } + + fn print_region( + self, + region: ty::Region<'_>, + ) -> Result { + self.pretty_print_region(region) + } + + fn print_type( + self, + ty: Ty<'tcx>, + ) -> Result { + self.pretty_print_type(ty) + } + + fn print_dyn_existential( + self, + predicates: &'tcx ty::List>, + ) -> Result { + self.pretty_print_dyn_existential(predicates) + } + + fn path_crate( + mut self, + cnum: CrateNum, + ) -> Result { + self.empty_path = true; + if cnum == LOCAL_CRATE { + if self.tcx.sess.rust_2018() { + // We add the `crate::` keyword on Rust 2018, only when desired. + if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) { + write!(self, "{}", keywords::Crate.name())?; + self.empty_path = false; + } + } + } else { + write!(self, "{}", self.tcx.crate_name(cnum))?; + self.empty_path = false; + } + Ok(self) + } + fn path_qualified( + mut self, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + self = self.pretty_path_qualified(self_ty, trait_ref)?; + self.empty_path = false; + Ok(self) + } + + fn path_append_impl( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + _disambiguated_data: &DisambiguatedDefPathData, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + self = self.pretty_path_append_impl(|mut cx| { + cx = print_prefix(cx)?; + if !cx.empty_path { + write!(cx, "::")?; + } + + Ok(cx) + }, self_ty, trait_ref)?; + self.empty_path = false; + Ok(self) + } + fn path_append( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + disambiguated_data: &DisambiguatedDefPathData, + ) -> Result { + self = print_prefix(self)?; + + // Skip `::{{constructor}}` on tuple/unit structs. + match disambiguated_data.data { + DefPathData::Ctor => return Ok(self), + _ => {} + } + + // FIXME(eddyb) `name` should never be empty, but it + // currently is for `extern { ... }` "foreign modules". + let name = disambiguated_data.data.as_interned_str().as_str(); + if !name.is_empty() { + if !self.empty_path { + write!(self, "::")?; + } + write!(self, "{}", name)?; + + // FIXME(eddyb) this will print e.g. `{{closure}}#3`, but it + // might be nicer to use something else, e.g. `{closure#3}`. + let dis = disambiguated_data.disambiguator; + let print_dis = + disambiguated_data.data.get_opt_name().is_none() || + dis != 0 && self.tcx.sess.verbose(); + if print_dis { + write!(self, "#{}", dis)?; + } + + self.empty_path = false; + } + + Ok(self) + } + fn path_generic_args( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + args: &[Kind<'tcx>], + ) -> Result { + self = print_prefix(self)?; + + // Don't print `'_` if there's no unerased regions. 
+ let print_regions = args.iter().any(|arg| { + match arg.unpack() { + UnpackedKind::Lifetime(r) => *r != ty::ReErased, + _ => false, + } + }); + let args = args.iter().cloned().filter(|arg| { + match arg.unpack() { + UnpackedKind::Lifetime(_) => print_regions, + _ => true, + } + }); + + if args.clone().next().is_some() { + if self.in_value { + write!(self, "::")?; + } + self.generic_delimiters(|cx| cx.comma_sep(args)) + } else { + Ok(self) + } + } +} + +impl PrettyPrinter<'gcx, 'tcx> for FmtPrinter<'_, 'gcx, 'tcx, F> { + fn print_value_path( + mut self, + def_id: DefId, + substs: &'tcx [Kind<'tcx>], + ) -> Result { + let was_in_value = std::mem::replace(&mut self.in_value, true); + self = self.print_def_path(def_id, substs)?; + self.in_value = was_in_value; + + Ok(self) + } + + fn in_binder( + self, + value: &ty::Binder, + ) -> Result + where T: Print<'gcx, 'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx> + { + self.pretty_in_binder(value) + } + + fn generic_delimiters( + mut self, + f: impl FnOnce(Self) -> Result, + ) -> Result { + write!(self, "<")?; + + let was_in_value = std::mem::replace(&mut self.in_value, false); + let mut inner = f(self)?; + inner.in_value = was_in_value; + + write!(inner, ">")?; + Ok(inner) + } + + fn region_should_not_be_omitted( + &self, + region: ty::Region<'_>, + ) -> bool { + let highlight = self.region_highlight_mode; + if highlight.region_highlighted(region).is_some() { + return true; + } + + if self.tcx.sess.verbose() { + return true; + } + + let identify_regions = self.tcx.sess.opts.debugging_opts.identify_regions; + + match *region { + ty::ReEarlyBound(ref data) => { + data.name != "" && data.name != "'_" + } + + ty::ReLateBound(_, br) | + ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | + ty::RePlaceholder(ty::Placeholder { name: br, .. }) => { + if let ty::BrNamed(_, name) = br { + if name != "" && name != "'_" { + return true; + } + } + + if let Some((region, _)) = highlight.highlight_bound_region { + if br == region { + return true; + } + } + + false + } + + ty::ReScope(_) | + ty::ReVar(_) if identify_regions => true, + + ty::ReVar(_) | + ty::ReScope(_) | + ty::ReErased => false, + + ty::ReStatic | + ty::ReEmpty | + ty::ReClosureBound(_) => true, + } + } +} + +// HACK(eddyb) limited to `FmtPrinter` because of `region_highlight_mode`. +impl FmtPrinter<'_, '_, '_, F> { + pub fn pretty_print_region( + mut self, + region: ty::Region<'_>, + ) -> Result { + define_scoped_cx!(self); + + // Watch out for region highlights. + let highlight = self.region_highlight_mode; + if let Some(n) = highlight.region_highlighted(region) { + p!(write("'{}", n)); + return Ok(self); + } + + if self.tcx.sess.verbose() { + p!(write("{:?}", region)); + return Ok(self); + } + + let identify_regions = self.tcx.sess.opts.debugging_opts.identify_regions; + + // These printouts are concise. They do not contain all the information + // the user might want to diagnose an error, but there is basically no way + // to fit that into a short string. Hence the recommendation to use + // `explain_region()` or `note_and_explain_region()`. + match *region { + ty::ReEarlyBound(ref data) => { + if data.name != "" { + p!(write("{}", data.name)); + return Ok(self); + } + } + ty::ReLateBound(_, br) | + ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | + ty::RePlaceholder(ty::Placeholder { name: br, .. 
}) => { + if let ty::BrNamed(_, name) = br { + if name != "" && name != "'_" { + p!(write("{}", name)); + return Ok(self); + } + } + + if let Some((region, counter)) = highlight.highlight_bound_region { + if br == region { + p!(write("'{}", counter)); + return Ok(self); + } + } + } + ty::ReScope(scope) if identify_regions => { + match scope.data { + region::ScopeData::Node => + p!(write("'{}s", scope.item_local_id().as_usize())), + region::ScopeData::CallSite => + p!(write("'{}cs", scope.item_local_id().as_usize())), + region::ScopeData::Arguments => + p!(write("'{}as", scope.item_local_id().as_usize())), + region::ScopeData::Destruction => + p!(write("'{}ds", scope.item_local_id().as_usize())), + region::ScopeData::Remainder(first_statement_index) => p!(write( + "'{}_{}rs", + scope.item_local_id().as_usize(), + first_statement_index.index() + )), + } + return Ok(self); + } + ty::ReVar(region_vid) if identify_regions => { + p!(write("{:?}", region_vid)); + return Ok(self); + } + ty::ReVar(_) => {} + ty::ReScope(_) | + ty::ReErased => {} + ty::ReStatic => { + p!(write("'static")); + return Ok(self); + } + ty::ReEmpty => { + p!(write("'")); + return Ok(self); + } + + // The user should never encounter these in unsubstituted form. + ty::ReClosureBound(vid) => { + p!(write("{:?}", vid)); + return Ok(self); + } + } + + p!(write("'_")); + + Ok(self) + } +} + +// HACK(eddyb) limited to `FmtPrinter` because of `binder_depth`, +// `region_index` and `used_region_names`. +impl FmtPrinter<'_, 'gcx, 'tcx, F> { + pub fn pretty_in_binder( + mut self, + value: &ty::Binder, + ) -> Result + where T: Print<'gcx, 'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx> + { + fn name_by_region_index(index: usize) -> InternedString { + match index { + 0 => Symbol::intern("'r"), + 1 => Symbol::intern("'s"), + i => Symbol::intern(&format!("'t{}", i-2)), + }.as_interned_str() + } + + // Replace any anonymous late-bound regions with named + // variants, using gensym'd identifiers, so that we can + // clearly differentiate between named and unnamed regions in + // the output. We'll probably want to tweak this over time to + // decide just how much information to give. 
+ if self.binder_depth == 0 { + self.prepare_late_bound_region_info(value); + } + + let mut empty = true; + let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| { + write!(cx, "{}", if empty { + empty = false; + start + } else { + cont + }) + }; + + define_scoped_cx!(self); + + let old_region_index = self.region_index; + let mut region_index = old_region_index; + let new_value = self.tcx.replace_late_bound_regions(value, |br| { + let _ = start_or_continue(&mut self, "for<", ", "); + let br = match br { + ty::BrNamed(_, name) => { + let _ = write!(self, "{}", name); + br + } + ty::BrAnon(_) | + ty::BrFresh(_) | + ty::BrEnv => { + let name = loop { + let name = name_by_region_index(region_index); + region_index += 1; + if !self.used_region_names.contains(&name) { + break name; + } + }; + let _ = write!(self, "{}", name); + ty::BrNamed(DefId::local(CRATE_DEF_INDEX), name) + } + }; + self.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)) + }).0; + start_or_continue(&mut self, "", "> ")?; + + self.binder_depth += 1; + self.region_index = region_index; + let mut inner = new_value.print(self)?; + inner.region_index = old_region_index; + inner.binder_depth -= 1; + Ok(inner) + } + + fn prepare_late_bound_region_info(&mut self, value: &ty::Binder) + where T: TypeFoldable<'tcx> + { + + struct LateBoundRegionNameCollector<'a>(&'a mut FxHashSet); + impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { + match *r { + ty::ReLateBound(_, ty::BrNamed(_, name)) => { + self.0.insert(name); + }, + _ => {}, + } + r.super_visit_with(self) + } + } + + self.used_region_names.clear(); + let mut collector = LateBoundRegionNameCollector(&mut self.used_region_names); + value.visit_with(&mut collector); + self.region_index = 0; + } +} + +impl<'gcx: 'tcx, 'tcx, T, P: PrettyPrinter<'gcx, 'tcx>> Print<'gcx, 'tcx, P> + for ty::Binder + where T: Print<'gcx, 'tcx, P, Output = P, Error = P::Error> + TypeFoldable<'tcx> +{ + type Output = P; + type Error = P::Error; + fn print(&self, cx: P) -> Result { + cx.in_binder(self) + } +} + +impl<'gcx: 'tcx, 'tcx, T, U, P: PrettyPrinter<'gcx, 'tcx>> Print<'gcx, 'tcx, P> + for ty::OutlivesPredicate + where T: Print<'gcx, 'tcx, P, Output = P, Error = P::Error>, + U: Print<'gcx, 'tcx, P, Output = P, Error = P::Error>, +{ + type Output = P; + type Error = P::Error; + fn print(&self, mut cx: P) -> Result { + define_scoped_cx!(cx); + p!(print(self.0), write(" : "), print(self.1)); + Ok(cx) + } +} + +macro_rules! forward_display_to_print { + ($($ty:ty),+) => { + $(impl fmt::Display for $ty { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + tcx.lift(self) + .expect("could not lift for printing") + .print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?; + Ok(()) + }) + } + })+ + }; +} + +macro_rules! define_print_and_forward_display { + (($self:ident, $cx:ident): $($ty:ty $print:block)+) => { + $(impl<'gcx: 'tcx, 'tcx, P: PrettyPrinter<'gcx, 'tcx>> Print<'gcx, 'tcx, P> for $ty { + type Output = P; + type Error = fmt::Error; + fn print(&$self, $cx: P) -> Result { + #[allow(unused_mut)] + let mut $cx = $cx; + define_scoped_cx!($cx); + let _: () = $print; + #[allow(unreachable_code)] + Ok($cx) + } + })+ + + forward_display_to_print!($($ty),+); + }; +} + +// HACK(eddyb) this is separate because `ty::RegionKind` doesn't need lifting. 
+impl fmt::Display for ty::RegionKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + self.print(FmtPrinter::new(tcx, f, Namespace::TypeNS))?; + Ok(()) + }) + } +} + +forward_display_to_print! { + Ty<'tcx>, + &'tcx ty::List>, + + // HACK(eddyb) these are exhaustive instead of generic, + // because `for<'gcx: 'tcx, 'tcx>` isn't possible yet. + ty::Binder<&'tcx ty::List>>, + ty::Binder>, + ty::Binder>, + ty::Binder>, + ty::Binder>, + ty::Binder>, + ty::Binder, ty::Region<'tcx>>>, + ty::Binder, ty::Region<'tcx>>>, + + ty::OutlivesPredicate, ty::Region<'tcx>>, + ty::OutlivesPredicate, ty::Region<'tcx>> +} + +define_print_and_forward_display! { + (self, cx): + + &'tcx ty::List> { + p!(write("{{")); + let mut tys = self.iter(); + if let Some(&ty) = tys.next() { + p!(print(ty)); + for &ty in tys { + p!(write(", "), print(ty)); + } + } + p!(write("}}")) + } + + ty::TypeAndMut<'tcx> { + p!(write("{}", if self.mutbl == hir::MutMutable { "mut " } else { "" }), + print(self.ty)) + } + + ty::ExistentialTraitRef<'tcx> { + // Use a type that can't appear in defaults of type parameters. + let dummy_self = cx.tcx().mk_infer(ty::FreshTy(0)); + let trait_ref = self.with_self_ty(cx.tcx(), dummy_self); + p!(print(trait_ref)) + } + + ty::ExistentialProjection<'tcx> { + let name = cx.tcx().associated_item(self.item_def_id).ident; + p!(write("{} = ", name), print(self.ty)) + } + + ty::ExistentialPredicate<'tcx> { + match *self { + ty::ExistentialPredicate::Trait(x) => p!(print(x)), + ty::ExistentialPredicate::Projection(x) => p!(print(x)), + ty::ExistentialPredicate::AutoTrait(def_id) => { + p!(print_def_path(def_id, &[])); + } + } + } + + ty::FnSig<'tcx> { + if self.unsafety == hir::Unsafety::Unsafe { + p!(write("unsafe ")); + } + + if self.abi != Abi::Rust { + p!(write("extern {} ", self.abi)); + } + + p!(write("fn"), pretty_fn_sig(self.inputs(), self.c_variadic, self.output())); + } + + ty::InferTy { + if cx.tcx().sess.verbose() { + p!(write("{:?}", self)); + return Ok(cx); + } + match *self { + ty::TyVar(_) => p!(write("_")), + ty::IntVar(_) => p!(write("{}", "{integer}")), + ty::FloatVar(_) => p!(write("{}", "{float}")), + ty::FreshTy(v) => p!(write("FreshTy({})", v)), + ty::FreshIntTy(v) => p!(write("FreshIntTy({})", v)), + ty::FreshFloatTy(v) => p!(write("FreshFloatTy({})", v)) + } + } + + ty::TraitRef<'tcx> { + p!(print_def_path(self.def_id, self.substs)); + } + + &'tcx ty::Const<'tcx> { + match self.val { + ConstValue::Unevaluated(..) | + ConstValue::Infer(..) => p!(write("_")), + ConstValue::Param(ParamConst { name, .. 
}) => p!(write("{}", name)), + _ => p!(write("{:?}", self)), + } + } + + ty::ParamTy { + p!(write("{}", self.name)) + } + + ty::ParamConst { + p!(write("{}", self.name)) + } + + ty::SubtypePredicate<'tcx> { + p!(print(self.a), write(" <: "), print(self.b)) + } + + ty::TraitPredicate<'tcx> { + p!(print(self.trait_ref.self_ty()), write(": "), print(self.trait_ref)) + } + + ty::ProjectionPredicate<'tcx> { + p!(print(self.projection_ty), write(" == "), print(self.ty)) + } + + ty::ProjectionTy<'tcx> { + p!(print_def_path(self.item_def_id, self.substs)); + } + + ty::ClosureKind { + match *self { + ty::ClosureKind::Fn => p!(write("Fn")), + ty::ClosureKind::FnMut => p!(write("FnMut")), + ty::ClosureKind::FnOnce => p!(write("FnOnce")), + } + } + + ty::Predicate<'tcx> { + match *self { + ty::Predicate::Trait(ref data) => p!(print(data)), + ty::Predicate::Subtype(ref predicate) => p!(print(predicate)), + ty::Predicate::RegionOutlives(ref predicate) => p!(print(predicate)), + ty::Predicate::TypeOutlives(ref predicate) => p!(print(predicate)), + ty::Predicate::Projection(ref predicate) => p!(print(predicate)), + ty::Predicate::WellFormed(ty) => p!(print(ty), write(" well-formed")), + ty::Predicate::ObjectSafe(trait_def_id) => { + p!(write("the trait `"), + print_def_path(trait_def_id, &[]), + write("` is object-safe")) + } + ty::Predicate::ClosureKind(closure_def_id, _closure_substs, kind) => { + p!(write("the closure `"), + print_value_path(closure_def_id, &[]), + write("` implements the trait `{}`", kind)) + } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + p!(write("the constant `"), + print_value_path(def_id, substs), + write("` can be evaluated")) + } + } + } + + Kind<'tcx> { + match self.unpack() { + UnpackedKind::Lifetime(lt) => p!(print(lt)), + UnpackedKind::Type(ty) => p!(print(ty)), + UnpackedKind::Const(ct) => p!(print(ct)), + } + } +} diff --git a/src/librustc/ty/query/README.md b/src/librustc/ty/query/README.md index 0fcaef5de54c2..4b5e08cecd99c 100644 --- a/src/librustc/ty/query/README.md +++ b/src/librustc/ty/query/README.md @@ -1,302 +1,3 @@ -# The Rust Compiler Query System - -The Compiler Query System is the key to our new demand-driven -organization. The idea is pretty simple. You have various queries -that compute things about the input -- for example, there is a query -called `type_of(def_id)` that, given the def-id of some item, will -compute the type of that item and return it to you. - -Query execution is **memoized** -- so the first time you invoke a -query, it will go do the computation, but the next time, the result is -returned from a hashtable. Moreover, query execution fits nicely into -**incremental computation**; the idea is roughly that, when you do a -query, the result **may** be returned to you by loading stored data -from disk (but that's a separate topic we won't discuss further here). - -The overall vision is that, eventually, the entire compiler -control-flow will be query driven. There will effectively be one -top-level query ("compile") that will run compilation on a crate; this -will in turn demand information about that crate, starting from the -*end*. For example: - -- This "compile" query might demand to get a list of codegen-units - (i.e., modules that need to be compiled by LLVM). -- But computing the list of codegen-units would invoke some subquery - that returns the list of all modules defined in the Rust source. -- That query in turn would invoke something asking for the HIR. 
-- This keeps going further and further back until we wind up doing the - actual parsing. - -However, that vision is not fully realized. Still, big chunks of the -compiler (for example, generating MIR) work exactly like this. - -### Invoking queries - -To invoke a query is simple. The tcx ("type context") offers a method -for each defined query. So, for example, to invoke the `type_of` -query, you would just do this: - -```rust -let ty = tcx.type_of(some_def_id); -``` - -### Cycles between queries - -Currently, cycles during query execution should always result in a -compilation error. Typically, they arise because of illegal programs -that contain cyclic references they shouldn't (though sometimes they -arise because of compiler bugs, in which case we need to factor our -queries in a more fine-grained fashion to avoid them). - -However, it is nonetheless often useful to *recover* from a cycle -(after reporting an error, say) and try to soldier on, so as to give a -better user experience. In order to recover from a cycle, you don't -get to use the nice method-call-style syntax. Instead, you invoke -using the `try_get` method, which looks roughly like this: - -```rust -use ty::query::queries; -... -match queries::type_of::try_get(tcx, DUMMY_SP, self.did) { - Ok(result) => { - // no cycle occurred! You can use `result` - } - Err(err) => { - // A cycle occurred! The error value `err` is a `DiagnosticBuilder`, - // meaning essentially an "in-progress", not-yet-reported error message. - // See below for more details on what to do here. - } -} -``` - -So, if you get back an `Err` from `try_get`, then a cycle *did* occur. This means that -you must ensure that a compiler error message is reported. You can do that in two ways: - -The simplest is to invoke `err.emit()`. This will emit the cycle error to the user. - -However, often cycles happen because of an illegal program, and you -know at that point that an error either already has been reported or -will be reported due to this cycle by some other bit of code. In that -case, you can invoke `err.cancel()` to not emit any error. It is -traditional to then invoke: - -``` -tcx.sess.delay_span_bug(some_span, "some message") -``` - -`delay_span_bug()` is a helper that says: we expect a compilation -error to have happened or to happen in the future; so, if compilation -ultimately succeeds, make an ICE with the message `"some -message"`. This is basically just a precaution in case you are wrong. - -### How the compiler executes a query - -So you may be wondering what happens when you invoke a query -method. The answer is that, for each query, the compiler maintains a -cache -- if your query has already been executed, then, the answer is -simple: we clone the return value out of the cache and return it -(therefore, you should try to ensure that the return types of queries -are cheaply cloneable; insert a `Rc` if necessary). - -#### Providers - -If, however, the query is *not* in the cache, then the compiler will -try to find a suitable **provider**. A provider is a function that has -been defined and linked into the compiler somewhere that contains the -code to compute the result of the query. - -**Providers are defined per-crate.** The compiler maintains, -internally, a table of providers for every crate, at least -conceptually. Right now, there are really two sets: the providers for -queries about the **local crate** (that is, the one being compiled) -and providers for queries about **external crates** (that is, -dependencies of the local crate). 
Note that what determines the crate -that a query is targeting is not the *kind* of query, but the *key*. -For example, when you invoke `tcx.type_of(def_id)`, that could be a -local query or an external query, depending on what crate the `def_id` -is referring to (see the `self::keys::Key` trait for more information -on how that works). - -Providers always have the same signature: - -```rust -fn provider<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx, 'tcx>, - key: QUERY_KEY) - -> QUERY_RESULT -{ - ... -} -``` - -Providers take two arguments: the `tcx` and the query key. Note also -that they take the *global* tcx (i.e., they use the `'tcx` lifetime -twice), rather than taking a tcx with some active inference context. -They return the result of the query. - -#### How providers are setup - -When the tcx is created, it is given the providers by its creator using -the `Providers` struct. This struct is generate by the macros here, but it -is basically a big list of function pointers: - -```rust -struct Providers { - type_of: for<'cx, 'tcx> fn(TyCtxt<'cx, 'tcx, 'tcx>, DefId) -> Ty<'tcx>, - ... -} -``` - -At present, we have one copy of the struct for local crates, and one -for external crates, though the plan is that we may eventually have -one per crate. - -These `Provider` structs are ultimately created and populated by -`librustc_driver`, but it does this by distributing the work -throughout the other `rustc_*` crates. This is done by invoking -various `provide` functions. These functions tend to look something -like this: - -```rust -pub fn provide(providers: &mut Providers) { - *providers = Providers { - type_of, - ..*providers - }; -} -``` - -That is, they take an `&mut Providers` and mutate it in place. Usually -we use the formulation above just because it looks nice, but you could -as well do `providers.type_of = type_of`, which would be equivalent. -(Here, `type_of` would be a top-level function, defined as we saw -before.) So, if we want to add a provider for some other query, -let's call it `fubar`, into the crate above, we might modify the `provide()` -function like so: - -```rust -pub fn provide(providers: &mut Providers) { - *providers = Providers { - type_of, - fubar, - ..*providers - }; -} - -fn fubar<'cx, 'tcx>(tcx: TyCtxt<'cx, 'tcx>, key: DefId) -> Fubar<'tcx> { .. } -``` - -NB. Most of the `rustc_*` crates only provide **local -providers**. Almost all **extern providers** wind up going through the -`rustc_metadata` crate, which loads the information from the crate -metadata. But in some cases there are crates that provide queries for -*both* local and external crates, in which case they define both a -`provide` and a `provide_extern` function that `rustc_driver` can -invoke. - -### Adding a new kind of query - -So suppose you want to add a new kind of query, how do you do so? -Well, defining a query takes place in two steps: - -1. first, you have to specify the query name and arguments; and then, -2. you have to supply query providers where needed. - -To specify the query name and arguments, you simply add an entry -to the big macro invocation in `mod.rs`. This will probably have changed -by the time you read this README, but at present it looks something -like: - -``` -define_queries! { <'tcx> - /// Records the type of every item. - [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, - - ... -} -``` - -Each line of the macro defines one query. 
The name is broken up like this: - -``` -[] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, -^^ ^^^^^^^ ^^^^^^^^^^ ^^^^^ ^^^^^^^^ -| | | | | -| | | | result type of query -| | | query key type -| | dep-node constructor -| name of query -query flags -``` - -Let's go over them one by one: - -- **Query flags:** these are largely unused right now, but the intention - is that we'll be able to customize various aspects of how the query is - processed. -- **Name of query:** the name of the query method - (`tcx.type_of(..)`). Also used as the name of a struct - (`ty::query::queries::type_of`) that will be generated to represent - this query. -- **Dep-node constructor:** indicates the constructor function that - connects this query to incremental compilation. Typically, this is a - `DepNode` variant, which can be added by modifying the - `define_dep_nodes!` macro invocation in - `librustc/dep_graph/dep_node.rs`. - - However, sometimes we use a custom function, in which case the - name will be in snake case and the function will be defined at the - bottom of the file. This is typically used when the query key is - not a def-id, or just not the type that the dep-node expects. -- **Query key type:** the type of the argument to this query. - This type must implement the `ty::query::keys::Key` trait, which - defines (for example) how to map it to a crate, and so forth. -- **Result type of query:** the type produced by this query. This type - should (a) not use `RefCell` or other interior mutability and (b) be - cheaply cloneable. Interning or using `Rc` or `Arc` is recommended for - non-trivial data types. - - The one exception to those rules is the `ty::steal::Steal` type, - which is used to cheaply modify MIR in place. See the definition - of `Steal` for more details. New uses of `Steal` should **not** be - added without alerting `@rust-lang/compiler`. - -So, to add a query: - -- Add an entry to `define_queries!` using the format above. -- Possibly add a corresponding entry to the dep-node macro. -- Link the provider by modifying the appropriate `provide` method; - or add a new one if needed and ensure that `rustc_driver` is invoking it. - -#### Query structs and descriptions - -For each kind, the `define_queries` macro will generate a "query struct" -named after the query. This struct is a kind of a place-holder -describing the query. Each such struct implements the -`self::config::QueryConfig` trait, which has associated types for the -key/value of that particular query. Basically the code generated looks something -like this: - -```rust -// Dummy struct representing a particular kind of query: -pub struct type_of<'tcx> { phantom: PhantomData<&'tcx ()> } - -impl<'tcx> QueryConfig for type_of<'tcx> { - type Key = DefId; - type Value = Ty<'tcx>; -} -``` - -There is an additional trait that you may wish to implement called -`self::config::QueryDescription`. This trait is used during cycle -errors to give a "human readable" name for the query, so that we can -summarize what was happening when the cycle occurred. Implementing -this trait is optional if the query key is `DefId`, but if you *don't* -implement it, you get a pretty generic error ("processing `foo`..."). -You can put new impls into the `config` module. 
They look something like this: - -```rust -impl<'tcx> QueryDescription for queries::type_of<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, key: DefId) -> String { - format!("computing the type of `{}`", tcx.item_path_str(key)) - } -} -``` +For more information about how the query system works, see the [rustc guide]. +[rustc guide]: https://rust-lang.github.io/rustc-guide/query.html diff --git a/src/librustc/ty/query/config.rs b/src/librustc/ty/query/config.rs index fd9143be679a4..73b7902797242 100644 --- a/src/librustc/ty/query/config.rs +++ b/src/librustc/ty/query/config.rs @@ -1,37 +1,19 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use dep_graph::SerializedDepNodeIndex; -use dep_graph::DepNode; -use hir::def_id::{CrateNum, DefId, DefIndex}; -use mir::interpret::GlobalId; -use traits; -use traits::query::{ - CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal, - CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal, - CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, -}; -use ty::{self, ParamEnvAnd, Ty, TyCtxt}; -use ty::subst::Substs; -use ty::query::queries; -use ty::query::Query; -use ty::query::QueryCache; -use util::profiling::ProfileCategory; +use crate::dep_graph::SerializedDepNodeIndex; +use crate::dep_graph::DepNode; +use crate::hir::def_id::{CrateNum, DefId}; +use crate::ty::TyCtxt; +use crate::ty::query::queries; +use crate::ty::query::Query; +use crate::ty::query::QueryCache; +use crate::ty::query::plumbing::CycleError; +use crate::util::profiling::ProfileCategory; use std::borrow::Cow; use std::hash::Hash; use std::fmt::Debug; -use syntax_pos::symbol::InternedString; use rustc_data_structures::sync::Lock; -use rustc_data_structures::stable_hasher::HashStable; -use ich::StableHashingContext; +use rustc_data_structures::fingerprint::Fingerprint; +use crate::ich::StableHashingContext; // Query configuration and description traits. 
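
Since the in-tree README above is reduced to a pointer at the rustc guide, here is a minimal, self-contained sketch of the demand-driven, memoized query pattern that README described (per-query provider functions plus a memoization table). This is plain Rust with invented names for illustration only; none of these types are rustc's, and it is not part of the patch.

```rust
// Illustrative only: a toy, demand-driven, memoized "query" cache.
// The shape mirrors the old README: providers compute results on demand,
// and results are memoized so each key is computed at most once.
use std::cell::RefCell;
use std::collections::HashMap;

// A provider computes the value for a key; in rustc this role is played by
// the per-crate provider functions registered in a `Providers`-style table.
type Provider = fn(&QueryEngine, u32) -> String;

struct QueryEngine {
    provider: Provider,
    cache: RefCell<HashMap<u32, String>>,
}

impl QueryEngine {
    fn new(provider: Provider) -> Self {
        QueryEngine { provider, cache: RefCell::new(HashMap::new()) }
    }

    // Invoking a query first consults the memoization table and only calls
    // the provider on a cache miss.
    fn get(&self, key: u32) -> String {
        if let Some(hit) = self.cache.borrow().get(&key) {
            return hit.clone();
        }
        let computed = (self.provider)(self, key);
        self.cache.borrow_mut().insert(key, computed.clone());
        computed
    }
}

// A provider may itself demand other queries, which is how the demand-driven
// control flow arises: asking for one result pulls in everything it needs.
fn type_of_provider(engine: &QueryEngine, key: u32) -> String {
    if key == 0 {
        "root".to_string()
    } else {
        format!("depends-on({})", engine.get(key - 1))
    }
}

fn main() {
    let engine = QueryEngine::new(type_of_provider);
    println!("{}", engine.get(2)); // computes keys 0, 1, 2 once each
    println!("{}", engine.get(2)); // served from the cache
}
```

The real system layers incremental compilation, disk caching, and cycle detection on top of this basic get-or-compute loop, which is what the `QueryConfig`/`QueryDescription` traits in the hunk below configure per query.
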
@@ -40,10 +22,10 @@ pub trait QueryConfig<'tcx> { const CATEGORY: ProfileCategory; type Key: Eq + Hash + Clone + Debug; - type Value: Clone + for<'a> HashStable>; + type Value: Clone; } -pub(super) trait QueryAccessors<'tcx>: QueryConfig<'tcx> { +pub(crate) trait QueryAccessors<'tcx>: QueryConfig<'tcx> { fn query(key: Self::Key) -> Query<'tcx>; // Don't use this method to access query results, instead use the methods on TyCtxt @@ -54,14 +36,19 @@ pub(super) trait QueryAccessors<'tcx>: QueryConfig<'tcx> { // Don't use this method to compute query results, instead use the methods on TyCtxt fn compute(tcx: TyCtxt<'_, 'tcx, '_>, key: Self::Key) -> Self::Value; - fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value; + fn hash_result( + hcx: &mut StableHashingContext<'_>, + result: &Self::Value + ) -> Option; + + fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>, error: CycleError<'tcx>) -> Self::Value; } -pub(super) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { +pub(crate) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { fn describe(tcx: TyCtxt<'_, '_, '_>, key: Self::Key) -> Cow<'static, str>; #[inline] - fn cache_on_disk(_: Self::Key) -> bool { + fn cache_on_disk(_: TyCtxt<'_, 'tcx, 'tcx>, _: Self::Key) -> bool { false } @@ -75,820 +62,25 @@ pub(super) trait QueryDescription<'tcx>: QueryAccessors<'tcx> { impl<'tcx, M: QueryAccessors<'tcx, Key=DefId>> QueryDescription<'tcx> for M { default fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { if !tcx.sess.verbose() { - format!("processing `{}`", tcx.item_path_str(def_id)).into() + format!("processing `{}`", tcx.def_path_str(def_id)).into() } else { let name = unsafe { ::std::intrinsics::type_name::() }; - format!("processing `{}` applied to `{:?}`", name, def_id).into() + format!("processing {:?} with query `{}`", def_id, name).into() } } } -impl<'tcx> QueryDescription<'tcx> for queries::normalize_projection_ty<'tcx> { - fn describe( - _tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalProjectionGoal<'tcx>, - ) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::implied_outlives_bounds<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalTyGoal<'tcx>) -> Cow<'static, str> { - format!("computing implied outlives bounds for `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dropck_outlives<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalTyGoal<'tcx>) -> Cow<'static, str> { - format!("computing dropck types for `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::normalize_ty_after_erasing_regions<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: ParamEnvAnd<'tcx, Ty<'tcx>>) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::evaluate_obligation<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalPredicateGoal<'tcx>) -> Cow<'static, str> { - format!("evaluating trait selection obligation `{}`", goal.value.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_ascribe_user_type<'tcx> { - fn describe( - _tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx>, - ) -> Cow<'static, str> { - format!("evaluating `type_op_ascribe_user_type` `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_eq<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalTypeOpEqGoal<'tcx>) -> Cow<'static, str> { - 
format!("evaluating `type_op_eq` `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_subtype<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalTypeOpSubtypeGoal<'tcx>) - -> Cow<'static, str> { - format!("evaluating `type_op_subtype` `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_prove_predicate<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, goal: CanonicalTypeOpProvePredicateGoal<'tcx>) - -> Cow<'static, str> { - format!("evaluating `type_op_prove_predicate` `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_ty<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_predicate<'tcx> { - fn describe( - _tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>>, - ) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_poly_fn_sig<'tcx> { - fn describe( - _tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>, - ) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_op_normalize_fn_sig<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, - goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>) -> Cow<'static, str> { - format!("normalizing `{:?}`", goal).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_copy_raw<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Cow<'static, str> { - format!("computing whether `{}` is `Copy`", env.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_sized_raw<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Cow<'static, str> { - format!("computing whether `{}` is `Sized`", env.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_freeze_raw<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Cow<'static, str> { - format!("computing whether `{}` is freeze", env.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::needs_drop_raw<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Cow<'static, str> { - format!("computing whether `{}` needs drop", env.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::layout_raw<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Cow<'static, str> { - format!("computing layout of `{}`", env.value).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::super_predicates_of<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("computing the supertraits of `{}`", - tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::erase_regions_ty<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, ty: Ty<'tcx>) -> Cow<'static, str> { - format!("erasing regions from `{:?}`", ty).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::type_param_predicates<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, (_, def_id): (DefId, DefId)) -> Cow<'static, str> { - let id = tcx.hir().as_local_node_id(def_id).unwrap(); - 
format!("computing the bounds for type parameter `{}`", - tcx.hir().ty_param_name(id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::coherent_trait<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("coherence checking all impls of trait `{}`", - tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::upstream_monomorphizations<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, k: CrateNum) -> Cow<'static, str> { - format!("collecting available upstream monomorphizations `{:?}`", k).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, k: CrateNum) -> Cow<'static, str> { - format!("all inherent impls defined in crate `{:?}`", k).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_inherent_impls_overlap_check<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "check for overlap between inherent impls defined in this crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_variances<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "computing the variances for items in this crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::inferred_outlives_crate<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "computing the inferred outlives predicates for items in this crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::mir_shims<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def: ty::InstanceDef<'tcx>) -> Cow<'static, str> { - format!("generating MIR shim for `{}`", - tcx.item_path_str(def.def_id())).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::privacy_access_levels<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "privacy access levels".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::typeck_item_bodies<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "type-checking all item bodies".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::reachable_set<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "reachability".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::const_eval<'tcx> { - fn describe( - tcx: TyCtxt<'_, '_, '_>, - key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, - ) -> Cow<'static, str> { - format!( - "const-evaluating + checking `{}`", - tcx.item_path_str(key.value.instance.def.def_id()), - ).into() - } - - #[inline] - fn cache_on_disk(_key: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::const_eval_raw<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) - -> Cow<'static, str> - { - format!("const-evaluating `{}`", tcx.item_path_str(key.value.instance.def.def_id())).into() - } - - #[inline] - fn cache_on_disk(_key: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.queries.on_disk_cache.try_load_query_result(tcx, id).map(Ok) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::mir_keys<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: 
CrateNum) -> Cow<'static, str> { - "getting a list of all mir_keys".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::symbol_name<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, instance: ty::Instance<'tcx>) -> Cow<'static, str> { - format!("computing the symbol for `{}`", instance).into() - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.queries.on_disk_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::describe_def<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("describe_def") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::def_span<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("def_span") - } -} - - -impl<'tcx> QueryDescription<'tcx> for queries::lookup_stability<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("stability") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::lookup_deprecation_entry<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("deprecation") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::item_attrs<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("item_attrs") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_reachable_non_generic<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("is_reachable_non_generic") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::fn_arg_names<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("fn_arg_names") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::impl_parent<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("impl_parent") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::trait_of_item<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - bug!("trait_of_item") - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::const_is_rvalue_promotable_to_static<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("const checking if rvalue is promotable to static `{}`", - tcx.item_path_str(def_id)).into() - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.queries.on_disk_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::rvalue_promotable_map<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("checking which parts of `{}` are promotable to static", - tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_mir_available<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("checking if item is mir available: `{}`", - tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::codegen_fulfill_obligation<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, - key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> Cow<'static, str> { - format!("checking if `{}` fulfills its obligations", tcx.item_path_str(key.1.def_id())) - .into() - } - - #[inline] - fn cache_on_disk(_: Self::Key) -> bool { - true - } - - #[inline] - 
fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - tcx.queries.on_disk_cache.try_load_query_result(tcx, id) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::trait_impls_of<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("trait impls of `{}`", tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_object_safe<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("determine object safety of trait `{}`", tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_const_fn_raw<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Cow<'static, str> { - format!("checking if item is const fn: `{}`", tcx.item_path_str(def_id)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dylib_dependency_formats<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "dylib dependency formats of crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_panic_runtime<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "checking if the crate is_panic_runtime".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_compiler_builtins<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "checking if the crate is_compiler_builtins".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::has_global_allocator<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "checking if the crate has_global_allocator".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::has_panic_handler<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "checking if the crate has_panic_handler".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::extern_crate<'tcx> { - fn describe(_: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - "getting crate's ExternCrateData".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::lint_levels<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "computing the lint levels for items in this crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::specializes<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: (DefId, DefId)) -> Cow<'static, str> { - "computing whether impls specialize one another".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::in_scope_traits_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefIndex) -> Cow<'static, str> { - "traits in scope at a block".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_no_builtins<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "test whether a crate has #![no_builtins]".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::panic_strategy<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "query a crate's configured panic strategy".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_profiler_runtime<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "query a crate is #![profiler_runtime]".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_sanitizer_runtime<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "query a crate is 
#![sanitizer_runtime]".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::reachable_non_generics<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the exported symbols of a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::native_libraries<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the native libraries of a linked crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::foreign_modules<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the foreign modules of a linked crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::plugin_registrar_fn<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the plugin registrar for a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::proc_macro_decls_static<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the derive registrar for a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_disambiguator<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the disambiguator a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_hash<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the hash a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::original_crate_name<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the original name a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::extra_filename<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the extra filename for a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::implementations_of_trait<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: (CrateNum, DefId)) -> Cow<'static, str> { - "looking up implementations of a trait in a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::all_trait_implementations<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up all (?) 
trait implementations".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::link_args<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up link arguments for a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::resolve_lifetimes<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "resolving lifetimes".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::named_region_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefIndex) -> Cow<'static, str> { - "looking up a named region".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::is_late_bound_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefIndex) -> Cow<'static, str> { - "testing if a region is late bound".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::object_lifetime_defaults_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefIndex) -> Cow<'static, str> { - "looking up lifetime defaults for a region".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dep_kind<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "fetching what a dependency looks like".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::crate_name<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "fetching what a crate is named".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::get_lib_features<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the lib features map".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::defined_lib_features<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the lib features defined in a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::get_lang_items<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the lang items map".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::defined_lang_items<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the lang items defined in a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::missing_lang_items<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the missing lang items in a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::visible_parent_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the visible parent map".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::missing_extern_crate_item<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "seeing if we're missing an `extern crate` item for this crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::used_crate_source<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking at the source for a crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::postorder_cnums<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "generating a postorder list of CrateNums".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::maybe_unused_extern_crates<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up all possibly unused 
extern crates".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::stability_index<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "calculating the stability index for the local crate".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::all_traits<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "fetching all foreign and local traits".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::all_crate_nums<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "fetching all foreign CrateNum instances".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::exported_symbols<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "exported_symbols".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::collect_and_partition_mono_items<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "collect_and_partition_mono_items".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::codegen_unit<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: InternedString) -> Cow<'static, str> { - "codegen_unit".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::output_filenames<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "output_filenames".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::vtable_methods<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, key: ty::PolyTraitRef<'tcx> ) -> Cow<'static, str> { - format!("finding all methods for trait {}", tcx.item_path_str(key.def_id())).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::features_query<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up enabled feature gates".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::typeck_tables_of<'tcx> { - #[inline] - fn cache_on_disk(def_id: Self::Key) -> bool { - def_id.is_local() - } - - fn try_load_from_disk(tcx: TyCtxt<'_, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - let typeck_tables: Option> = tcx - .queries.on_disk_cache - .try_load_query_result(tcx, id); - - typeck_tables.map(|tables| tcx.alloc_tables(tables)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::optimized_mir<'tcx> { - #[inline] - fn cache_on_disk(def_id: Self::Key) -> bool { - def_id.is_local() - } - - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - let mir: Option<::mir::Mir<'tcx>> = tcx.queries.on_disk_cache - .try_load_query_result(tcx, id); - mir.map(|x| tcx.alloc_mir(x)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::substitute_normalize_and_test_predicates<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, key: (DefId, &'tcx Substs<'tcx>)) -> Cow<'static, str> { - format!("testing substituted normalized predicates:`{}`", tcx.item_path_str(key.0)).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::target_features_whitelist<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "looking up the whitelist of target features".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::instance_def_size_estimate<'tcx> { - fn describe(tcx: TyCtxt<'_, '_, '_>, def: ty::InstanceDef<'tcx>) -> Cow<'static, str> { - format!("estimating size for `{}`", tcx.item_path_str(def.def_id())).into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::generics_of<'tcx> { - 
#[inline] - fn cache_on_disk(def_id: Self::Key) -> bool { - def_id.is_local() - } - - fn try_load_from_disk<'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: SerializedDepNodeIndex) - -> Option { - let generics: Option = tcx.queries.on_disk_cache - .try_load_query_result(tcx, id); - generics.map(|x| tcx.alloc_generics(x)) - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::program_clauses_for<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - "generating chalk-style clauses".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::program_clauses_for_env<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: traits::Environment<'tcx>) -> Cow<'static, str> { - "generating chalk-style clauses for environment".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::environment<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: DefId) -> Cow<'static, str> { - "return a chalk-style environment".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::wasm_import_module_map<'tcx> { - fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "wasm import module map".into() - } -} - -impl<'tcx> QueryDescription<'tcx> for queries::dllimport_foreign_items<'tcx> { +impl<'tcx> QueryDescription<'tcx> for queries::analysis<'tcx> { fn describe(_tcx: TyCtxt<'_, '_, '_>, _: CrateNum) -> Cow<'static, str> { - "wasm import module map".into() + "running analysis passes on this crate".into() } } macro_rules! impl_disk_cacheable_query( - ($query_name:ident, |$key:tt| $cond:expr) => { + ($query_name:ident, |$tcx:tt, $key:tt| $cond:expr) => { impl<'tcx> QueryDescription<'tcx> for queries::$query_name<'tcx> { #[inline] - fn cache_on_disk($key: Self::Key) -> bool { + fn cache_on_disk($tcx: TyCtxt<'_, 'tcx, 'tcx>, $key: Self::Key) -> bool { $cond } @@ -902,14 +94,14 @@ macro_rules! impl_disk_cacheable_query( } ); -impl_disk_cacheable_query!(unsafety_check_result, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(borrowck, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(mir_borrowck, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(mir_const_qualif, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(check_match, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(def_symbol_name, |_| true); -impl_disk_cacheable_query!(type_of, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(predicates_of, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(used_trait_imports, |def_id| def_id.is_local()); -impl_disk_cacheable_query!(codegen_fn_attrs, |_| true); -impl_disk_cacheable_query!(specialization_graph_of, |_| true); +impl_disk_cacheable_query!(mir_borrowck, |tcx, def_id| { + def_id.is_local() && tcx.is_closure(def_id) +}); + +impl_disk_cacheable_query!(unsafety_check_result, |_, def_id| def_id.is_local()); +impl_disk_cacheable_query!(borrowck, |_, def_id| def_id.is_local()); +impl_disk_cacheable_query!(check_match, |_, def_id| def_id.is_local()); +impl_disk_cacheable_query!(predicates_of, |_, def_id| def_id.is_local()); +impl_disk_cacheable_query!(used_trait_imports, |_, def_id| def_id.is_local()); +impl_disk_cacheable_query!(codegen_fn_attrs, |_, _| true); +impl_disk_cacheable_query!(specialization_graph_of, |_, _| true); diff --git a/src/librustc/ty/query/job.rs b/src/librustc/ty/query/job.rs index 1439e41bb31fd..8e68c9fa30431 100644 --- a/src/librustc/ty/query/job.rs +++ b/src/librustc/ty/query/job.rs @@ -1,31 +1,28 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(warnings)] use std::mem; +use std::process; +use std::{fmt, ptr}; + use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::sync::{Lock, LockGuard, Lrc, Weak}; use rustc_data_structures::OnDrop; +use rustc_data_structures::jobserver; use syntax_pos::Span; -use ty::tls; -use ty::query::Query; -use ty::query::plumbing::CycleError; -use ty::context::TyCtxt; -use errors::Diagnostic; -use std::process; -use std::{fmt, ptr}; -#[cfg(parallel_queries)] +use crate::ty::tls; +use crate::ty::query::Query; +use crate::ty::query::plumbing::CycleError; +#[cfg(not(parallel_compiler))] +use crate::ty::query::{ + plumbing::TryGetJob, + config::QueryDescription, +}; +use crate::ty::context::TyCtxt; + +#[cfg(parallel_compiler)] use { - rayon_core, + rustc_rayon_core as rayon_core, parking_lot::{Mutex, Condvar}, std::sync::atomic::Ordering, std::thread, @@ -35,92 +32,79 @@ use { rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}, }; -/// Indicates the state of a query for a given key in a query map +/// Indicates the state of a query for a given key in a query map. pub(super) enum QueryResult<'tcx> { - /// An already executing query. The query job can be used to await for its completion + /// An already executing query. The query job can be used to await for its completion. Started(Lrc>), - /// The query panicked. Queries trying to wait on this will raise a fatal error / silently panic + /// The query panicked. Queries trying to wait on this will raise a fatal error or + /// silently panic. Poisoned, } -/// A span and a query key +/// Represents a span and a query key. #[derive(Clone, Debug)] pub struct QueryInfo<'tcx> { - /// The span for a reason this query was required + /// The span corresponding to the reason for which this query was required. pub span: Span, pub query: Query<'tcx>, } -/// A object representing an active query job. +/// Representss an object representing an active query job. pub struct QueryJob<'tcx> { pub info: QueryInfo<'tcx>, /// The parent query job which created this job and is implicitly waiting on it. pub parent: Option>>, - /// Diagnostic messages which are emitted while the query executes - pub diagnostics: Lock>, - - /// The latch which is used to wait on this job - #[cfg(parallel_queries)] + /// The latch that is used to wait on this job. + #[cfg(parallel_compiler)] latch: QueryLatch<'tcx>, } impl<'tcx> QueryJob<'tcx> { - /// Creates a new query job + /// Creates a new query job. pub fn new(info: QueryInfo<'tcx>, parent: Option>>) -> Self { QueryJob { - diagnostics: Lock::new(Vec::new()), info, parent, - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] latch: QueryLatch::new(), } } /// Awaits for the query job to complete. - /// - /// For single threaded rustc there's no concurrent jobs running, so if we are waiting for any - /// query that means that there is a query cycle, thus this always running a cycle error. 
- pub(super) fn await<'lcx>( + #[cfg(parallel_compiler)] + pub(super) fn r#await<'lcx>( &self, tcx: TyCtxt<'_, 'tcx, 'lcx>, span: Span, ) -> Result<(), CycleError<'tcx>> { - #[cfg(not(parallel_queries))] - { - self.find_cycle_in_stack(tcx, span) - } - - #[cfg(parallel_queries)] - { - tls::with_related_context(tcx, move |icx| { - let mut waiter = Lrc::new(QueryWaiter { - query: icx.query.clone(), - span, - cycle: Lock::new(None), - condvar: Condvar::new(), - }); - self.latch.await(&waiter); - // FIXME: Get rid of this lock. We have ownership of the QueryWaiter - // although another thread may still have a Lrc reference so we cannot - // use Lrc::get_mut - let mut cycle = waiter.cycle.lock(); - match cycle.take() { - None => Ok(()), - Some(cycle) => Err(cycle) - } - }) - } + tls::with_related_context(tcx, move |icx| { + let mut waiter = Lrc::new(QueryWaiter { + query: icx.query.clone(), + span, + cycle: Lock::new(None), + condvar: Condvar::new(), + }); + self.latch.r#await(&waiter); + // FIXME: Get rid of this lock. We have ownership of the QueryWaiter + // although another thread may still have a Lrc reference so we cannot + // use Lrc::get_mut + let mut cycle = waiter.cycle.lock(); + match cycle.take() { + None => Ok(()), + Some(cycle) => Err(cycle) + } + }) } - #[cfg(not(parallel_queries))] - fn find_cycle_in_stack<'lcx>( + #[cfg(not(parallel_compiler))] + pub(super) fn find_cycle_in_stack<'lcx>( &self, tcx: TyCtxt<'_, 'tcx, 'lcx>, span: Span, - ) -> Result<(), CycleError<'tcx>> { + ) -> CycleError<'tcx> { // Get the current executing query (waiter) and find the waitee amongst its parents let mut current_job = tls::with_related_context(tcx, |icx| icx.query.clone()); let mut cycle = Vec::new(); @@ -140,7 +124,7 @@ impl<'tcx> QueryJob<'tcx> { let usage = job.parent.as_ref().map(|parent| { (job.info.span, parent.info.query.clone()) }); - return Err(CycleError { usage, cycle }); + return CycleError { usage, cycle }; } current_job = job.parent.clone(); @@ -154,7 +138,7 @@ impl<'tcx> QueryJob<'tcx> { /// This does nothing for single threaded rustc, /// as there are no concurrent jobs which could be waiting on us pub fn signal_complete(&self) { - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] self.latch.set(); } @@ -163,7 +147,7 @@ impl<'tcx> QueryJob<'tcx> { } } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] struct QueryWaiter<'tcx> { query: Option>>, condvar: Condvar, @@ -171,7 +155,7 @@ struct QueryWaiter<'tcx> { cycle: Lock>>, } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] impl<'tcx> QueryWaiter<'tcx> { fn notify(&self, registry: &rayon_core::Registry) { rayon_core::mark_unblocked(registry); @@ -179,18 +163,18 @@ impl<'tcx> QueryWaiter<'tcx> { } } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] struct QueryLatchInfo<'tcx> { complete: bool, waiters: Vec>>, } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] struct QueryLatch<'tcx> { info: Mutex>, } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] impl<'tcx> QueryLatch<'tcx> { fn new() -> Self { QueryLatch { @@ -202,7 +186,7 @@ impl<'tcx> QueryLatch<'tcx> { } /// Awaits the caller on this latch by blocking the current thread. - fn await(&self, waiter: &Lrc>) { + fn r#await(&self, waiter: &Lrc>) { let mut info = self.info.lock(); if !info.complete { // We push the waiter on to the `waiters` list. It can be accessed inside @@ -215,7 +199,11 @@ impl<'tcx> QueryLatch<'tcx> { // we have to be in the `wait` call. This is ensured by the deadlock handler // getting the self.info lock. 
rayon_core::mark_blocked(); + jobserver::release_thread(); waiter.condvar.wait(&mut info); + // Release the lock before we potentially block in `acquire_thread` + mem::drop(info); + jobserver::acquire_thread(); } } @@ -230,7 +218,7 @@ impl<'tcx> QueryLatch<'tcx> { } } - /// Remove a single waiter from the list of waiters. + /// Removes a single waiter from the list of waiters. /// This is used to break query cycles. fn extract_waiter( &self, @@ -244,7 +232,7 @@ impl<'tcx> QueryLatch<'tcx> { } /// A resumable waiter of a query. The usize is the index into waiters in the query's latch -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] type Waiter<'tcx> = (Lrc>, usize); /// Visits all the non-resumable and resumable waiters of a query. @@ -256,7 +244,7 @@ type Waiter<'tcx> = (Lrc>, usize); /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the /// required information to resume the waiter. /// If all `visit` calls returns None, this function also returns None. -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn visit_waiters<'tcx, F>(query: Lrc>, mut visit: F) -> Option>> where F: FnMut(Span, Lrc>) -> Option>> @@ -284,18 +272,18 @@ where /// `span` is the reason for the `query` to execute. This is initially DUMMY_SP. /// If a cycle is detected, this initial value is replaced with the span causing /// the cycle. -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn cycle_check<'tcx>(query: Lrc>, span: Span, stack: &mut Vec<(Span, Lrc>)>, visited: &mut FxHashSet<*const QueryJob<'tcx>> ) -> Option>> { - if visited.contains(&query.as_ptr()) { + if !visited.insert(query.as_ptr()) { return if let Some(p) = stack.iter().position(|q| q.1.as_ptr() == query.as_ptr()) { // We detected a query cycle, fix up the initial span and return Some // Remove previous stack entries - stack.splice(0..p, iter::empty()); + stack.drain(0..p); // Replace the span for the first query with the cycle cause stack[0].0 = span; Some(None) @@ -304,8 +292,7 @@ fn cycle_check<'tcx>(query: Lrc>, } } - // Mark this query is visited and add it to the stack - visited.insert(query.as_ptr()); + // Query marked as visited is added it to the stack stack.push((span, query.clone())); // Visit all the waiters @@ -324,13 +311,13 @@ fn cycle_check<'tcx>(query: Lrc>, /// Finds out if there's a path to the compiler root (aka. code which isn't in a query) /// from `query` without going through any of the queries in `visited`. /// This is achieved with a depth first search. -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn connected_to_root<'tcx>( query: Lrc>, visited: &mut FxHashSet<*const QueryJob<'tcx>> ) -> bool { // We already visited this or we're deliberately ignoring it - if visited.contains(&query.as_ptr()) { + if !visited.insert(query.as_ptr()) { return false; } @@ -339,8 +326,6 @@ fn connected_to_root<'tcx>( return true; } - visited.insert(query.as_ptr()); - visit_waiters(query, |_, successor| { if connected_to_root(successor, visited) { Some(None) @@ -351,7 +336,7 @@ fn connected_to_root<'tcx>( } // Deterministically pick an query from a list -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc>)>( tcx: TyCtxt<'_, 'tcx, '_>, queries: &'a [T], @@ -377,7 +362,7 @@ fn pick_query<'a, 'tcx, T, F: Fn(&T) -> (Span, Lrc>)>( /// the function return true. /// If a cycle was not found, the starting query is removed from `jobs` and /// the function returns false. 
-#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn remove_cycle<'tcx>( jobs: &mut Vec>>, wakelist: &mut Vec>>, @@ -390,11 +375,9 @@ fn remove_cycle<'tcx>( DUMMY_SP, &mut stack, &mut visited) { - // Reverse the stack so earlier entries require later entries - stack.reverse(); - - // The stack is a vector of pairs of spans and queries - let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().unzip(); + // The stack is a vector of pairs of spans and queries; reverse it so that + // the earlier entries require later entries + let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip(); // Shift the spans so that queries are matched with the span for their waitee spans.rotate_right(1); @@ -411,7 +394,7 @@ fn remove_cycle<'tcx>( // Find the queries in the cycle which are // connected to queries outside the cycle - let entry_points: Vec<_> = stack.iter().filter_map(|(span, query)| { + let entry_points = stack.iter().filter_map(|(span, query)| { if query.parent.is_none() { // This query is connected to the root (it has no query parent) Some((*span, query.clone(), None)) @@ -436,10 +419,7 @@ fn remove_cycle<'tcx>( Some((*span, query.clone(), Some(waiter))) } } - }).collect(); - - let entry_points: Vec<(Span, Lrc>, Option<(Span, Lrc>)>)> - = entry_points; + }).collect::>, Option<(Span, Lrc>)>)>>(); // Deterministically pick an entry point let (_, entry_point, usage) = pick_query(tcx, &entry_points, |e| (e.0, e.1.clone())); @@ -485,7 +465,7 @@ fn remove_cycle<'tcx>( /// Creates a new thread and forwards information in thread locals to it. /// The new thread runs the deadlock handler. /// Must only be called when a deadlock is about to happen. -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] pub unsafe fn handle_deadlock() { use syntax; use syntax_pos; @@ -524,7 +504,7 @@ pub unsafe fn handle_deadlock() { /// uses a query latch and then resuming that waiter. /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] fn deadlock(tcx: TyCtxt<'_, '_, '_>, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); diff --git a/src/librustc/ty/query/keys.rs b/src/librustc/ty/query/keys.rs index f2d7a6792b563..d353da801778d 100644 --- a/src/librustc/ty/query/keys.rs +++ b/src/librustc/ty/query/keys.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Defines the set of legal keys that can be used in queries. 
-use infer::canonical::Canonical; -use hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex}; -use traits; -use ty::{self, Ty, TyCtxt}; -use ty::subst::Substs; -use ty::fast_reject::SimplifiedType; -use mir; +use crate::infer::canonical::Canonical; +use crate::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, DefIndex}; +use crate::traits; +use crate::ty::{self, Ty, TyCtxt}; +use crate::ty::subst::SubstsRef; +use crate::ty::fast_reject::SimplifiedType; +use crate::mir; use std::fmt::Debug; use std::hash::Hash; @@ -119,7 +109,7 @@ impl Key for (DefId, SimplifiedType) { } } -impl<'tcx> Key for (DefId, &'tcx Substs<'tcx>) { +impl<'tcx> Key for (DefId, SubstsRef<'tcx>) { fn query_crate(&self) -> CrateNum { self.0.krate } @@ -146,7 +136,7 @@ impl<'tcx> Key for ty::PolyTraitRef<'tcx>{ } } -impl<'tcx> Key for &'tcx ty::Const<'tcx> { +impl<'tcx> Key for ty::Const<'tcx> { fn query_crate(&self) -> CrateNum { LOCAL_CRATE } diff --git a/src/librustc/ty/query/mod.rs b/src/librustc/ty/query/mod.rs index 5cd06fb8a52c0..c4bc35fff66b8 100644 --- a/src/librustc/ty/query/mod.rs +++ b/src/librustc/ty/query/mod.rs @@ -1,67 +1,61 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use dep_graph::{DepConstructor, DepNode}; -use errors::DiagnosticBuilder; -use hir::def_id::{CrateNum, DefId, DefIndex}; -use hir::def::{Def, Export}; -use hir::{self, TraitCandidate, ItemLocalId, CodegenFnAttrs}; -use rustc_data_structures::svh::Svh; -use infer::canonical::{self, Canonical}; -use lint; -use middle::borrowck::BorrowCheckResult; -use middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, ForeignModule}; -use middle::cstore::{NativeLibraryKind, DepKind, CrateSource}; -use middle::privacy::AccessLevels; -use middle::reachable::ReachableSet; -use middle::region; -use middle::resolve_lifetime::{ResolveLifetimes, Region, ObjectLifetimeDefault}; -use middle::stability::{self, DeprecationEntry}; -use middle::lib_features::LibFeatures; -use middle::lang_items::{LanguageItems, LangItem}; -use middle::exported_symbols::{SymbolExportLevel, ExportedSymbol}; -use mir::interpret::{ConstEvalRawResult, ConstEvalResult}; -use mir::mono::CodegenUnit; -use mir; -use mir::interpret::GlobalId; -use session::{CompileResult, CrateDisambiguator}; -use session::config::OutputFilenames; -use traits::{self, Vtable}; -use traits::query::{ +use crate::dep_graph::{self, DepNode}; +use crate::hir::def_id::{CrateNum, DefId, DefIndex}; +use crate::hir::def::{Def, Export}; +use crate::hir::{self, TraitCandidate, ItemLocalId, CodegenFnAttrs}; +use crate::infer::canonical::{self, Canonical}; +use crate::lint; +use crate::middle::borrowck::BorrowCheckResult; +use crate::middle::cstore::{ExternCrate, LinkagePreference, NativeLibrary, ForeignModule}; +use crate::middle::cstore::{NativeLibraryKind, DepKind, CrateSource}; +use crate::middle::privacy::AccessLevels; +use crate::middle::reachable::ReachableSet; +use crate::middle::region; +use crate::middle::resolve_lifetime::{ResolveLifetimes, Region, ObjectLifetimeDefault}; +use crate::middle::stability::{self, DeprecationEntry}; +use crate::middle::lib_features::LibFeatures; +use crate::middle::lang_items::{LanguageItems, LangItem}; +use crate::middle::exported_symbols::{SymbolExportLevel, 
ExportedSymbol}; +use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult}; +use crate::mir::mono::CodegenUnit; +use crate::mir; +use crate::mir::interpret::GlobalId; +use crate::session::CrateDisambiguator; +use crate::session::config::{EntryFnType, OutputFilenames, OptLevel}; +use crate::traits::{self, Vtable}; +use crate::traits::query::{ CanonicalPredicateGoal, CanonicalProjectionGoal, - CanonicalTyGoal, CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, - CanonicalTypeOpSubtypeGoal, CanonicalTypeOpProvePredicateGoal, + CanonicalTyGoal, CanonicalTypeOpAscribeUserTypeGoal, + CanonicalTypeOpEqGoal, CanonicalTypeOpSubtypeGoal, CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpNormalizeGoal, NoSolution, }; -use traits::query::dropck_outlives::{DtorckConstraint, DropckOutlivesResult}; -use traits::query::normalize::NormalizationResult; -use traits::query::outlives_bounds::OutlivesBound; -use traits::specialization_graph; -use traits::Clauses; -use ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; -use ty::steal::Steal; -use ty::subst::Substs; -use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; -use util::common::{ErrorReported}; -use util::profiling::ProfileCategory::*; +use crate::traits::query::method_autoderef::MethodAutoderefStepsResult; +use crate::traits::query::dropck_outlives::{DtorckConstraint, DropckOutlivesResult}; +use crate::traits::query::normalize::NormalizationResult; +use crate::traits::query::outlives_bounds::OutlivesBound; +use crate::traits::specialization_graph; +use crate::traits::Clauses; +use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt, AdtSizedConstraint}; +use crate::ty::steal::Steal; +use crate::ty::util::NeedsDrop; +use crate::ty::subst::SubstsRef; +use crate::util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet}; +use crate::util::common::{ErrorReported}; +use crate::util::profiling::ProfileCategory::*; +use crate::session::Session; +use rustc_data_structures::svh::Svh; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::stable_hasher::StableVec; use rustc_data_structures::sync::Lrc; +use rustc_data_structures::fingerprint::Fingerprint; use rustc_target::spec::PanicStrategy; use std::borrow::Cow; use std::ops::Deref; use std::sync::Arc; +use std::intrinsics::type_name; use syntax_pos::{Span, DUMMY_SP}; use syntax_pos::symbol::InternedString; use syntax::attr; @@ -76,7 +70,7 @@ pub use self::plumbing::{force_from_dep_node, CycleError}; mod job; pub use self::job::{QueryJob, QueryInfo}; -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] pub use self::job::handle_deadlock; mod keys; @@ -86,13 +80,14 @@ mod values; use self::values::Value; mod config; +pub(crate) use self::config::QueryDescription; pub use self::config::QueryConfig; -use self::config::{QueryAccessors, QueryDescription}; +use self::config::QueryAccessors; mod on_disk_cache; pub use self::on_disk_cache::OnDiskCache; -// Each of these quries corresponds to a function pointer field in the +// Each of these queries corresponds to a function pointer field in the // `Providers` struct for requesting a value of that type, and a method // on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way // which memoizes and does dep-graph tracking, wrapping around the actual @@ -103,796 +98,11 @@ pub use self::on_disk_cache::OnDiskCache; // (error) value if the query resulted in a query cycle. 
// Queries marked with `fatal_cycle` do not need the latter implementation, // as they will raise an fatal error on query cycles instead. -define_queries! { <'tcx> - Other { - /// Records the type of every item. - [] fn type_of: TypeOfItem(DefId) -> Ty<'tcx>, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to its - /// associated generics. - [] fn generics_of: GenericsOfItem(DefId) -> &'tcx ty::Generics, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to the - /// predicates (where clauses) that must be proven true in order - /// to reference it. This is almost always the "predicates query" - /// that you want. - /// - /// `predicates_of` builds on `predicates_defined_on` -- in fact, - /// it is almost always the same as that query, except for the - /// case of traits. For traits, `predicates_of` contains - /// an additional `Self: Trait<...>` predicate that users don't - /// actually write. This reflects the fact that to invoke the - /// trait (e.g., via `Default::default`) you must supply types - /// that actually implement the trait. (However, this extra - /// predicate gets in the way of some checks, which are intended - /// to operate over only the actual where-clauses written by the - /// user.) - [] fn predicates_of: PredicatesOfItem(DefId) -> Lrc>, - - /// Maps from the def-id of an item (trait/struct/enum/fn) to the - /// predicates (where clauses) directly defined on it. This is - /// equal to the `explicit_predicates_of` predicates plus the - /// `inferred_outlives_of` predicates. - [] fn predicates_defined_on: PredicatesDefinedOnItem(DefId) - -> Lrc>, - - /// Returns the predicates written explicit by the user. - [] fn explicit_predicates_of: ExplicitPredicatesOfItem(DefId) - -> Lrc>, - - /// Returns the inferred outlives predicates (e.g., for `struct - /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`). - [] fn inferred_outlives_of: InferredOutlivesOf(DefId) -> Lrc>>, - - /// Maps from the def-id of a trait to the list of - /// super-predicates. This is a subset of the full list of - /// predicates. We store these in a separate map because we must - /// evaluate them even during type conversion, often before the - /// full predicates are available (note that supertraits have - /// additional acyclicity requirements). - [] fn super_predicates_of: SuperPredicatesOfItem(DefId) -> Lrc>, - - /// To avoid cycles within the predicates of a single item we compute - /// per-type-parameter predicates for resolving `T::AssocTy`. - [] fn type_param_predicates: type_param_predicates((DefId, DefId)) - -> Lrc>, - - [] fn trait_def: TraitDefOfItem(DefId) -> &'tcx ty::TraitDef, - [] fn adt_def: AdtDefOfItem(DefId) -> &'tcx ty::AdtDef, - [] fn adt_destructor: AdtDestructor(DefId) -> Option, - [] fn adt_sized_constraint: SizedConstraint(DefId) -> &'tcx [Ty<'tcx>], - [] fn adt_dtorck_constraint: DtorckConstraint( - DefId - ) -> Result, NoSolution>, - - /// True if this is a const fn, use the `is_const_fn` to know whether your crate actually - /// sees it as const fn (e.g., the const-fn-ness might be unstable and you might not have - /// the feature gate active) - /// - /// **Do not call this function manually.** It is only meant to cache the base data for the - /// `is_const_fn` function. - [] fn is_const_fn_raw: IsConstFn(DefId) -> bool, - - - /// Returns true if calls to the function may be promoted - /// - /// This is either because the function is e.g., a tuple-struct or tuple-variant - /// constructor, or because it has the `#[rustc_promotable]` attribute. 
The attribute should - /// be removed in the future in favour of some form of check which figures out whether the - /// function does not inspect the bits of any of its arguments (so is essentially just a - /// constructor function). - [] fn is_promotable_const_fn: IsPromotableConstFn(DefId) -> bool, - - /// True if this is a foreign item (i.e., linked via `extern { ... }`). - [] fn is_foreign_item: IsForeignItem(DefId) -> bool, - - /// Get a map with the variance of every item; use `item_variance` - /// instead. - [] fn crate_variances: crate_variances(CrateNum) -> Lrc, - - /// Maps from def-id of a type or region parameter to its - /// (inferred) variance. - [] fn variances_of: ItemVariances(DefId) -> Lrc>, - }, - - TypeChecking { - /// Maps from def-id of a type to its (inferred) outlives. - [] fn inferred_outlives_crate: InferredOutlivesCrate(CrateNum) - -> Lrc>, - }, +rustc_query_append! { [define_queries!][ <'tcx> Other { - /// Maps from an impl/trait def-id to a list of the def-ids of its items - [] fn associated_item_def_ids: AssociatedItemDefIds(DefId) -> Lrc>, - - /// Maps from a trait item to the trait item "descriptor" - [] fn associated_item: AssociatedItems(DefId) -> ty::AssociatedItem, - - [] fn impl_trait_ref: ImplTraitRef(DefId) -> Option>, - [] fn impl_polarity: ImplPolarity(DefId) -> hir::ImplPolarity, - }, - - TypeChecking { - /// Maps a DefId of a type to a list of its inherent impls. - /// Contains implementations of methods that are inherent to a type. - /// Methods in these implementations don't need to be exported. - [] fn inherent_impls: InherentImpls(DefId) -> Lrc>, - }, - - Codegen { - /// Set of all the def-ids in this crate that have MIR associated with - /// them. This includes all the body owners, but also things like struct - /// constructors. - [] fn mir_keys: mir_keys(CrateNum) -> Lrc, - - /// Maps DefId's that have an associated Mir to the result - /// of the MIR qualify_consts pass. The actual meaning of - /// the value isn't known except to the pass itself. - [] fn mir_const_qualif: MirConstQualif(DefId) -> (u8, Lrc>), - - /// Fetch the MIR for a given def-id right after it's built - this includes - /// unreachable code. - [] fn mir_built: MirBuilt(DefId) -> &'tcx Steal>, - - /// Fetch the MIR for a given def-id up till the point where it is - /// ready for const evaluation. - /// - /// See the README for the `mir` module for details. - [] fn mir_const: MirConst(DefId) -> &'tcx Steal>, - - [] fn mir_validated: MirValidated(DefId) -> &'tcx Steal>, - - /// MIR after our optimization passes have run. This is MIR that is ready - /// for codegen. This is also the only query that can fetch non-local MIR, at present. - [] fn optimized_mir: MirOptimized(DefId) -> &'tcx mir::Mir<'tcx>, - }, - - TypeChecking { - /// The result of unsafety-checking this def-id. - [] fn unsafety_check_result: UnsafetyCheckResult(DefId) -> mir::UnsafetyCheckResult, - - /// HACK: when evaluated, this reports a "unsafe derive on repr(packed)" error - [] fn unsafe_derive_on_repr_packed: UnsafeDeriveOnReprPacked(DefId) -> (), - - /// The signature of functions and closures. - [] fn fn_sig: FnSignature(DefId) -> ty::PolyFnSig<'tcx>, - }, - - Other { - /// Caches CoerceUnsized kinds for impls on custom types. 
- [] fn coerce_unsized_info: CoerceUnsizedInfo(DefId) - -> ty::adjustment::CoerceUnsizedInfo, - }, - - TypeChecking { - [] fn typeck_item_bodies: typeck_item_bodies_dep_node(CrateNum) -> CompileResult, - - [] fn typeck_tables_of: TypeckTables(DefId) -> &'tcx ty::TypeckTables<'tcx>, - }, - - Other { - [] fn used_trait_imports: UsedTraitImports(DefId) -> Lrc, - }, - - TypeChecking { - [] fn has_typeck_tables: HasTypeckTables(DefId) -> bool, - - [] fn coherent_trait: CoherenceCheckTrait(DefId) -> (), - }, - - BorrowChecking { - [] fn borrowck: BorrowCheck(DefId) -> Lrc, + /// Run analysis passes on the crate + [] fn analysis: Analysis(CrateNum) -> Result<(), ErrorReported>, - /// Borrow checks the function body. If this is a closure, returns - /// additional requirements that the closure's creator must verify. - [] fn mir_borrowck: MirBorrowCheck(DefId) -> mir::BorrowCheckResult<'tcx>, }, - - TypeChecking { - /// Gets a complete map from all types to their inherent impls. - /// Not meant to be used directly outside of coherence. - /// (Defined only for LOCAL_CRATE) - [] fn crate_inherent_impls: crate_inherent_impls_dep_node(CrateNum) - -> Lrc, - - /// Checks all types in the krate for overlap in their inherent impls. Reports errors. - /// Not meant to be used directly outside of coherence. - /// (Defined only for LOCAL_CRATE) - [] fn crate_inherent_impls_overlap_check: inherent_impls_overlap_check_dep_node(CrateNum) - -> (), - }, - - Other { - /// Evaluate a constant without running sanity checks - /// - /// DO NOT USE THIS outside const eval. Const eval uses this to break query cycles during - /// validation. Please add a comment to every use site explaining why using `const_eval` - /// isn't sufficient - [] fn const_eval_raw: const_eval_raw_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) - -> ConstEvalRawResult<'tcx>, - - /// Results of evaluating const items or constants embedded in - /// other items (such as enum variant explicit discriminants). - [] fn const_eval: const_eval_dep_node(ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) - -> ConstEvalResult<'tcx>, - }, - - TypeChecking { - [] fn check_match: CheckMatch(DefId) - -> Result<(), ErrorReported>, - - /// Performs the privacy check and computes "access levels". - [] fn privacy_access_levels: PrivacyAccessLevels(CrateNum) -> Lrc, - }, - - Other { - [] fn reachable_set: reachability_dep_node(CrateNum) -> ReachableSet, - - /// Per-body `region::ScopeTree`. The `DefId` should be the owner-def-id for the body; - /// in the case of closures, this will be redirected to the enclosing function. - [] fn region_scope_tree: RegionScopeTree(DefId) -> Lrc, - - [] fn mir_shims: mir_shim_dep_node(ty::InstanceDef<'tcx>) -> &'tcx mir::Mir<'tcx>, - - [] fn def_symbol_name: SymbolName(DefId) -> ty::SymbolName, - [] fn symbol_name: symbol_name_dep_node(ty::Instance<'tcx>) -> ty::SymbolName, - - [] fn describe_def: DescribeDef(DefId) -> Option, - [] fn def_span: DefSpan(DefId) -> Span, - [] fn lookup_stability: LookupStability(DefId) -> Option<&'tcx attr::Stability>, - [] fn lookup_deprecation_entry: LookupDeprecationEntry(DefId) -> Option, - [] fn item_attrs: ItemAttrs(DefId) -> Lrc<[ast::Attribute]>, - }, - - Codegen { - [] fn codegen_fn_attrs: codegen_fn_attrs(DefId) -> CodegenFnAttrs, - }, - - Other { - [] fn fn_arg_names: FnArgNames(DefId) -> Vec, - /// Gets the rendered value of the specified constant or associated constant. - /// Used by rustdoc. 
- [] fn rendered_const: RenderedConst(DefId) -> String, - [] fn impl_parent: ImplParent(DefId) -> Option, - }, - - TypeChecking { - [] fn trait_of_item: TraitOfItem(DefId) -> Option, - [] fn const_is_rvalue_promotable_to_static: ConstIsRvaluePromotableToStatic(DefId) -> bool, - [] fn rvalue_promotable_map: RvaluePromotableMap(DefId) -> Lrc, - }, - - Codegen { - [] fn is_mir_available: IsMirAvailable(DefId) -> bool, - }, - - Other { - [] fn vtable_methods: vtable_methods_node(ty::PolyTraitRef<'tcx>) - -> Lrc)>>>, - }, - - Codegen { - [] fn codegen_fulfill_obligation: fulfill_obligation_dep_node( - (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> Vtable<'tcx, ()>, - }, - - TypeChecking { - [] fn trait_impls_of: TraitImpls(DefId) -> Lrc, - [] fn specialization_graph_of: SpecializationGraph(DefId) - -> Lrc, - [] fn is_object_safe: ObjectSafety(DefId) -> bool, - - /// Get the ParameterEnvironment for a given item; this environment - /// will be in "user-facing" mode, meaning that it is suitabe for - /// type-checking etc, and it does not normalize specializable - /// associated types. This is almost always what you want, - /// unless you are doing MIR optimizations, in which case you - /// might want to use `reveal_all()` method to change modes. - [] fn param_env: ParamEnv(DefId) -> ty::ParamEnv<'tcx>, - - /// Trait selection queries. These are best used by invoking `ty.moves_by_default()`, - /// `ty.is_copy()`, etc, since that will prune the environment where possible. - [] fn is_copy_raw: is_copy_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn is_sized_raw: is_sized_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool, - [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> Result<&'tcx ty::layout::LayoutDetails, - ty::layout::LayoutError<'tcx>>, - }, - - Other { - [] fn dylib_dependency_formats: DylibDepFormats(CrateNum) - -> Lrc>, - }, - - Codegen { - [fatal_cycle] fn is_panic_runtime: IsPanicRuntime(CrateNum) -> bool, - [fatal_cycle] fn is_compiler_builtins: IsCompilerBuiltins(CrateNum) -> bool, - [fatal_cycle] fn has_global_allocator: HasGlobalAllocator(CrateNum) -> bool, - [fatal_cycle] fn has_panic_handler: HasPanicHandler(CrateNum) -> bool, - [fatal_cycle] fn is_sanitizer_runtime: IsSanitizerRuntime(CrateNum) -> bool, - [fatal_cycle] fn is_profiler_runtime: IsProfilerRuntime(CrateNum) -> bool, - [fatal_cycle] fn panic_strategy: GetPanicStrategy(CrateNum) -> PanicStrategy, - [fatal_cycle] fn is_no_builtins: IsNoBuiltins(CrateNum) -> bool, - - [] fn extern_crate: ExternCrate(DefId) -> Lrc>, - }, - - TypeChecking { - [] fn specializes: specializes_node((DefId, DefId)) -> bool, - [] fn in_scope_traits_map: InScopeTraits(DefIndex) - -> Option>>>>, - }, - - Other { - [] fn module_exports: ModuleExports(DefId) -> Option>>, - [] fn lint_levels: lint_levels_node(CrateNum) -> Lrc, - }, - - TypeChecking { - [] fn impl_defaultness: ImplDefaultness(DefId) -> hir::Defaultness, - - [] fn check_item_well_formed: CheckItemWellFormed(DefId) -> (), - [] fn check_trait_item_well_formed: CheckTraitItemWellFormed(DefId) -> (), - [] fn check_impl_item_well_formed: CheckImplItemWellFormed(DefId) -> (), - }, - - Linking { - // The DefIds of all non-generic functions and statics in the given crate - // that can be reached from outside the crate. - // - // We expect this items to be available for being linked to. 
- // - // This query can also be called for LOCAL_CRATE. In this case it will - // compute which items will be reachable to other crates, taking into account - // the kind of crate that is currently compiled. Crates with only a - // C interface have fewer reachable things. - // - // Does not include external symbols that don't have a corresponding DefId, - // like the compiler-generated `main` function and so on. - [] fn reachable_non_generics: ReachableNonGenerics(CrateNum) - -> Lrc>, - [] fn is_reachable_non_generic: IsReachableNonGeneric(DefId) -> bool, - [] fn is_unreachable_local_definition: IsUnreachableLocalDefinition(DefId) -> bool, - }, - - Codegen { - [] fn upstream_monomorphizations: UpstreamMonomorphizations(CrateNum) - -> Lrc, CrateNum>>>>, - [] fn upstream_monomorphizations_for: UpstreamMonomorphizationsFor(DefId) - -> Option, CrateNum>>>, - }, - - Other { - [] fn native_libraries: NativeLibraries(CrateNum) -> Lrc>, - - [] fn foreign_modules: ForeignModules(CrateNum) -> Lrc>, - - [] fn plugin_registrar_fn: PluginRegistrarFn(CrateNum) -> Option, - [] fn proc_macro_decls_static: ProcMacroDeclsStatic(CrateNum) -> Option, - [] fn crate_disambiguator: CrateDisambiguator(CrateNum) -> CrateDisambiguator, - [] fn crate_hash: CrateHash(CrateNum) -> Svh, - [] fn original_crate_name: OriginalCrateName(CrateNum) -> Symbol, - [] fn extra_filename: ExtraFileName(CrateNum) -> String, - }, - - TypeChecking { - [] fn implementations_of_trait: implementations_of_trait_node((CrateNum, DefId)) - -> Lrc>, - [] fn all_trait_implementations: AllTraitImplementations(CrateNum) - -> Lrc>, - }, - - Other { - [] fn dllimport_foreign_items: DllimportForeignItems(CrateNum) - -> Lrc>, - [] fn is_dllimport_foreign_item: IsDllimportForeignItem(DefId) -> bool, - [] fn is_statically_included_foreign_item: IsStaticallyIncludedForeignItem(DefId) -> bool, - [] fn native_library_kind: NativeLibraryKind(DefId) - -> Option, - }, - - Linking { - [] fn link_args: link_args_node(CrateNum) -> Lrc>, - }, - - BorrowChecking { - // Lifetime resolution. See `middle::resolve_lifetimes`. 
- [] fn resolve_lifetimes: ResolveLifetimes(CrateNum) -> Lrc, - [] fn named_region_map: NamedRegion(DefIndex) -> - Option>>, - [] fn is_late_bound_map: IsLateBound(DefIndex) -> - Option>>, - [] fn object_lifetime_defaults_map: ObjectLifetimeDefaults(DefIndex) - -> Option>>>>, - }, - - TypeChecking { - [] fn visibility: Visibility(DefId) -> ty::Visibility, - }, - - Other { - [] fn dep_kind: DepKind(CrateNum) -> DepKind, - [] fn crate_name: CrateName(CrateNum) -> Symbol, - [] fn item_children: ItemChildren(DefId) -> Lrc>, - [] fn extern_mod_stmt_cnum: ExternModStmtCnum(DefId) -> Option, - - [] fn get_lib_features: get_lib_features_node(CrateNum) -> Lrc, - [] fn defined_lib_features: DefinedLibFeatures(CrateNum) - -> Lrc)>>, - [] fn get_lang_items: get_lang_items_node(CrateNum) -> Lrc, - [] fn defined_lang_items: DefinedLangItems(CrateNum) -> Lrc>, - [] fn missing_lang_items: MissingLangItems(CrateNum) -> Lrc>, - [] fn visible_parent_map: visible_parent_map_node(CrateNum) - -> Lrc>, - [] fn missing_extern_crate_item: MissingExternCrateItem(CrateNum) -> bool, - [] fn used_crate_source: UsedCrateSource(CrateNum) -> Lrc, - [] fn postorder_cnums: postorder_cnums_node(CrateNum) -> Lrc>, - - [] fn freevars: Freevars(DefId) -> Option>>, - [] fn maybe_unused_trait_import: MaybeUnusedTraitImport(DefId) -> bool, - [] fn maybe_unused_extern_crates: maybe_unused_extern_crates_node(CrateNum) - -> Lrc>, - - [] fn stability_index: stability_index_node(CrateNum) -> Lrc>, - [] fn all_crate_nums: all_crate_nums_node(CrateNum) -> Lrc>, - - /// A vector of every trait accessible in the whole crate - /// (i.e., including those from subcrates). This is used only for - /// error reporting. - [] fn all_traits: all_traits_node(CrateNum) -> Lrc>, - }, - - Linking { - [] fn exported_symbols: ExportedSymbols(CrateNum) - -> Arc, SymbolExportLevel)>>, - }, - - Codegen { - [] fn collect_and_partition_mono_items: - collect_and_partition_mono_items_node(CrateNum) - -> (Arc, Arc>>>), - [] fn is_codegened_item: IsCodegenedItem(DefId) -> bool, - [] fn codegen_unit: CodegenUnit(InternedString) -> Arc>, - }, - - Other { - [] fn output_filenames: output_filenames_node(CrateNum) - -> Arc, - }, - - TypeChecking { - // Erases regions from `ty` to yield a new type. - // Normally you would just use `tcx.erase_regions(&value)`, - // however, which uses this query as a kind of cache. - [] fn erase_regions_ty: erase_regions_ty(Ty<'tcx>) -> Ty<'tcx>, - - /// Do not call this query directly: invoke `normalize` instead. - [] fn normalize_projection_ty: NormalizeProjectionTy( - CanonicalProjectionGoal<'tcx> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - /// Do not call this query directly: invoke `normalize_erasing_regions` instead. - [] fn normalize_ty_after_erasing_regions: NormalizeTyAfterErasingRegions( - ParamEnvAnd<'tcx, Ty<'tcx>> - ) -> Ty<'tcx>, - - [] fn implied_outlives_bounds: ImpliedOutlivesBounds( - CanonicalTyGoal<'tcx> - ) -> Result< - Lrc>>>>, - NoSolution, - >, - - /// Do not call this query directly: invoke `infcx.at().dropck_outlives()` instead. - [] fn dropck_outlives: DropckOutlives( - CanonicalTyGoal<'tcx> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or - /// `infcx.predicate_must_hold()` instead. 
- [] fn evaluate_obligation: EvaluateObligation( - CanonicalPredicateGoal<'tcx> - ) -> Result, - - /// Do not call this query directly: part of the `Eq` type-op - [] fn type_op_ascribe_user_type: TypeOpAscribeUserType( - CanonicalTypeOpAscribeUserTypeGoal<'tcx> - ) -> Result< - Lrc>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Eq` type-op - [] fn type_op_eq: TypeOpEq( - CanonicalTypeOpEqGoal<'tcx> - ) -> Result< - Lrc>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Subtype` type-op - [] fn type_op_subtype: TypeOpSubtype( - CanonicalTypeOpSubtypeGoal<'tcx> - ) -> Result< - Lrc>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `ProvePredicate` type-op - [] fn type_op_prove_predicate: TypeOpProvePredicate( - CanonicalTypeOpProvePredicateGoal<'tcx> - ) -> Result< - Lrc>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Normalize` type-op - [] fn type_op_normalize_ty: TypeOpNormalizeTy( - CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Normalize` type-op - [] fn type_op_normalize_predicate: TypeOpNormalizePredicate( - CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Normalize` type-op - [] fn type_op_normalize_poly_fn_sig: TypeOpNormalizePolyFnSig( - CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - /// Do not call this query directly: part of the `Normalize` type-op - [] fn type_op_normalize_fn_sig: TypeOpNormalizeFnSig( - CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>> - ) -> Result< - Lrc>>>, - NoSolution, - >, - - [] fn substitute_normalize_and_test_predicates: - substitute_normalize_and_test_predicates_node((DefId, &'tcx Substs<'tcx>)) -> bool, - }, - - Other { - [] fn target_features_whitelist: - target_features_whitelist_node(CrateNum) -> Lrc>>, - - // Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning. - [] fn instance_def_size_estimate: instance_def_size_estimate_dep_node(ty::InstanceDef<'tcx>) - -> usize, - - [] fn features_query: features_node(CrateNum) -> Lrc, - }, - - TypeChecking { - [] fn program_clauses_for: ProgramClausesFor(DefId) -> Clauses<'tcx>, - - [] fn program_clauses_for_env: ProgramClausesForEnv( - traits::Environment<'tcx> - ) -> Clauses<'tcx>, - - // Get the chalk-style environment of the given item. - [] fn environment: Environment(DefId) -> ty::Binder>, - }, - - Linking { - [] fn wasm_import_module_map: WasmImportModuleMap(CrateNum) - -> Lrc>, - }, -} - -// `try_get_query` can't be public because it uses the private query -// implementation traits, so we provide access to it selectively. 
-impl<'a, 'tcx, 'lcx> TyCtxt<'a, 'tcx, 'lcx> { - pub fn try_adt_sized_constraint( - self, - span: Span, - key: DefId, - ) -> Result<&'tcx [Ty<'tcx>], DiagnosticBuilder<'a>> { - self.try_get_query::>(span, key) - } - pub fn try_needs_drop_raw( - self, - span: Span, - key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, - ) -> Result> { - self.try_get_query::>(span, key) - } - pub fn try_optimized_mir( - self, - span: Span, - key: DefId, - ) -> Result<&'tcx mir::Mir<'tcx>, DiagnosticBuilder<'a>> { - self.try_get_query::>(span, key) - } -} - -////////////////////////////////////////////////////////////////////// -// These functions are little shims used to find the dep-node for a -// given query when there is not a *direct* mapping: - - -fn features_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Features -} - -fn codegen_fn_attrs<'tcx>(id: DefId) -> DepConstructor<'tcx> { - DepConstructor::CodegenFnAttrs { 0: id } -} - -fn erase_regions_ty<'tcx>(ty: Ty<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::EraseRegionsTy { ty } -} - -fn type_param_predicates<'tcx>((item_id, param_id): (DefId, DefId)) -> DepConstructor<'tcx> { - DepConstructor::TypeParamPredicates { - item_id, - param_id - } -} - -fn fulfill_obligation_dep_node<'tcx>((param_env, trait_ref): - (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)) -> DepConstructor<'tcx> { - DepConstructor::FulfillObligation { - param_env, - trait_ref - } -} - -fn crate_inherent_impls_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Coherence -} - -fn inherent_impls_overlap_check_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CoherenceInherentImplOverlapCheck -} - -fn reachability_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::Reachability -} - -fn mir_shim_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::MirShim { - instance_def - } -} - -fn symbol_name_dep_node<'tcx>(instance: ty::Instance<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::InstanceSymbolName { instance } -} - -fn typeck_item_bodies_dep_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::TypeckBodiesKrate -} - -fn const_eval_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) - -> DepConstructor<'tcx> { - DepConstructor::ConstEval { param_env } -} -fn const_eval_raw_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>) - -> DepConstructor<'tcx> { - DepConstructor::ConstEvalRaw { param_env } -} - -fn mir_keys<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::MirKeys -} - -fn crate_variances<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CrateVariances -} - -fn is_copy_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsCopy { param_env } -} - -fn is_sized_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsSized { param_env } -} - -fn is_freeze_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::IsFreeze { param_env } -} - -fn needs_drop_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::NeedsDrop { param_env } -} - -fn layout_dep_node<'tcx>(param_env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> DepConstructor<'tcx> { - DepConstructor::Layout { param_env } -} - -fn lint_levels_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::LintLevels -} - -fn specializes_node<'tcx>((a, b): (DefId, 
DefId)) -> DepConstructor<'tcx> { - DepConstructor::Specializes { impl1: a, impl2: b } -} - -fn implementations_of_trait_node<'tcx>((krate, trait_id): (CrateNum, DefId)) - -> DepConstructor<'tcx> -{ - DepConstructor::ImplementationsOfTrait { krate, trait_id } -} - -fn link_args_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::LinkArgs -} - -fn get_lib_features_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::GetLibFeatures -} - -fn get_lang_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::GetLangItems -} - -fn visible_parent_map_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::VisibleParentMap -} - -fn postorder_cnums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::PostorderCnums -} - -fn maybe_unused_extern_crates_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::MaybeUnusedExternCrates -} - -fn stability_index_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::StabilityIndex -} - -fn all_crate_nums_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::AllCrateNums -} - -fn all_traits_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::AllTraits -} - -fn collect_and_partition_mono_items_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::CollectAndPartitionMonoItems -} - -fn output_filenames_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::OutputFilenames -} - -fn vtable_methods_node<'tcx>(trait_ref: ty::PolyTraitRef<'tcx>) -> DepConstructor<'tcx> { - DepConstructor::VtableMethods{ trait_ref } -} - -fn substitute_normalize_and_test_predicates_node<'tcx>(key: (DefId, &'tcx Substs<'tcx>)) - -> DepConstructor<'tcx> { - DepConstructor::SubstituteNormalizeAndTestPredicates { key } -} - -fn target_features_whitelist_node<'tcx>(_: CrateNum) -> DepConstructor<'tcx> { - DepConstructor::TargetFeaturesWhitelist -} - -fn instance_def_size_estimate_dep_node<'tcx>(instance_def: ty::InstanceDef<'tcx>) - -> DepConstructor<'tcx> { - DepConstructor::InstanceDefSizeEstimate { - instance_def - } -} +]} diff --git a/src/librustc/ty/query/on_disk_cache.rs b/src/librustc/ty/query/on_disk_cache.rs index 04c880826fe79..28cf3f5245ef8 100644 --- a/src/librustc/ty/query/on_disk_cache.rs +++ b/src/librustc/ty/query/on_disk_cache.rs @@ -1,37 +1,29 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; +use crate::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; +use crate::hir; +use crate::hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId, LOCAL_CRATE}; +use crate::hir::map::definitions::DefPathHash; +use crate::ich::{CachingSourceMapView, Fingerprint}; +use crate::mir::{self, interpret}; +use crate::mir::interpret::{AllocDecodingSession, AllocDecodingState}; +use crate::rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque, + SpecializedDecoder, SpecializedEncoder, + UseSpecializedDecodable, UseSpecializedEncodable}; +use crate::session::{CrateDisambiguator, Session}; +use crate::ty; +use crate::ty::codec::{self as ty_codec, TyDecoder, TyEncoder}; +use crate::ty::context::TyCtxt; +use crate::util::common::{time, time_ext}; + use errors::Diagnostic; -use hir; -use hir::def_id::{CrateNum, DefIndex, DefId, LocalDefId, LOCAL_CRATE}; -use hir::map::definitions::DefPathHash; -use ich::{CachingSourceMapView, Fingerprint}; -use mir::{self, interpret}; -use mir::interpret::{AllocDecodingSession, AllocDecodingState}; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::thin_vec::ThinVec; use rustc_data_structures::sync::{Lrc, Lock, HashMapExt, Once}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; -use rustc_serialize::{Decodable, Decoder, Encodable, Encoder, opaque, - SpecializedDecoder, SpecializedEncoder, - UseSpecializedDecodable, UseSpecializedEncodable}; -use session::{CrateDisambiguator, Session}; use std::mem; use syntax::ast::NodeId; use syntax::source_map::{SourceMap, StableSourceFileId}; use syntax_pos::{BytePos, Span, DUMMY_SP, SourceFile}; use syntax_pos::hygiene::{Mark, SyntaxContext, ExpnInfo}; -use ty; -use ty::codec::{self as ty_codec, TyDecoder, TyEncoder}; -use ty::context::TyCtxt; -use util::common::time; const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE; @@ -112,7 +104,7 @@ impl AbsoluteBytePos { } impl<'sess> OnDiskCache<'sess> { - /// Create a new OnDiskCache instance from the serialized data in `data`. + /// Creates a new OnDiskCache instance from the serialized data in `data`. 
pub fn new(sess: &'sess Session, data: Vec, start_pos: usize) -> OnDiskCache<'sess> { debug_assert!(sess.opts.incremental.is_some()); @@ -211,7 +203,7 @@ impl<'sess> OnDiskCache<'sess> { let mut query_result_index = EncodedQueryResultIndex::new(); time(tcx.sess, "encode query results", || { - use ty::query::queries::*; + use crate::ty::query::queries::*; let enc = &mut encoder; let qri = &mut query_result_index; @@ -226,7 +218,6 @@ impl<'sess> OnDiskCache<'sess> { encode_query_results::, _>(tcx, enc, qri)?; encode_query_results::, _>(tcx, enc, qri)?; encode_query_results::, _>(tcx, enc, qri)?; - encode_query_results::, _>(tcx, enc, qri)?; encode_query_results::, _>(tcx, enc, qri)?; encode_query_results::, _>(tcx, enc, qri)?; encode_query_results::, _>(tcx, enc, qri)?; @@ -234,12 +225,12 @@ impl<'sess> OnDiskCache<'sess> { encode_query_results::, _>(tcx, enc, qri)?; // const eval is special, it only encodes successfully evaluated constants - use ty::query::QueryAccessors; + use crate::ty::query::QueryAccessors; let cache = const_eval::query_cache(tcx).borrow(); assert!(cache.active.is_empty()); for (key, entry) in cache.results.iter() { - use ty::query::config::QueryDescription; - if const_eval::cache_on_disk(key.clone()) { + use crate::ty::query::config::QueryDescription; + if const_eval::cache_on_disk(tcx, key.clone()) { if let Ok(ref value) = entry.value { let dep_node = SerializedDepNodeIndex::new(entry.index.index()); @@ -334,7 +325,7 @@ impl<'sess> OnDiskCache<'sess> { }) } - /// Load a diagnostic emitted during the previous compilation session. + /// Loads a diagnostic emitted during the previous compilation session. pub fn load_diagnostics<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, dep_node_index: SerializedDepNodeIndex) @@ -348,19 +339,21 @@ impl<'sess> OnDiskCache<'sess> { diagnostics.unwrap_or_default() } - /// Store a diagnostic emitted during the current compilation session. + /// Stores a diagnostic emitted during the current compilation session. /// Anything stored like this will be available via `load_diagnostics` in /// the next compilation session. + #[inline(never)] + #[cold] pub fn store_diagnostics(&self, dep_node_index: DepNodeIndex, - diagnostics: Vec) { + diagnostics: ThinVec) { let mut current_diagnostics = self.current_diagnostics.borrow_mut(); - let prev = current_diagnostics.insert(dep_node_index, diagnostics); + let prev = current_diagnostics.insert(dep_node_index, diagnostics.into()); debug_assert!(prev.is_none()); } /// Returns the cached query result if there is something in the cache for - /// the given SerializedDepNodeIndex. Otherwise returns None. + /// the given `SerializedDepNodeIndex`; otherwise returns `None`. pub fn try_load_query_result<'tcx, T>(&self, tcx: TyCtxt<'_, 'tcx, 'tcx>, dep_node_index: SerializedDepNodeIndex) @@ -373,20 +366,20 @@ impl<'sess> OnDiskCache<'sess> { "query result") } - /// Store a diagnostic emitted during computation of an anonymous query. + /// Stores a diagnostic emitted during computation of an anonymous query. /// Since many anonymous queries can share the same `DepNode`, we aggregate /// them -- as opposed to regular queries where we assume that there is a /// 1:1 relationship between query-key and `DepNode`. 
+ #[inline(never)] + #[cold] pub fn store_diagnostics_for_anon_node(&self, dep_node_index: DepNodeIndex, - mut diagnostics: Vec) { + diagnostics: ThinVec) { let mut current_diagnostics = self.current_diagnostics.borrow_mut(); - let x = current_diagnostics.entry(dep_node_index).or_insert_with(|| { - mem::replace(&mut diagnostics, Vec::new()) - }); + let x = current_diagnostics.entry(dep_node_index).or_insert(Vec::new()); - x.extend(diagnostics.into_iter()); + x.extend(Into::>::into(diagnostics)); } fn load_indexed<'tcx, T>(&self, @@ -398,11 +391,7 @@ impl<'sess> OnDiskCache<'sess> { -> Option where T: Decodable { - let pos = if let Some(&pos) = index.get(&dep_node_index) { - pos - } else { - return None - }; + let pos = index.get(&dep_node_index).cloned()?; // Initialize the cnum_map using the value from the thread which finishes the closure first self.cnum_map.init_nonlocking_same(|| { @@ -788,7 +777,6 @@ impl<'enc, 'a, 'tcx, E> CacheEncoder<'enc, 'a, 'tcx, E> value: &V) -> Result<(), E::Error> { - use ty::codec::TyEncoder; let start_pos = self.position(); tag.encode(self)?; @@ -1092,23 +1080,22 @@ fn encode_query_results<'enc, 'a, 'tcx, Q, E>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let desc = &format!("encode_query_results for {}", unsafe { ::std::intrinsics::type_name::() }); - time(tcx.sess, desc, || { - - let map = Q::query_cache(tcx).borrow(); - assert!(map.active.is_empty()); - for (key, entry) in map.results.iter() { - if Q::cache_on_disk(key.clone()) { - let dep_node = SerializedDepNodeIndex::new(entry.index.index()); + time_ext(tcx.sess.time_extended(), Some(tcx.sess), desc, || { + let map = Q::query_cache(tcx).borrow(); + assert!(map.active.is_empty()); + for (key, entry) in map.results.iter() { + if Q::cache_on_disk(tcx, key.clone()) { + let dep_node = SerializedDepNodeIndex::new(entry.index.index()); - // Record position of the cache entry - query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position()))); + // Record position of the cache entry + query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position()))); - // Encode the type check tables with the SerializedDepNodeIndex - // as tag. - encoder.encode_tagged(dep_node, &entry.value)?; + // Encode the type check tables with the SerializedDepNodeIndex + // as tag. + encoder.encode_tagged(dep_node, &entry.value)?; + } } - } - Ok(()) + Ok(()) }) } diff --git a/src/librustc/ty/query/plumbing.rs b/src/librustc/ty/query/plumbing.rs index 5f33d466c4a19..a03cfd19b9165 100644 --- a/src/librustc/ty/query/plumbing.rs +++ b/src/librustc/ty/query/plumbing.rs @@ -1,33 +1,25 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! The implementation of the query system itself. Defines the macros -//! that generate the actual methods on tcx which find and execute the -//! provider, manage the caches, and so forth. - -use dep_graph::{DepNodeIndex, DepNode, DepKind, DepNodeColor}; +//! The implementation of the query system itself. This defines the macros that +//! generate the actual methods on tcx which find and execute the provider, +//! manage the caches, and so forth. 
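The module doc above describes the shape of the query plumbing: generated methods on `tcx` that look a result up in a per-query cache, run the provider on a miss, store the result, and record a dependency-graph read. A rough sketch of just the memoization part, with a plain `HashMap` standing in for rustc's `QueryCache` and no dep-graph or job tracking:

use std::collections::HashMap;
use std::hash::Hash;

/// A toy per-query cache: look the key up, otherwise run the provider once
/// and remember its result.
struct SimpleQueryCache<K, V> {
    results: HashMap<K, V>,
}

impl<K: Eq + Hash + Clone, V: Clone> SimpleQueryCache<K, V> {
    fn new() -> Self {
        SimpleQueryCache { results: HashMap::new() }
    }

    fn get_or_compute(&mut self, key: K, provider: impl FnOnce(&K) -> V) -> V {
        if let Some(value) = self.results.get(&key) {
            // Cache hit: reuse the previously computed value.
            return value.clone();
        }
        // Cache miss: run the provider and memoize its result.
        let value = provider(&key);
        self.results.insert(key, value.clone());
        value
    }
}

fn main() {
    let mut cache = SimpleQueryCache::new();
    let doubled = cache.get_or_compute(21u32, |k| k * 2);
    assert_eq!(doubled, 42);
    // A second call with the same key does not invoke the provider again.
    let again = cache.get_or_compute(21u32, |_| unreachable!());
    assert_eq!(again, 42);
}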
+ +use crate::dep_graph::{DepNodeIndex, DepNode, DepKind, SerializedDepNodeIndex}; +use crate::ty::tls; +use crate::ty::{self, TyCtxt}; +use crate::ty::query::Query; +use crate::ty::query::config::{QueryConfig, QueryDescription}; +use crate::ty::query::job::{QueryJob, QueryResult, QueryInfo}; + +use crate::util::common::{profq_msg, ProfileQueriesMsg, QueryMsg}; + use errors::DiagnosticBuilder; use errors::Level; use errors::Diagnostic; use errors::FatalError; -use ty::tls; -use ty::{TyCtxt}; -use ty::query::Query; -use ty::query::config::{QueryConfig, QueryDescription}; -use ty::query::job::{QueryJob, QueryResult, QueryInfo}; -use ty::item_path; - -use util::common::{profq_msg, ProfileQueriesMsg, QueryMsg}; - use rustc_data_structures::fx::{FxHashMap}; use rustc_data_structures::sync::{Lrc, Lock}; +use rustc_data_structures::thin_vec::ThinVec; +#[cfg(not(parallel_compiler))] +use rustc_data_structures::cold_path; use std::mem; use std::ptr; use std::collections::hash_map::Entry; @@ -37,6 +29,8 @@ use syntax::source_map::DUMMY_SP; pub struct QueryCache<'tcx, D: QueryConfig<'tcx> + ?Sized> { pub(super) results: FxHashMap>, pub(super) active: FxHashMap>, + #[cfg(debug_assertions)] + pub(super) cache_hits: usize, } pub(super) struct QueryValue { @@ -60,6 +54,8 @@ impl<'tcx, M: QueryConfig<'tcx>> Default for QueryCache<'tcx, M> { QueryCache { results: FxHashMap::default(), active: FxHashMap::default(), + #[cfg(debug_assertions)] + cache_hits: 0, } } } @@ -118,29 +114,40 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { let mut lock = cache.borrow_mut(); if let Some(value) = lock.results.get(key) { profq_msg!(tcx, ProfileQueriesMsg::CacheHit); - tcx.sess.profiler(|p| { - p.record_query(Q::CATEGORY); - p.record_query_hit(Q::CATEGORY); - }); - - let result = Ok((value.value.clone(), value.index)); + tcx.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY)); + let result = (value.value.clone(), value.index); + #[cfg(debug_assertions)] + { + lock.cache_hits += 1; + } return TryGetJob::JobCompleted(result); } let job = match lock.active.entry((*key).clone()) { Entry::Occupied(entry) => { match *entry.get() { - QueryResult::Started(ref job) => job.clone(), + QueryResult::Started(ref job) => { + //For parallel queries, we'll block and wait until the query running + //in another thread has completed. Record how long we wait in the + //self-profiler + #[cfg(parallel_compiler)] + tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY)); + + job.clone() + }, QueryResult::Poisoned => FatalError.raise(), } } Entry::Vacant(entry) => { // No job entry for this query. Return a new one to be started later return tls::with_related_context(tcx, |icx| { + // Create the `parent` variable before `info`. 
This allows LLVM + // to elide the move of `info` + let parent = icx.query.clone(); let info = QueryInfo { span, query: Q::query(key.clone()), }; - let job = Lrc::new(QueryJob::new(info, icx.query.clone())); + let job = Lrc::new(QueryJob::new(info, parent)); let owner = JobOwner { cache, job: job.clone(), @@ -153,14 +160,30 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { }; mem::drop(lock); - if let Err(cycle) = job.await(tcx, span) { - return TryGetJob::JobCompleted(Err(cycle)); + // If we are single-threaded we know that we have cycle error, + // so we just return the error + #[cfg(not(parallel_compiler))] + return TryGetJob::Cycle(cold_path(|| { + Q::handle_cycle_error(tcx, job.find_cycle_in_stack(tcx, span)) + })); + + // With parallel queries we might just have to wait on some other + // thread + #[cfg(parallel_compiler)] + { + let result = job.r#await(tcx, span); + tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY)); + + if let Err(cycle) = result { + return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle)); + } } } } /// Completes the query by updating the query cache with the `result`, /// signals the waiter and forgets the JobOwner, so it won't poison the query + #[inline(always)] pub(super) fn complete(self, result: &Q::Value, dep_node_index: DepNodeIndex) { // We can move out of `self` here because we `mem::forget` it below let key = unsafe { ptr::read(&self.key) }; @@ -179,44 +202,21 @@ impl<'a, 'tcx, Q: QueryDescription<'tcx>> JobOwner<'a, 'tcx, Q> { job.signal_complete(); } +} - /// Executes a job by changing the ImplicitCtxt to point to the - /// new query job while it executes. It returns the diagnostics - /// captured during execution and the actual result. - pub(super) fn start<'lcx, F, R>( - &self, - tcx: TyCtxt<'_, 'tcx, 'lcx>, - compute: F) - -> (R, Vec) - where - F: for<'b> FnOnce(TyCtxt<'b, 'tcx, 'lcx>) -> R - { - // The TyCtxt stored in TLS has the same global interner lifetime - // as `tcx`, so we use `with_related_context` to relate the 'gcx lifetimes - // when accessing the ImplicitCtxt - let r = tls::with_related_context(tcx, move |current_icx| { - // Update the ImplicitCtxt to point to our new query job - let new_icx = tls::ImplicitCtxt { - tcx, - query: Some(self.job.clone()), - layout_depth: current_icx.layout_depth, - task: current_icx.task, - }; - - // Use the ImplicitCtxt while we execute the query - tls::enter_context(&new_icx, |_| { - compute(tcx) - }) - }); - - // Extract the diagnostic from the job - let diagnostics = mem::replace(&mut *self.job.diagnostics.lock(), Vec::new()); - - (r, diagnostics) - } +#[inline(always)] +fn with_diagnostics(f: F) -> (R, ThinVec) +where + F: FnOnce(Option<&Lock>>) -> R +{ + let diagnostics = Lock::new(ThinVec::new()); + let result = f(Some(&diagnostics)); + (result, diagnostics.into_inner()) } impl<'a, 'tcx, Q: QueryDescription<'tcx>> Drop for JobOwner<'a, 'tcx, Q> { + #[inline(never)] + #[cold] fn drop(&mut self) { // Poison the query so jobs waiting on it panic self.cache.borrow_mut().active.insert(self.key.clone(), QueryResult::Poisoned); @@ -241,12 +241,52 @@ pub(super) enum TryGetJob<'a, 'tcx: 'a, D: QueryDescription<'tcx> + 'a> { /// The query was already completed. /// Returns the result of the query and its dep node index /// if it succeeded or a cycle error if it failed - JobCompleted(Result<(D::Value, DepNodeIndex), CycleError<'tcx>>), + JobCompleted((D::Value, DepNodeIndex)), + + /// Trying to execute the query resulted in a cycle. 
+ Cycle(D::Value), } impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { - pub(super) fn report_cycle(self, CycleError { usage, cycle: stack }: CycleError<'gcx>) - -> DiagnosticBuilder<'a> + /// Executes a job by changing the ImplicitCtxt to point to the + /// new query job while it executes. It returns the diagnostics + /// captured during execution and the actual result. + #[inline(always)] + pub(super) fn start_query( + self, + job: Lrc>, + diagnostics: Option<&Lock>>, + compute: F) + -> R + where + F: for<'b, 'lcx> FnOnce(TyCtxt<'b, 'gcx, 'lcx>) -> R + { + // The TyCtxt stored in TLS has the same global interner lifetime + // as `self`, so we use `with_related_context` to relate the 'gcx lifetimes + // when accessing the ImplicitCtxt + tls::with_related_context(self, move |current_icx| { + // Update the ImplicitCtxt to point to our new query job + let new_icx = tls::ImplicitCtxt { + tcx: self.global_tcx(), + query: Some(job), + diagnostics, + layout_depth: current_icx.layout_depth, + task_deps: current_icx.task_deps, + }; + + // Use the ImplicitCtxt while we execute the query + tls::enter_context(&new_icx, |_| { + compute(self.global_tcx()) + }) + }) + } + + #[inline(never)] + #[cold] + pub(super) fn report_cycle( + self, + CycleError { usage, cycle: stack }: CycleError<'gcx> + ) -> DiagnosticBuilder<'a> { assert!(!stack.is_empty()); @@ -258,7 +298,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // sometimes cycles itself, leading to extra cycle errors. // (And cycle errors around impls tend to occur during the // collect/coherence phases anyhow.) - item_path::with_forced_impl_filename_line(|| { + ty::print::with_forced_impl_filename_line(|| { let span = fix_span(stack[1 % stack.len()].span, &stack[0].query); let mut err = struct_span_err!(self.sess, span, @@ -280,7 +320,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { &format!("cycle used when {}", query.describe(self))); } - return err + err }) } @@ -311,47 +351,13 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { eprintln!("end of query stack"); } - /// Try to read a node index for the node dep_node. - /// A node will have an index, when it's already been marked green, or when we can mark it - /// green. This function will mark the current task as a reader of the specified node, when - /// a node index can be found for that node. - pub(super) fn try_mark_green_and_read(self, dep_node: &DepNode) -> Option { - match self.dep_graph.node_color(dep_node) { - Some(DepNodeColor::Green(dep_node_index)) => { - self.dep_graph.read_index(dep_node_index); - Some(dep_node_index) - } - Some(DepNodeColor::Red) => { - None - } - None => { - // try_mark_green (called below) will panic when full incremental - // compilation is disabled. If that's the case, we can't try to mark nodes - // as green anyway, so we can safely return None here. 
- if !self.dep_graph.is_fully_enabled() { - return None; - } - match self.dep_graph.try_mark_green(self.global_tcx(), &dep_node) { - Some(dep_node_index) => { - debug_assert!(self.dep_graph.is_green(&dep_node)); - self.dep_graph.read_index(dep_node_index); - Some(dep_node_index) - } - None => { - None - } - } - } - } - } - - fn try_get_with>( + #[inline(never)] + pub(super) fn get_query>( self, span: Span, key: Q::Key) - -> Result> - { - debug!("ty::queries::{}::try_get_with(key={:?}, span={:?})", + -> Q::Value { + debug!("ty::query::get_query<{}>(key={:?}, span={:?})", Q::NAME, key, span); @@ -363,80 +369,84 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { ) ); - self.sess.profiler(|p| p.record_query(Q::CATEGORY)); - let job = match JobOwner::try_get(self, span, &key) { TryGetJob::NotYetStarted(job) => job, - TryGetJob::JobCompleted(result) => { - return result.map(|(v, index)| { - self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY)); - self.dep_graph.read_index(index); - v - }) + TryGetJob::Cycle(result) => return result, + TryGetJob::JobCompleted((v, index)) => { + self.dep_graph.read_index(index); + return v } }; // Fast path for when incr. comp. is off. `to_dep_node` is // expensive for some DepKinds. if !self.dep_graph.is_fully_enabled() { - let null_dep_node = DepNode::new_no_params(::dep_graph::DepKind::Null); - return self.force_query_with_job::(key, job, null_dep_node).map(|(v, _)| v); + let null_dep_node = DepNode::new_no_params(crate::dep_graph::DepKind::Null); + return self.force_query_with_job::(key, job, null_dep_node).0; } let dep_node = Q::to_dep_node(self, &key); if dep_node.kind.is_anon() { profq_msg!(self, ProfileQueriesMsg::ProviderBegin); - self.sess.profiler(|p| p.start_activity(Q::CATEGORY)); + self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY)); - let res = job.start(self, |tcx| { - tcx.dep_graph.with_anon_task(dep_node.kind, || { - Q::compute(tcx.global_tcx(), key) + let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { + self.start_query(job.job.clone(), diagnostics, |tcx| { + tcx.dep_graph.with_anon_task(dep_node.kind, || { + Q::compute(tcx.global_tcx(), key) + }) }) }); - self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY)); profq_msg!(self, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; self.dep_graph.read_index(dep_node_index); - self.queries.on_disk_cache - .store_diagnostics_for_anon_node(dep_node_index, diagnostics); + if unlikely!(!diagnostics.is_empty()) { + self.queries.on_disk_cache + .store_diagnostics_for_anon_node(dep_node_index, diagnostics); + } job.complete(&result, dep_node_index); - return Ok(result); + return result; } - if !dep_node.kind.is_input() { - if let Some(dep_node_index) = self.try_mark_green_and_read(&dep_node) { - profq_msg!(self, ProfileQueriesMsg::CacheHit); - self.sess.profiler(|p| p.record_query_hit(Q::CATEGORY)); - - return self.load_from_disk_and_cache_in_memory::(key, - job, - dep_node_index, - &dep_node) + if !dep_node.kind.is_eval_always() { + // The diagnostics for this query will be + // promoted to the current session during + // try_mark_green(), so we can ignore them here. 
+ let loaded = self.start_query(job.job.clone(), None, |tcx| { + let marked = tcx.dep_graph.try_mark_green_and_read(tcx, &dep_node); + marked.map(|(prev_dep_node_index, dep_node_index)| { + (tcx.load_from_disk_and_cache_in_memory::( + key.clone(), + prev_dep_node_index, + dep_node_index, + &dep_node + ), dep_node_index) + }) + }); + if let Some((result, dep_node_index)) = loaded { + job.complete(&result, dep_node_index); + return result; } } - match self.force_query_with_job::(key, job, dep_node) { - Ok((result, dep_node_index)) => { - self.dep_graph.read_index(dep_node_index); - Ok(result) - } - Err(e) => Err(e) - } + let (result, dep_node_index) = self.force_query_with_job::(key, job, dep_node); + self.dep_graph.read_index(dep_node_index); + result } fn load_from_disk_and_cache_in_memory>( self, key: Q::Key, - job: JobOwner<'a, 'gcx, Q>, + prev_dep_node_index: SerializedDepNodeIndex, dep_node_index: DepNodeIndex, dep_node: &DepNode - ) -> Result> + ) -> Q::Value { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly @@ -444,12 +454,11 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { debug_assert!(self.dep_graph.is_green(dep_node)); // First we try to load the result from the on-disk cache - let result = if Q::cache_on_disk(key.clone()) && + let result = if Q::cache_on_disk(self.global_tcx(), key.clone()) && self.sess.opts.debugging_opts.incremental_queries { - let prev_dep_node_index = - self.dep_graph.prev_dep_node_index_of(dep_node); - let result = Q::try_load_from_disk(self.global_tcx(), - prev_dep_node_index); + self.sess.profiler(|p| p.incremental_load_result_start(Q::NAME)); + let result = Q::try_load_from_disk(self.global_tcx(), prev_dep_node_index); + self.sess.profiler(|p| p.incremental_load_result_end(Q::NAME)); // We always expect to find a cached result for things that // can be forced from DepNode. @@ -464,65 +473,73 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }; let result = if let Some(result) = result { + profq_msg!(self, ProfileQueriesMsg::CacheHit); + self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY)); + result } else { // We could not load a result from the on-disk cache, so // recompute. - // The diagnostics for this query have already been - // promoted to the current session during - // try_mark_green(), so we can ignore them here. - let (result, _) = job.start(self, |tcx| { - // The dep-graph for this computation is already in - // place - tcx.dep_graph.with_ignore(|| { - Q::compute(tcx, key) - }) + self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY)); + + // The dep-graph for this computation is already in + // place + let result = self.dep_graph.with_ignore(|| { + Q::compute(self, key) }); + + self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY)); result }; // If -Zincremental-verify-ich is specified, re-hash results from // the cache and make sure that they have the expected fingerprint. 
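The green path above first tries to load the result from the on-disk cache and only recomputes (with the dep-graph ignored) when that fails; `-Zincremental-verify-ich` then re-hashes the result and compares it against the fingerprint recorded in the dep-graph. A rough stand-alone sketch of that load-or-recompute-then-verify idea, using a plain `HashMap` and `DefaultHasher` in place of rustc's on-disk cache and stable hasher:

    use std::collections::HashMap;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn fingerprint<T: Hash>(value: &T) -> u64 {
        let mut h = DefaultHasher::new();
        value.hash(&mut h);
        h.finish()
    }

    struct DiskCache {
        results: HashMap<&'static str, String>,
        // Fingerprint recorded when the result was first computed.
        fingerprints: HashMap<&'static str, u64>,
    }

    impl DiskCache {
        fn load_or_recompute(
            &self,
            key: &'static str,
            compute: impl FnOnce() -> String,
            verify: bool,
        ) -> String {
            // Prefer the cached result; otherwise recompute it.
            let result = self.results.get(key).cloned().unwrap_or_else(compute);
            if verify {
                if let Some(&recorded) = self.fingerprints.get(key) {
                    assert!(fingerprint(&result) == recorded,
                            "found unstable fingerprint for {:?}", key);
                }
            }
            result
        }
    }

    fn main() {
        let mut cache = DiskCache { results: HashMap::new(), fingerprints: HashMap::new() };
        cache.results.insert("type_of(foo)", "i32".to_string());
        cache.fingerprints.insert("type_of(foo)", fingerprint(&"i32".to_string()));
        assert_eq!(cache.load_or_recompute("type_of(foo)", || "i32".into(), true), "i32");
    }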
- if self.sess.opts.debugging_opts.incremental_verify_ich { - use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; - use ich::Fingerprint; - - assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) == - self.dep_graph.prev_fingerprint_of(dep_node), - "Fingerprint for green query instance not loaded \ - from cache: {:?}", dep_node); + if unlikely!(self.sess.opts.debugging_opts.incremental_verify_ich) { + self.incremental_verify_ich::(&result, dep_node, dep_node_index); + } - debug!("BEGIN verify_ich({:?})", dep_node); - let mut hcx = self.create_stable_hashing_context(); - let mut hasher = StableHasher::new(); + if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) { + self.dep_graph.mark_loaded_from_cache(dep_node_index, true); + } - result.hash_stable(&mut hcx, &mut hasher); + result + } - let new_hash: Fingerprint = hasher.finish(); - debug!("END verify_ich({:?})", dep_node); + #[inline(never)] + #[cold] + fn incremental_verify_ich>( + self, + result: &Q::Value, + dep_node: &DepNode, + dep_node_index: DepNodeIndex, + ) { + use crate::ich::Fingerprint; - let old_hash = self.dep_graph.fingerprint_of(dep_node_index); + assert!(Some(self.dep_graph.fingerprint_of(dep_node_index)) == + self.dep_graph.prev_fingerprint_of(dep_node), + "Fingerprint for green query instance not loaded \ + from cache: {:?}", dep_node); - assert!(new_hash == old_hash, "Found unstable fingerprints \ - for {:?}", dep_node); - } + debug!("BEGIN verify_ich({:?})", dep_node); + let mut hcx = self.create_stable_hashing_context(); - if self.sess.opts.debugging_opts.query_dep_graph { - self.dep_graph.mark_loaded_from_cache(dep_node_index, true); - } + let new_hash = Q::hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); + debug!("END verify_ich({:?})", dep_node); - job.complete(&result, dep_node_index); + let old_hash = self.dep_graph.fingerprint_of(dep_node_index); - Ok(result) + assert!(new_hash == old_hash, "Found unstable fingerprints \ + for {:?}", dep_node); } + #[inline(always)] fn force_query_with_job>( self, key: Q::Key, job: JobOwner<'_, 'gcx, Q>, dep_node: DepNode) - -> Result<(Q::Value, DepNodeIndex), CycleError<'gcx>> { + -> (Q::Value, DepNodeIndex) { // If the following assertion triggers, it can have two reasons: // 1. 
Something is wrong with DepNode creation, either here or // in DepGraph::try_mark_green() @@ -535,42 +552,43 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { key, dep_node); profq_msg!(self, ProfileQueriesMsg::ProviderBegin); - self.sess.profiler(|p| { - p.start_activity(Q::CATEGORY); - p.record_query(Q::CATEGORY); - }); - - let res = job.start(self, |tcx| { - if dep_node.kind.is_eval_always() { - tcx.dep_graph.with_eval_always_task(dep_node, - tcx, - key, - Q::compute) - } else { - tcx.dep_graph.with_task(dep_node, - tcx, - key, - Q::compute) - } + self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY)); + + let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { + self.start_query(job.job.clone(), diagnostics, |tcx| { + if dep_node.kind.is_eval_always() { + tcx.dep_graph.with_eval_always_task(dep_node, + tcx, + key, + Q::compute, + Q::hash_result) + } else { + tcx.dep_graph.with_task(dep_node, + tcx, + key, + Q::compute, + Q::hash_result) + } + }) }); - self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY)); profq_msg!(self, ProfileQueriesMsg::ProviderEnd); - let ((result, dep_node_index), diagnostics) = res; - - if self.sess.opts.debugging_opts.query_dep_graph { + if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) { self.dep_graph.mark_loaded_from_cache(dep_node_index, false); } - if dep_node.kind != ::dep_graph::DepKind::Null { - self.queries.on_disk_cache - .store_diagnostics(dep_node_index, diagnostics); + if dep_node.kind != crate::dep_graph::DepKind::Null { + if unlikely!(!diagnostics.is_empty()) { + self.queries.on_disk_cache + .store_diagnostics(dep_node_index, diagnostics); + } } job.complete(&result, dep_node_index); - Ok((result, dep_node_index)) + (result, dep_node_index) } /// Ensure that either this query has all green inputs or been executed. @@ -583,10 +601,14 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { pub(super) fn ensure_query>(self, key: Q::Key) -> () { let dep_node = Q::to_dep_node(self, &key); - // Ensuring an "input" or anonymous query makes no sense + if dep_node.kind.is_eval_always() { + let _ = self.get_query::(DUMMY_SP, key); + return; + } + + // Ensuring an anonymous query makes no sense assert!(!dep_node.kind.is_anon()); - assert!(!dep_node.kind.is_input()); - if self.try_mark_green_and_read(&dep_node).is_none() { + if self.dep_graph.try_mark_green_and_read(self, &dep_node).is_none() { // A None return from `try_mark_green_and_read` means that this is either // a new dep node or that the dep node has already been marked red. // Either way, we can't call `dep_graph.read()` as we don't have the @@ -594,14 +616,10 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { // this introduces should be negligible as we'll immediately hit the // in-memory cache, or another query down the line will. 
- self.sess.profiler(|p| { - p.start_activity(Q::CATEGORY); - p.record_query(Q::CATEGORY); - }); - let _ = self.get_query::(DUMMY_SP, key); - - self.sess.profiler(|p| p.end_activity(Q::CATEGORY)); + } else { + profq_msg!(self, ProfileQueriesMsg::CacheHit); + self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY)); } } @@ -611,52 +629,56 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { key: Q::Key, span: Span, dep_node: DepNode - ) -> Result<(Q::Value, DepNodeIndex), CycleError<'gcx>> { + ) { + profq_msg!( + self, + ProfileQueriesMsg::QueryBegin(span.data(), profq_query_msg!(Q::NAME, self, key)) + ); + // We may be concurrently trying both execute and force a query // Ensure that only one of them runs the query let job = match JobOwner::try_get(self, span, &key) { TryGetJob::NotYetStarted(job) => job, - TryGetJob::JobCompleted(result) => return result, + TryGetJob::Cycle(_) | + TryGetJob::JobCompleted(_) => { + return + } }; - self.force_query_with_job::(key, job, dep_node) - } - - pub(super) fn try_get_query>( - self, - span: Span, - key: Q::Key, - ) -> Result> { - match self.try_get_with::(span, key) { - Ok(e) => Ok(e), - Err(e) => Err(self.report_cycle(e)), - } - } - - pub(super) fn get_query>( - self, - span: Span, - key: Q::Key, - ) -> Q::Value { - self.try_get_query::(span, key).unwrap_or_else(|mut e| { - e.emit(); - Q::handle_cycle_error(self) - }) + self.force_query_with_job::(key, job, dep_node); } } macro_rules! handle_cycle_error { - ([][$this: expr]) => {{ - Value::from_cycle_error($this.global_tcx()) + ([][$tcx: expr, $error:expr]) => {{ + $tcx.report_cycle($error).emit(); + Value::from_cycle_error($tcx.global_tcx()) + }}; + ([fatal_cycle$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{ + $tcx.report_cycle($error).emit(); + $tcx.sess.abort_if_errors(); + unreachable!() }}; - ([fatal_cycle$(, $modifiers:ident)*][$this:expr]) => {{ - $this.sess.abort_if_errors(); - unreachable!(); + ([cycle_delay_bug$(, $modifiers:ident)*][$tcx:expr, $error:expr]) => {{ + $tcx.report_cycle($error).delay_as_bug(); + Value::from_cycle_error($tcx.global_tcx()) }}; ([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => { handle_cycle_error!([$($modifiers),*][$($args)*]) }; } +macro_rules! hash_result { + ([][$hcx:expr, $result:expr]) => {{ + dep_graph::hash_result($hcx, &$result) + }}; + ([no_hash$(, $modifiers:ident)*][$hcx:expr, $result:expr]) => {{ + None + }}; + ([$other:ident$(, $modifiers:ident)*][$($args:tt)*]) => { + hash_result!([$($modifiers),*][$($args)*]) + }; +} + macro_rules! define_queries { (<$tcx:tt> $($category:tt { $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)* @@ -673,16 +695,16 @@ macro_rules! define_queries_inner { [$($modifiers:tt)*] fn $name:ident: $node:ident($K:ty) -> $V:ty,)*) => { use std::mem; - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] use ty::query::job::QueryResult; use rustc_data_structures::sync::Lock; - use { + use crate::{ rustc_data_structures::stable_hasher::HashStable, rustc_data_structures::stable_hasher::StableHasherResult, rustc_data_structures::stable_hasher::StableHasher, ich::StableHashingContext }; - use util::profiling::ProfileCategory; + use crate::util::profiling::ProfileCategory; define_queries_struct! { tcx: $tcx, @@ -703,7 +725,19 @@ macro_rules! 
define_queries_inner { } } - #[cfg(parallel_queries)] + pub fn record_computed_queries(&self, sess: &Session) { + sess.profiler(|p| { + $( + p.record_computed_queries( + as QueryConfig<'_>>::NAME, + as QueryConfig<'_>>::CATEGORY, + self.$name.lock().results.len() + ); + )* + }); + } + + #[cfg(parallel_compiler)] pub fn collect_active_jobs(&self) -> Vec>> { let mut jobs = Vec::new(); @@ -723,6 +757,101 @@ macro_rules! define_queries_inner { jobs } + + pub fn print_stats(&self) { + let mut queries = Vec::new(); + + #[derive(Clone)] + struct QueryStats { + name: &'static str, + cache_hits: usize, + key_size: usize, + key_type: &'static str, + value_size: usize, + value_type: &'static str, + entry_count: usize, + } + + fn stats<'tcx, Q: QueryConfig<'tcx>>( + name: &'static str, + map: &QueryCache<'tcx, Q> + ) -> QueryStats { + QueryStats { + name, + #[cfg(debug_assertions)] + cache_hits: map.cache_hits, + #[cfg(not(debug_assertions))] + cache_hits: 0, + key_size: mem::size_of::(), + key_type: unsafe { type_name::() }, + value_size: mem::size_of::(), + value_type: unsafe { type_name::() }, + entry_count: map.results.len(), + } + } + + $( + queries.push(stats::>( + stringify!($name), + &*self.$name.lock() + )); + )* + + if cfg!(debug_assertions) { + let hits: usize = queries.iter().map(|s| s.cache_hits).sum(); + let results: usize = queries.iter().map(|s| s.entry_count).sum(); + println!("\nQuery cache hit rate: {}", hits as f64 / (hits + results) as f64); + } + + let mut query_key_sizes = queries.clone(); + query_key_sizes.sort_by_key(|q| q.key_size); + println!("\nLarge query keys:"); + for q in query_key_sizes.iter().rev() + .filter(|q| q.key_size > 8) { + println!( + " {} - {} x {} - {}", + q.name, + q.key_size, + q.entry_count, + q.key_type + ); + } + + let mut query_value_sizes = queries.clone(); + query_value_sizes.sort_by_key(|q| q.value_size); + println!("\nLarge query values:"); + for q in query_value_sizes.iter().rev() + .filter(|q| q.value_size > 8) { + println!( + " {} - {} x {} - {}", + q.name, + q.value_size, + q.entry_count, + q.value_type + ); + } + + if cfg!(debug_assertions) { + let mut query_cache_hits = queries.clone(); + query_cache_hits.sort_by_key(|q| q.cache_hits); + println!("\nQuery cache hits:"); + for q in query_cache_hits.iter().rev() { + println!( + " {} - {} ({}%)", + q.name, + q.cache_hits, + q.cache_hits as f64 / (q.cache_hits + q.entry_count) as f64 + ); + } + } + + let mut query_value_count = queries.clone(); + query_value_count.sort_by_key(|q| q.entry_count); + println!("\nQuery value count:"); + for q in query_value_count.iter().rev() { + println!(" {} - {}", q.name, q.entry_count); + } + } } #[allow(nonstandard_style)] @@ -806,17 +935,20 @@ macro_rules! define_queries_inner { } impl<$tcx> QueryAccessors<$tcx> for queries::$name<$tcx> { + #[inline(always)] fn query(key: Self::Key) -> Query<'tcx> { Query::$name(key) } + #[inline(always)] fn query_cache<'a>(tcx: TyCtxt<'a, $tcx, '_>) -> &'a Lock> { &tcx.queries.$name } #[allow(unused)] + #[inline(always)] fn to_dep_node(tcx: TyCtxt<'_, $tcx, '_>, key: &Self::Key) -> DepNode { - use dep_graph::DepConstructor::*; + use crate::dep_graph::DepConstructor::*; DepNode::new(tcx, $node(*key)) } @@ -828,31 +960,41 @@ macro_rules! define_queries_inner { // HACK(eddyb) it's possible crates may be loaded after // the query engine is created, and because crate loading // is not yet integrated with the query engine, such crates - // would be be missing appropriate entries in `providers`. 
+ would be missing appropriate entries in `providers`. .unwrap_or(&tcx.queries.fallback_extern_providers) .$name; provider(tcx.global_tcx(), key) }) } - fn handle_cycle_error(tcx: TyCtxt<'_, 'tcx, '_>) -> Self::Value { - handle_cycle_error!([$($modifiers)*][tcx]) + fn hash_result( + _hcx: &mut StableHashingContext<'_>, + _result: &Self::Value + ) -> Option { + hash_result!([$($modifiers)*][_hcx, _result]) } - } - impl<'a, $tcx, 'lcx> queries::$name<$tcx> { - /// Ensure that either this query has all green inputs or been executed. - /// Executing query::ensure(D) is considered a read of the dep-node D. - /// - /// This function is particularly useful when executing passes for their - /// side-effects -- e.g., in order to report errors for erroneous programs. - /// - /// Note: The optimization is only available during incr. comp. - pub fn ensure(tcx: TyCtxt<'a, $tcx, 'lcx>, key: $K) -> () { - tcx.ensure_query::>(key); + fn handle_cycle_error( + tcx: TyCtxt<'_, 'tcx, '_>, + error: CycleError<'tcx> + ) -> Self::Value { + handle_cycle_error!([$($modifiers)*][tcx, error]) } })* + #[derive(Copy, Clone)] + pub struct TyCtxtEnsure<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { + pub tcx: TyCtxt<'a, 'gcx, 'tcx>, + } + + impl<'a, $tcx, 'lcx> TyCtxtEnsure<'a, $tcx, 'lcx> { + $($(#[$attr])* + #[inline(always)] + pub fn $name(self, key: $K) { + self.tcx.ensure_query::>(key) + })* + } + #[derive(Copy, Clone)] pub struct TyCtxtAt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'gcx, 'tcx>, @@ -861,14 +1003,25 @@ macro_rules! define_queries_inner { impl<'a, 'gcx, 'tcx> Deref for TyCtxtAt<'a, 'gcx, 'tcx> { type Target = TyCtxt<'a, 'gcx, 'tcx>; + #[inline(always)] fn deref(&self) -> &Self::Target { &self.tcx } } impl<'a, $tcx, 'lcx> TyCtxt<'a, $tcx, 'lcx> { - /// Return a transparent wrapper for `TyCtxt` which uses + /// Returns a transparent wrapper for `TyCtxt`, which ensures queries + /// are executed instead of just returning their results. + #[inline(always)] + pub fn ensure(self) -> TyCtxtEnsure<'a, $tcx, 'lcx> { + TyCtxtEnsure { + tcx: self, + } + } + + /// Returns a transparent wrapper for `TyCtxt` which uses /// `span` as the location of queries performed through it. + #[inline(always)] pub fn at(self, span: Span) -> TyCtxtAt<'a, $tcx, 'lcx> { TyCtxtAt { tcx: self, @@ -877,6 +1030,7 @@ macro_rules! define_queries_inner { } $($(#[$attr])* + #[inline(always)] pub fn $name(self, key: $K) -> $V { self.at(DUMMY_SP).$name(key) })* @@ -884,6 +1038,7 @@ macro_rules! define_queries_inner { impl<'a, $tcx, 'lcx> TyCtxtAt<'a, $tcx, 'lcx> { $($(#[$attr])* + #[inline(always)] pub fn $name(self, key: $K) -> $V { self.tcx.get_query::>(self.span, key) })* @@ -904,8 +1059,8 @@ macro_rules! define_queries_inner { macro_rules! define_queries_struct { (tcx: $tcx:tt, input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - pub(crate) struct Queries<$tcx> { - /// This provides access to the incr. comp. on-disk cache for query results. + pub struct Queries<$tcx> { + /// This provides access to the incremental compilation on-disk cache for query results. /// Do not access this directly. It is only meant to be used by /// `DepGraph::try_mark_green()` and the query infrastructure. pub(crate) on_disk_cache: OnDiskCache<'tcx>, @@ -961,33 +1116,34 @@ macro_rules! define_provider_struct { /// /// Now, if force_from_dep_node() would always fail, it would be pretty useless. /// Fortunately, we can use some contextual information that will allow us to -/// reconstruct query-keys for certain kinds of DepNodes.
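The macro above generates two thin wrappers around the query getters: `tcx.ensure().some_query(key)` runs a query purely for its side effects, while `tcx.at(span).some_query(key)` attributes the query to `span` and plain `tcx.some_query(key)` defaults to `DUMMY_SP`. A self-contained sketch of the `at`/`Deref` wrapper pattern, with toy types standing in for `TyCtxt` and an invented `type_of` accessor:

    use std::ops::Deref;

    #[derive(Clone, Copy)]
    struct Span(u32);
    const DUMMY_SP: Span = Span(0);

    #[derive(Clone, Copy)]
    struct Ctx;

    #[derive(Clone, Copy)]
    struct CtxAt { ctx: Ctx, span: Span }

    impl Deref for CtxAt {
        type Target = Ctx;
        fn deref(&self) -> &Ctx { &self.ctx }
    }

    impl Ctx {
        // `ctx.at(span)` returns a wrapper that attributes queries to `span`.
        fn at(self, span: Span) -> CtxAt { CtxAt { ctx: self, span } }
        // Plain calls delegate to the wrapper with a dummy span.
        fn type_of(self, key: u32) -> String { self.at(DUMMY_SP).type_of(key) }
    }

    impl CtxAt {
        fn type_of(self, key: u32) -> String {
            format!("type of item {} (requested at span {})", key, self.span.0)
        }
    }

    fn main() {
        let tcx = Ctx;
        println!("{}", tcx.type_of(7));              // uses DUMMY_SP
        println!("{}", tcx.at(Span(42)).type_of(7)); // attributed to span 42
    }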
In particular, we -/// enforce by construction that the GUID/fingerprint of certain DepNodes is a -/// valid DefPathHash. Since we also always build a huge table that maps every -/// DefPathHash in the current codebase to the corresponding DefId, we have +/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we +/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a +/// valid `DefPathHash`. Since we also always build a huge table that maps every +/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have /// everything we need to re-run the query. /// /// Take the `mir_validated` query as an example. Like many other queries, it -/// just has a single parameter: the DefId of the item it will compute the -/// validated MIR for. Now, when we call `force_from_dep_node()` on a dep-node -/// with kind `MirValidated`, we know that the GUID/fingerprint of the dep-node -/// is actually a DefPathHash, and can therefore just look up the corresponding -/// DefId in `tcx.def_path_hash_to_def_id`. +/// just has a single parameter: the `DefId` of the item it will compute the +/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode` +/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` +/// is actually a `DefPathHash`, and can therefore just look up the corresponding +/// `DefId` in `tcx.def_path_hash_to_def_id`. /// /// When you implement a new query, it will likely have a corresponding new -/// DepKind, and you'll have to support it here in `force_from_dep_node()`. As -/// a rule of thumb, if your query takes a DefId or DefIndex as sole parameter, +/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As +/// a rule of thumb, if your query takes a `DefId` or `DefIndex` as sole parameter, /// then `force_from_dep_node()` should not fail for it. Otherwise, you can just /// add it to the "We don't have enough information to reconstruct..." group in /// the match below. -pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, - dep_node: &DepNode) - -> bool { - use hir::def_id::LOCAL_CRATE; +pub fn force_from_dep_node<'tcx>( + tcx: TyCtxt<'_, 'tcx, 'tcx>, + dep_node: &DepNode +) -> bool { + use crate::dep_graph::RecoverKey; // We must avoid ever having to call force_from_dep_node() for a - // DepNode::CodegenUnit: - // Since we cannot reconstruct the query key of a DepNode::CodegenUnit, we + // DepNode::codegen_unit: + // Since we cannot reconstruct the query key of a DepNode::codegen_unit, we // would always end up having to evaluate the first caller of the // `codegen_unit` query that *is* reconstructible. This might very well be // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just @@ -998,8 +1154,8 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, // each CGU, right after partitioning. This way `try_mark_green` will always // hit the cache instead of having to go through `force_from_dep_node`. // This assertion makes sure, we actually keep applying the solution above. - debug_assert!(dep_node.kind != DepKind::CodegenUnit, - "calling force_from_dep_node() on DepKind::CodegenUnit"); + debug_assert!(dep_node.kind != DepKind::codegen_unit, + "calling force_from_dep_node() on DepKind::codegen_unit"); if !dep_node.kind.can_reconstruct_query_key() { return false @@ -1020,30 +1176,23 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, () => { (def_id!()).krate } }; - macro_rules! 
force { - ($query:ident, $key:expr) => { + macro_rules! force_ex { + ($tcx:expr, $query:ident, $key:expr) => { { - use $crate::util::common::{ProfileQueriesMsg, profq_msg}; - - profq_msg!(tcx, - ProfileQueriesMsg::QueryBegin( - DUMMY_SP.data(), - profq_query_msg!(::ty::query::queries::$query::NAME, tcx, $key), - ) + $tcx.force_query::>( + $key, + DUMMY_SP, + *dep_node ); - - if let Err(e) = tcx.force_query::<::ty::query::queries::$query<'_>>( - $key, DUMMY_SP, *dep_node - ) { - tcx.report_cycle(e).emit(); - } } } }; - // FIXME(#45015): We should try move this boilerplate code into a macro - // somehow. - match dep_node.kind { + macro_rules! force { + ($query:ident, $key:expr) => { force_ex!(tcx, $query, $key) } + }; + + rustc_dep_node_force!([dep_node, tcx] // These are inputs that are expected to be pre-allocated and that // should therefore always be red or green already DepKind::AllLocalTraitImpls | @@ -1057,225 +1206,12 @@ pub fn force_from_dep_node<'a, 'gcx, 'lcx>(tcx: TyCtxt<'a, 'gcx, 'lcx>, // We don't have enough information to reconstruct the query key of // these - DepKind::IsCopy | - DepKind::IsSized | - DepKind::IsFreeze | - DepKind::NeedsDrop | - DepKind::Layout | - DepKind::ConstEval | - DepKind::ConstEvalRaw | - DepKind::InstanceSymbolName | - DepKind::MirShim | - DepKind::BorrowCheckKrate | - DepKind::Specializes | - DepKind::ImplementationsOfTrait | - DepKind::TypeParamPredicates | - DepKind::CodegenUnit | - DepKind::CompileCodegenUnit | - DepKind::FulfillObligation | - DepKind::VtableMethods | - DepKind::EraseRegionsTy | - DepKind::NormalizeProjectionTy | - DepKind::NormalizeTyAfterErasingRegions | - DepKind::ImpliedOutlivesBounds | - DepKind::DropckOutlives | - DepKind::EvaluateObligation | - DepKind::TypeOpAscribeUserType | - DepKind::TypeOpEq | - DepKind::TypeOpSubtype | - DepKind::TypeOpProvePredicate | - DepKind::TypeOpNormalizeTy | - DepKind::TypeOpNormalizePredicate | - DepKind::TypeOpNormalizePolyFnSig | - DepKind::TypeOpNormalizeFnSig | - DepKind::SubstituteNormalizeAndTestPredicates | - DepKind::InstanceDefSizeEstimate | - DepKind::ProgramClausesForEnv | - - // This one should never occur in this context - DepKind::Null => { + DepKind::CompileCodegenUnit => { bug!("force_from_dep_node() - Encountered {:?}", dep_node) } - // These are not queries - DepKind::CoherenceCheckTrait | - DepKind::ItemVarianceConstraints => { - return false - } - - DepKind::RegionScopeTree => { force!(region_scope_tree, def_id!()); } - - DepKind::Coherence => { force!(crate_inherent_impls, LOCAL_CRATE); } - DepKind::CoherenceInherentImplOverlapCheck => { - force!(crate_inherent_impls_overlap_check, LOCAL_CRATE) - }, - DepKind::PrivacyAccessLevels => { force!(privacy_access_levels, LOCAL_CRATE); } - DepKind::MirBuilt => { force!(mir_built, def_id!()); } - DepKind::MirConstQualif => { force!(mir_const_qualif, def_id!()); } - DepKind::MirConst => { force!(mir_const, def_id!()); } - DepKind::MirValidated => { force!(mir_validated, def_id!()); } - DepKind::MirOptimized => { force!(optimized_mir, def_id!()); } - - DepKind::BorrowCheck => { force!(borrowck, def_id!()); } - DepKind::MirBorrowCheck => { force!(mir_borrowck, def_id!()); } - DepKind::UnsafetyCheckResult => { force!(unsafety_check_result, def_id!()); } - DepKind::UnsafeDeriveOnReprPacked => { force!(unsafe_derive_on_repr_packed, def_id!()); } - DepKind::Reachability => { force!(reachable_set, LOCAL_CRATE); } - DepKind::MirKeys => { force!(mir_keys, LOCAL_CRATE); } - DepKind::CrateVariances => { force!(crate_variances, LOCAL_CRATE); 
} - DepKind::AssociatedItems => { force!(associated_item, def_id!()); } - DepKind::TypeOfItem => { force!(type_of, def_id!()); } - DepKind::GenericsOfItem => { force!(generics_of, def_id!()); } - DepKind::PredicatesOfItem => { force!(predicates_of, def_id!()); } - DepKind::PredicatesDefinedOnItem => { force!(predicates_defined_on, def_id!()); } - DepKind::ExplicitPredicatesOfItem => { force!(explicit_predicates_of, def_id!()); } - DepKind::InferredOutlivesOf => { force!(inferred_outlives_of, def_id!()); } - DepKind::InferredOutlivesCrate => { force!(inferred_outlives_crate, LOCAL_CRATE); } - DepKind::SuperPredicatesOfItem => { force!(super_predicates_of, def_id!()); } - DepKind::TraitDefOfItem => { force!(trait_def, def_id!()); } - DepKind::AdtDefOfItem => { force!(adt_def, def_id!()); } - DepKind::ImplTraitRef => { force!(impl_trait_ref, def_id!()); } - DepKind::ImplPolarity => { force!(impl_polarity, def_id!()); } - DepKind::FnSignature => { force!(fn_sig, def_id!()); } - DepKind::CoerceUnsizedInfo => { force!(coerce_unsized_info, def_id!()); } - DepKind::ItemVariances => { force!(variances_of, def_id!()); } - DepKind::IsConstFn => { force!(is_const_fn_raw, def_id!()); } - DepKind::IsPromotableConstFn => { force!(is_promotable_const_fn, def_id!()); } - DepKind::IsForeignItem => { force!(is_foreign_item, def_id!()); } - DepKind::SizedConstraint => { force!(adt_sized_constraint, def_id!()); } - DepKind::DtorckConstraint => { force!(adt_dtorck_constraint, def_id!()); } - DepKind::AdtDestructor => { force!(adt_destructor, def_id!()); } - DepKind::AssociatedItemDefIds => { force!(associated_item_def_ids, def_id!()); } - DepKind::InherentImpls => { force!(inherent_impls, def_id!()); } - DepKind::TypeckBodiesKrate => { force!(typeck_item_bodies, LOCAL_CRATE); } - DepKind::TypeckTables => { force!(typeck_tables_of, def_id!()); } - DepKind::UsedTraitImports => { force!(used_trait_imports, def_id!()); } - DepKind::HasTypeckTables => { force!(has_typeck_tables, def_id!()); } - DepKind::SymbolName => { force!(def_symbol_name, def_id!()); } - DepKind::SpecializationGraph => { force!(specialization_graph_of, def_id!()); } - DepKind::ObjectSafety => { force!(is_object_safe, def_id!()); } - DepKind::TraitImpls => { force!(trait_impls_of, def_id!()); } - DepKind::CheckMatch => { force!(check_match, def_id!()); } - - DepKind::ParamEnv => { force!(param_env, def_id!()); } - DepKind::Environment => { force!(environment, def_id!()); } - DepKind::DescribeDef => { force!(describe_def, def_id!()); } - DepKind::DefSpan => { force!(def_span, def_id!()); } - DepKind::LookupStability => { force!(lookup_stability, def_id!()); } - DepKind::LookupDeprecationEntry => { - force!(lookup_deprecation_entry, def_id!()); - } - DepKind::ConstIsRvaluePromotableToStatic => { - force!(const_is_rvalue_promotable_to_static, def_id!()); - } - DepKind::RvaluePromotableMap => { force!(rvalue_promotable_map, def_id!()); } - DepKind::ImplParent => { force!(impl_parent, def_id!()); } - DepKind::TraitOfItem => { force!(trait_of_item, def_id!()); } - DepKind::IsReachableNonGeneric => { force!(is_reachable_non_generic, def_id!()); } - DepKind::IsUnreachableLocalDefinition => { - force!(is_unreachable_local_definition, def_id!()); - } - DepKind::IsMirAvailable => { force!(is_mir_available, def_id!()); } - DepKind::ItemAttrs => { force!(item_attrs, def_id!()); } - DepKind::CodegenFnAttrs => { force!(codegen_fn_attrs, def_id!()); } - DepKind::FnArgNames => { force!(fn_arg_names, def_id!()); } - DepKind::RenderedConst => { 
force!(rendered_const, def_id!()); } - DepKind::DylibDepFormats => { force!(dylib_dependency_formats, krate!()); } - DepKind::IsPanicRuntime => { force!(is_panic_runtime, krate!()); } - DepKind::IsCompilerBuiltins => { force!(is_compiler_builtins, krate!()); } - DepKind::HasGlobalAllocator => { force!(has_global_allocator, krate!()); } - DepKind::HasPanicHandler => { force!(has_panic_handler, krate!()); } - DepKind::ExternCrate => { force!(extern_crate, def_id!()); } - DepKind::LintLevels => { force!(lint_levels, LOCAL_CRATE); } - DepKind::InScopeTraits => { force!(in_scope_traits_map, def_id!().index); } - DepKind::ModuleExports => { force!(module_exports, def_id!()); } - DepKind::IsSanitizerRuntime => { force!(is_sanitizer_runtime, krate!()); } - DepKind::IsProfilerRuntime => { force!(is_profiler_runtime, krate!()); } - DepKind::GetPanicStrategy => { force!(panic_strategy, krate!()); } - DepKind::IsNoBuiltins => { force!(is_no_builtins, krate!()); } - DepKind::ImplDefaultness => { force!(impl_defaultness, def_id!()); } - DepKind::CheckItemWellFormed => { force!(check_item_well_formed, def_id!()); } - DepKind::CheckTraitItemWellFormed => { force!(check_trait_item_well_formed, def_id!()); } - DepKind::CheckImplItemWellFormed => { force!(check_impl_item_well_formed, def_id!()); } - DepKind::ReachableNonGenerics => { force!(reachable_non_generics, krate!()); } - DepKind::NativeLibraries => { force!(native_libraries, krate!()); } - DepKind::PluginRegistrarFn => { force!(plugin_registrar_fn, krate!()); } - DepKind::ProcMacroDeclsStatic => { force!(proc_macro_decls_static, krate!()); } - DepKind::CrateDisambiguator => { force!(crate_disambiguator, krate!()); } - DepKind::CrateHash => { force!(crate_hash, krate!()); } - DepKind::OriginalCrateName => { force!(original_crate_name, krate!()); } - DepKind::ExtraFileName => { force!(extra_filename, krate!()); } - - DepKind::AllTraitImplementations => { - force!(all_trait_implementations, krate!()); - } - - DepKind::DllimportForeignItems => { - force!(dllimport_foreign_items, krate!()); - } - DepKind::IsDllimportForeignItem => { - force!(is_dllimport_foreign_item, def_id!()); - } - DepKind::IsStaticallyIncludedForeignItem => { - force!(is_statically_included_foreign_item, def_id!()); - } - DepKind::NativeLibraryKind => { force!(native_library_kind, def_id!()); } - DepKind::LinkArgs => { force!(link_args, LOCAL_CRATE); } - - DepKind::ResolveLifetimes => { force!(resolve_lifetimes, krate!()); } - DepKind::NamedRegion => { force!(named_region_map, def_id!().index); } - DepKind::IsLateBound => { force!(is_late_bound_map, def_id!().index); } - DepKind::ObjectLifetimeDefaults => { - force!(object_lifetime_defaults_map, def_id!().index); - } - - DepKind::Visibility => { force!(visibility, def_id!()); } - DepKind::DepKind => { force!(dep_kind, krate!()); } - DepKind::CrateName => { force!(crate_name, krate!()); } - DepKind::ItemChildren => { force!(item_children, def_id!()); } - DepKind::ExternModStmtCnum => { force!(extern_mod_stmt_cnum, def_id!()); } - DepKind::GetLibFeatures => { force!(get_lib_features, LOCAL_CRATE); } - DepKind::DefinedLibFeatures => { force!(defined_lib_features, krate!()); } - DepKind::GetLangItems => { force!(get_lang_items, LOCAL_CRATE); } - DepKind::DefinedLangItems => { force!(defined_lang_items, krate!()); } - DepKind::MissingLangItems => { force!(missing_lang_items, krate!()); } - DepKind::VisibleParentMap => { force!(visible_parent_map, LOCAL_CRATE); } - DepKind::MissingExternCrateItem => { - force!(missing_extern_crate_item, 
krate!()); - } - DepKind::UsedCrateSource => { force!(used_crate_source, krate!()); } - DepKind::PostorderCnums => { force!(postorder_cnums, LOCAL_CRATE); } - - DepKind::Freevars => { force!(freevars, def_id!()); } - DepKind::MaybeUnusedTraitImport => { - force!(maybe_unused_trait_import, def_id!()); - } - DepKind::MaybeUnusedExternCrates => { force!(maybe_unused_extern_crates, LOCAL_CRATE); } - DepKind::StabilityIndex => { force!(stability_index, LOCAL_CRATE); } - DepKind::AllTraits => { force!(all_traits, LOCAL_CRATE); } - DepKind::AllCrateNums => { force!(all_crate_nums, LOCAL_CRATE); } - DepKind::ExportedSymbols => { force!(exported_symbols, krate!()); } - DepKind::CollectAndPartitionMonoItems => { - force!(collect_and_partition_mono_items, LOCAL_CRATE); - } - DepKind::IsCodegenedItem => { force!(is_codegened_item, def_id!()); } - DepKind::OutputFilenames => { force!(output_filenames, LOCAL_CRATE); } - - DepKind::TargetFeaturesWhitelist => { force!(target_features_whitelist, LOCAL_CRATE); } - - DepKind::Features => { force!(features_query, LOCAL_CRATE); } - - DepKind::ProgramClausesFor => { force!(program_clauses_for, def_id!()); } - DepKind::WasmImportModuleMap => { force!(wasm_import_module_map, krate!()); } - DepKind::ForeignModules => { force!(foreign_modules, krate!()); } - - DepKind::UpstreamMonomorphizations => { - force!(upstream_monomorphizations, krate!()); - } - DepKind::UpstreamMonomorphizationsFor => { - force!(upstream_monomorphizations_for, def_id!()); - } - } + DepKind::Analysis => { force!(analysis, krate!()); } + ); true } @@ -1289,13 +1225,13 @@ macro_rules! impl_load_from_cache { // Check whether the query invocation corresponding to the given // DepNode is eligible for on-disk-caching. pub fn cache_on_disk(&self, tcx: TyCtxt<'_, '_, '_>) -> bool { - use ty::query::queries; - use ty::query::QueryDescription; + use crate::ty::query::queries; + use crate::ty::query::QueryDescription; match self.kind { $(DepKind::$dep_kind => { let def_id = self.extract_def_id(tcx).unwrap(); - queries::$query_name::cache_on_disk(def_id) + queries::$query_name::cache_on_disk(tcx.global_tcx(), def_id) })* _ => false } @@ -1327,19 +1263,18 @@ macro_rules! 
impl_load_from_cache { } impl_load_from_cache!( - TypeckTables => typeck_tables_of, - MirOptimized => optimized_mir, - UnsafetyCheckResult => unsafety_check_result, - BorrowCheck => borrowck, - MirBorrowCheck => mir_borrowck, - MirConstQualif => mir_const_qualif, - SymbolName => def_symbol_name, - ConstIsRvaluePromotableToStatic => const_is_rvalue_promotable_to_static, - CheckMatch => check_match, - TypeOfItem => type_of, - GenericsOfItem => generics_of, - PredicatesOfItem => predicates_of, - UsedTraitImports => used_trait_imports, - CodegenFnAttrs => codegen_fn_attrs, - SpecializationGraph => specialization_graph_of, + typeck_tables_of => typeck_tables_of, + optimized_mir => optimized_mir, + unsafety_check_result => unsafety_check_result, + borrowck => borrowck, + mir_borrowck => mir_borrowck, + mir_const_qualif => mir_const_qualif, + const_is_rvalue_promotable_to_static => const_is_rvalue_promotable_to_static, + check_match => check_match, + type_of => type_of, + generics_of => generics_of, + predicates_of => predicates_of, + used_trait_imports => used_trait_imports, + codegen_fn_attrs => codegen_fn_attrs, + specialization_graph_of => specialization_graph_of, ); diff --git a/src/librustc/ty/query/values.rs b/src/librustc/ty/query/values.rs index d3d0624879240..a4b8d365a12ef 100644 --- a/src/librustc/ty/query/values.rs +++ b/src/librustc/ty/query/values.rs @@ -1,14 +1,5 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use ty::{self, Ty, TyCtxt}; +use crate::ty::{self, Ty, TyCtxt, AdtSizedConstraint}; +use crate::ty::util::NeedsDrop; use syntax::symbol::Symbol; @@ -41,3 +32,14 @@ impl<'tcx> Value<'tcx> for ty::SymbolName { } } +impl<'tcx> Value<'tcx> for NeedsDrop { + fn from_cycle_error(_: TyCtxt<'_, 'tcx, 'tcx>) -> Self { + NeedsDrop(false) + } +} + +impl<'tcx> Value<'tcx> for AdtSizedConstraint<'tcx> { + fn from_cycle_error(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> Self { + AdtSizedConstraint(tcx.intern_type_list(&[tcx.types.err])) + } +} diff --git a/src/librustc/ty/relate.rs b/src/librustc/ty/relate.rs index 88c3e5c871566..810bd10c8f4f7 100644 --- a/src/librustc/ty/relate.rs +++ b/src/librustc/ty/relate.rs @@ -1,32 +1,21 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Generalized type relating mechanism. //! //! A type relation `R` relates a pair of values `(A, B)`. `A and B` are usually //! types or regions but can be other things. Examples of type relations are //! subtyping, type equality, etc. 
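The module docs above describe type relations (subtyping, equality, and so on) as pairwise walks over two values of the same shape. A toy illustration of that lockstep structural recursion; it is not the real `TypeRelation` trait and uses an invented `Ty` enum:

    #[derive(Debug, Clone, PartialEq)]
    enum Ty {
        Int,
        Ref(Box<Ty>),
        Tuple(Vec<Ty>),
    }

    type RelateResult = Result<Ty, String>;

    // Relate two types structurally, failing on any shape mismatch.
    fn relate(a: &Ty, b: &Ty) -> RelateResult {
        match (a, b) {
            (Ty::Int, Ty::Int) => Ok(Ty::Int),
            (Ty::Ref(a_t), Ty::Ref(b_t)) => Ok(Ty::Ref(Box::new(relate(a_t, b_t)?))),
            (Ty::Tuple(a_ts), Ty::Tuple(b_ts)) if a_ts.len() == b_ts.len() => {
                let ts = a_ts.iter().zip(b_ts)
                    .map(|(x, y)| relate(x, y))
                    .collect::<Result<Vec<_>, _>>()?;
                Ok(Ty::Tuple(ts))
            }
            _ => Err(format!("cannot relate {:?} and {:?}", a, b)),
        }
    }

    fn main() {
        let a = Ty::Tuple(vec![Ty::Int, Ty::Ref(Box::new(Ty::Int))]);
        assert_eq!(relate(&a, &a.clone()), Ok(a));
    }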
-use hir::def_id::DefId; -use mir::interpret::ConstValue; -use ty::subst::{Kind, UnpackedKind, Substs}; -use ty::{self, Ty, TyCtxt, TypeFoldable}; -use ty::error::{ExpectedFound, TypeError}; -use mir::interpret::GlobalId; -use util::common::ErrorReported; +use crate::hir::def_id::DefId; +use crate::ty::subst::{Kind, UnpackedKind, SubstsRef}; +use crate::ty::{self, Ty, TyCtxt, TypeFoldable}; +use crate::ty::error::{ExpectedFound, TypeError}; +use crate::mir::interpret::{GlobalId, ConstValue}; +use crate::util::common::ErrorReported; use syntax_pos::DUMMY_SP; use std::rc::Rc; use std::iter; use rustc_target::spec::abi; -use hir as ast; -use traits; +use crate::hir as ast; +use crate::traits; pub type RelateResult<'tcx, T> = Result>; @@ -35,24 +24,13 @@ pub enum Cause { ExistentialRegionBound, // relating an existential region bound } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum TraitObjectMode { - NoSquash, - /// A temporary mode to treat `Send + Sync = Sync + Send`, should be - /// used only in coherence. - SquashAutoTraitsIssue33140 -} - pub trait TypeRelation<'a, 'gcx: 'a+'tcx, 'tcx: 'a> : Sized { fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx>; - /// Return the trait object mode to be used. - fn trait_object_mode(&self) -> TraitObjectMode; - /// Returns a static string we can use for printouts. fn tag(&self) -> &'static str; - /// Returns true if the value `a` is the "expected" type in the + /// Returns `true` if the value `a` is the "expected" type in the /// relation. Just affects error messages. fn a_is_expected(&self) -> bool; @@ -72,9 +50,9 @@ pub trait TypeRelation<'a, 'gcx: 'a+'tcx, 'tcx: 'a> : Sized { /// accordingly. fn relate_item_substs(&mut self, item_def_id: DefId, - a_subst: &'tcx Substs<'tcx>, - b_subst: &'tcx Substs<'tcx>) - -> RelateResult<'tcx, &'tcx Substs<'tcx>> + a_subst: SubstsRef<'tcx>, + b_subst: SubstsRef<'tcx>) + -> RelateResult<'tcx, SubstsRef<'tcx>> { debug!("relate_item_substs(item_def_id={:?}, a_subst={:?}, b_subst={:?})", item_def_id, @@ -145,9 +123,9 @@ impl<'tcx> Relate<'tcx> for ty::TypeAndMut<'tcx> { pub fn relate_substs<'a, 'gcx, 'tcx, R>(relation: &mut R, variances: Option<&Vec>, - a_subst: &'tcx Substs<'tcx>, - b_subst: &'tcx Substs<'tcx>) - -> RelateResult<'tcx, &'tcx Substs<'tcx>> + a_subst: SubstsRef<'tcx>, + b_subst: SubstsRef<'tcx>) + -> RelateResult<'tcx, SubstsRef<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { let tcx = relation.tcx(); @@ -169,9 +147,9 @@ impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> { { let tcx = relation.tcx(); - if a.variadic != b.variadic { + if a.c_variadic != b.c_variadic { return Err(TypeError::VariadicMismatch( - expected_found(relation, &a.variadic, &b.variadic))); + expected_found(relation, &a.c_variadic, &b.c_variadic))); } let unsafety = relation.relate(&a.unsafety, &b.unsafety)?; let abi = relation.relate(&a.abi, &b.abi)?; @@ -193,7 +171,7 @@ impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> { }); Ok(ty::FnSig { inputs_and_output: tcx.mk_type_list(inputs_and_output)?, - variadic: a.variadic, + c_variadic: a.c_variadic, unsafety, abi, }) @@ -373,10 +351,8 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { let tcx = relation.tcx(); - let a_sty = &a.sty; - let b_sty = &b.sty; - debug!("super_relate_tys: a_sty={:?} b_sty={:?}", a_sty, b_sty); - match (a_sty, b_sty) { + debug!("super_relate_tys: a={:?} b={:?}", a, b); + match (&a.sty, &b.sty) { (&ty::Infer(_), _) | (_, &ty::Infer(_)) => { @@ -490,12 +466,7 @@ pub fn 
super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) => { let t = relation.relate(&a_t, &b_t)?; - assert_eq!(sz_a.ty, tcx.types.usize); - assert_eq!(sz_b.ty, tcx.types.usize); - let to_u64 = |x: &'tcx ty::Const<'tcx>| -> Result { - if let Some(s) = x.assert_usize(tcx) { - return Ok(s); - } + let to_u64 = |x: ty::Const<'tcx>| -> Result { match x.val { ConstValue::Unevaluated(def_id, substs) => { // FIXME(eddyb) get the right param_env. @@ -523,14 +494,14 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R, "array length could not be evaluated"); Err(ErrorReported) } - _ => { + _ => x.assert_usize(tcx).ok_or_else(|| { tcx.sess.delay_span_bug(DUMMY_SP, - &format!("arrays should not have {:?} as length", x)); - Err(ErrorReported) - } + "array length could not be evaluated"); + ErrorReported + }) } }; - match (to_u64(sz_a), to_u64(sz_b)) { + match (to_u64(*sz_a), to_u64(*sz_b)) { (Ok(sz_a_u64), Ok(sz_b_u64)) => { if sz_a_u64 == sz_b_u64 { Ok(tcx.mk_ty(ty::Array(t, sz_a))) @@ -607,45 +578,15 @@ impl<'tcx> Relate<'tcx> for &'tcx ty::List> { a: &Self, b: &Self) -> RelateResult<'tcx, Self> - where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { - use ty::ExistentialPredicate::*; + where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { - let tcx = relation.tcx(); - let (a_buf, b_buf); - let (a_norm, b_norm): (&[_], &[_]) = match relation.trait_object_mode() { - TraitObjectMode::NoSquash => { - (a, b) - } - TraitObjectMode::SquashAutoTraitsIssue33140 => { - // Treat auto-trait "principal" components as equal - // to the non-principal components, to make - // `dyn Send+Sync = dyn Sync+Send`. - let normalize = |d: &[ty::ExistentialPredicate<'tcx>]| { - let mut result: Vec<_> = d.iter().map(|pi| match pi { - Trait(ref a) if tcx.trait_is_auto(a.def_id) => { - AutoTrait(a.def_id) - }, - other => *other - }).collect(); - - result.sort_by(|a, b| a.stable_cmp(tcx, b)); - result.dedup(); - result - }; - - a_buf = normalize(a); - b_buf = normalize(b); - - (&a_buf, &b_buf) - } - }; - - if a_norm.len() != b_norm.len() { + if a.len() != b.len() { return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))); } - let v = a_norm.iter().zip(b_norm.iter()).map(|(ep_a, ep_b)| { - use ty::ExistentialPredicate::*; + let tcx = relation.tcx(); + let v = a.iter().zip(b.iter()).map(|(ep_a, ep_b)| { + use crate::ty::ExistentialPredicate::*; match (*ep_a, *ep_b) { (Trait(ref a), Trait(ref b)) => Ok(Trait(relation.relate(a, b)?)), (Projection(ref a), Projection(ref b)) => Ok(Projection(relation.relate(a, b)?)), @@ -681,11 +622,11 @@ impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> { } } -impl<'tcx> Relate<'tcx> for &'tcx Substs<'tcx> { +impl<'tcx> Relate<'tcx> for SubstsRef<'tcx> { fn relate<'a, 'gcx, R>(relation: &mut R, - a: &&'tcx Substs<'tcx>, - b: &&'tcx Substs<'tcx>) - -> RelateResult<'tcx, &'tcx Substs<'tcx>> + a: &SubstsRef<'tcx>, + b: &SubstsRef<'tcx>) + -> RelateResult<'tcx, SubstsRef<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'a+'tcx, 'tcx: 'a { relate_substs(relation, None, a, b) @@ -762,6 +703,9 @@ impl<'tcx> Relate<'tcx> for Kind<'tcx> { (UnpackedKind::Type(unpacked), x) => { bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x) } + (UnpackedKind::Const(_), _) => { + unimplemented!() // FIXME(const_generics) + } } } } @@ -803,7 +747,7 @@ impl<'tcx> Relate<'tcx> for traits::WhereClause<'tcx> { ) -> RelateResult<'tcx, traits::WhereClause<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 
'tcx, 'tcx: 'a { - use traits::WhereClause::*; + use crate::traits::WhereClause::*; match (a, b) { (Implemented(a_pred), Implemented(b_pred)) => { Ok(Implemented(relation.relate(a_pred, b_pred)?)) @@ -840,7 +784,7 @@ impl<'tcx> Relate<'tcx> for traits::WellFormed<'tcx> { ) -> RelateResult<'tcx, traits::WellFormed<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a { - use traits::WellFormed::*; + use crate::traits::WellFormed::*; match (a, b) { (Trait(a_pred), Trait(b_pred)) => Ok(Trait(relation.relate(a_pred, b_pred)?)), (Ty(a_ty), Ty(b_ty)) => Ok(Ty(relation.relate(a_ty, b_ty)?)), @@ -857,7 +801,7 @@ impl<'tcx> Relate<'tcx> for traits::FromEnv<'tcx> { ) -> RelateResult<'tcx, traits::FromEnv<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a { - use traits::FromEnv::*; + use crate::traits::FromEnv::*; match (a, b) { (Trait(a_pred), Trait(b_pred)) => Ok(Trait(relation.relate(a_pred, b_pred)?)), (Ty(a_ty), Ty(b_ty)) => Ok(Ty(relation.relate(a_ty, b_ty)?)), @@ -874,7 +818,7 @@ impl<'tcx> Relate<'tcx> for traits::DomainGoal<'tcx> { ) -> RelateResult<'tcx, traits::DomainGoal<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a { - use traits::DomainGoal::*; + use crate::traits::DomainGoal::*; match (a, b) { (Holds(a_wc), Holds(b_wc)) => Ok(Holds(relation.relate(a_wc, b_wc)?)), (WellFormed(a_wf), WellFormed(b_wf)) => Ok(WellFormed(relation.relate(a_wf, b_wf)?)), @@ -897,7 +841,7 @@ impl<'tcx> Relate<'tcx> for traits::Goal<'tcx> { ) -> RelateResult<'tcx, traits::Goal<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a { - use traits::GoalKind::*; + use crate::traits::GoalKind::*; match (a, b) { (Implies(a_clauses, a_goal), Implies(b_clauses, b_goal)) => { let clauses = relation.relate(a_clauses, b_clauses)?; @@ -961,7 +905,7 @@ impl<'tcx> Relate<'tcx> for traits::Clause<'tcx> { ) -> RelateResult<'tcx, traits::Clause<'tcx>> where R: TypeRelation<'a, 'gcx, 'tcx>, 'gcx: 'tcx, 'tcx: 'a { - use traits::Clause::*; + use crate::traits::Clause::*; match (a, b) { (Implies(a_clause), Implies(b_clause)) => { let clause = relation.relate(a_clause, b_clause)?; diff --git a/src/librustc/ty/steal.rs b/src/librustc/ty/steal.rs index fc3353e339b4d..a8f9301ba51c9 100644 --- a/src/librustc/ty/steal.rs +++ b/src/librustc/ty/steal.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::sync::{RwLock, ReadGuard, MappedReadGuard}; /// The `Steal` struct is intended to used as the value for a query. @@ -22,14 +12,14 @@ use rustc_data_structures::sync::{RwLock, ReadGuard, MappedReadGuard}; /// Steal>` (to be very specific). Now we can read from this /// as much as we want (using `borrow()`), but you can also /// `steal()`. Once you steal, any further attempt to read will panic. -/// Therefore we know that -- assuming no ICE -- nobody is observing +/// Therefore, we know that -- assuming no ICE -- nobody is observing /// the fact that the MIR was updated. /// /// Obviously, whenever you have a query that yields a `Steal` value, /// you must treat it with caution, and make sure that you know that /// -- once the value is stolen -- it will never be read from again. 
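The `Steal` documentation above boils down to: read freely until someone takes ownership, after which any further read is a bug. A minimal sketch of that idea using `RefCell<Option<T>>` in place of the `RwLock`-based type in this file:

    use std::cell::{Ref, RefCell};

    struct Steal<T> {
        value: RefCell<Option<T>>,
    }

    impl<T> Steal<T> {
        fn new(value: T) -> Self {
            Steal { value: RefCell::new(Some(value)) }
        }

        // Reading is fine as long as nobody has stolen the value yet.
        fn borrow(&self) -> Ref<'_, T> {
            Ref::map(self.value.borrow(), |v| {
                v.as_ref().expect("attempted to read a stolen value")
            })
        }

        // Take the value out; every later `borrow()` will panic.
        fn steal(&self) -> T {
            self.value.borrow_mut().take().expect("value already stolen")
        }
    }

    fn main() {
        let mir = Steal::new(vec![1, 2, 3]);
        assert_eq!(mir.borrow().len(), 3); // reading before the steal is fine
        let owned = mir.steal();
        assert_eq!(owned, vec![1, 2, 3]);
        // `mir.borrow()` here would panic, as the doc comment above describes.
    }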
-/// -/// FIXME(#41710) -- what is the best way to model linear queries? +// +// FIXME(#41710): what is the best way to model linear queries? pub struct Steal { value: RwLock> } diff --git a/src/librustc/ty/structural_impls.rs b/src/librustc/ty/structural_impls.rs index d6aeb288b5cdc..262dc30033472 100644 --- a/src/librustc/ty/structural_impls.rs +++ b/src/librustc/ty/structural_impls.rs @@ -1,28 +1,290 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module contains implements of the `Lift` and `TypeFoldable` //! traits for various types in the Rust compiler. Most are written by //! hand, though we've recently added some macros (e.g., //! `BraceStructLiftImpl!`) to help with the tedium. -use mir::ProjectionKind; -use mir::interpret::ConstValue; -use ty::{self, Lift, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use crate::hir::def::Namespace; +use crate::mir::ProjectionKind; +use crate::mir::interpret::ConstValue; +use crate::ty::{self, Lift, Ty, TyCtxt, ConstVid}; +use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use crate::ty::print::{FmtPrinter, Printer}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use smallvec::SmallVec; -use mir::interpret; +use crate::mir::interpret; +use std::fmt; +use std::marker::PhantomData; use std::rc::Rc; +impl fmt::Debug for ty::GenericParamDef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let type_name = match self.kind { + ty::GenericParamDefKind::Lifetime => "Lifetime", + ty::GenericParamDefKind::Type {..} => "Type", + ty::GenericParamDefKind::Const => "Const", + }; + write!(f, "{}({}, {:?}, {})", + type_name, + self.name, + self.def_id, + self.index) + } +} + +impl fmt::Debug for ty::TraitDef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + FmtPrinter::new(tcx, f, Namespace::TypeNS) + .print_def_path(self.def_id, &[])?; + Ok(()) + }) + } +} + +impl fmt::Debug for ty::AdtDef { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(|tcx| { + FmtPrinter::new(tcx, f, Namespace::TypeNS) + .print_def_path(self.did, &[])?; + Ok(()) + }) + } +} + +impl fmt::Debug for ty::ClosureUpvar<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ClosureUpvar({:?},{:?})", + self.def, + self.ty) + } +} + +impl fmt::Debug for ty::UpvarId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let name = ty::tls::with(|tcx| { + tcx.hir().name_by_hir_id(self.var_path.hir_id) + }); + write!(f, "UpvarId({:?};`{}`;{:?})", + self.var_path.hir_id, + name, + self.closure_expr_id) + } +} + +impl fmt::Debug for ty::UpvarBorrow<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "UpvarBorrow({:?}, {:?})", + self.kind, self.region) + } +} + +impl fmt::Debug for ty::ExistentialTraitRef<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Debug for ty::adjustment::Adjustment<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?} -> {}", self.kind, self.target) + } +} + +impl fmt::Debug for ty::BoundRegion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match 
*self { + ty::BrAnon(n) => write!(f, "BrAnon({:?})", n), + ty::BrFresh(n) => write!(f, "BrFresh({:?})", n), + ty::BrNamed(did, name) => { + write!(f, "BrNamed({:?}:{:?}, {})", + did.krate, did.index, name) + } + ty::BrEnv => write!(f, "BrEnv"), + } + } +} + +impl fmt::Debug for ty::RegionKind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ty::ReEarlyBound(ref data) => { + write!(f, "ReEarlyBound({}, {})", + data.index, + data.name) + } + + ty::ReClosureBound(ref vid) => { + write!(f, "ReClosureBound({:?})", vid) + } + + ty::ReLateBound(binder_id, ref bound_region) => { + write!(f, "ReLateBound({:?}, {:?})", binder_id, bound_region) + } + + ty::ReFree(ref fr) => fr.fmt(f), + + ty::ReScope(id) => write!(f, "ReScope({:?})", id), + + ty::ReStatic => write!(f, "ReStatic"), + + ty::ReVar(ref vid) => vid.fmt(f), + + ty::RePlaceholder(placeholder) => { + write!(f, "RePlaceholder({:?})", placeholder) + } + + ty::ReEmpty => write!(f, "ReEmpty"), + + ty::ReErased => write!(f, "ReErased"), + } + } +} + +impl fmt::Debug for ty::FreeRegion { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region) + } +} + +impl fmt::Debug for ty::Variance { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(match *self { + ty::Covariant => "+", + ty::Contravariant => "-", + ty::Invariant => "o", + ty::Bivariant => "*", + }) + } +} + +impl fmt::Debug for ty::FnSig<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "({:?}; c_variadic: {})->{:?}", + self.inputs(), self.c_variadic, self.output()) + } +} + +impl fmt::Debug for ty::TyVid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_#{}t", self.index) + } +} + +impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_#{}c", self.index) + } +} + +impl fmt::Debug for ty::IntVid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_#{}i", self.index) + } +} + +impl fmt::Debug for ty::FloatVid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "_#{}f", self.index) + } +} + +impl fmt::Debug for ty::RegionVid { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "'_#{}r", self.index()) + } +} + +impl fmt::Debug for ty::InferTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ty::TyVar(ref v) => v.fmt(f), + ty::IntVar(ref v) => v.fmt(f), + ty::FloatVar(ref v) => v.fmt(f), + ty::FreshTy(v) => write!(f, "FreshTy({:?})", v), + ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v), + ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v), + } + } +} + +impl fmt::Debug for ty::IntVarValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ty::IntType(ref v) => v.fmt(f), + ty::UintType(ref v) => v.fmt(f), + } + } +} + +impl fmt::Debug for ty::FloatVarValue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl fmt::Debug for ty::TraitRef<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // FIXME(#59188) this is used across the compiler to print + // a `TraitRef` qualified (with the Self type explicit), + // instead of having a different way to make that choice. 
+ write!(f, "<{} as {}>", self.self_ty(), self) + } +} + +impl fmt::Debug for Ty<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } +} + +impl fmt::Debug for ty::ParamTy { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/#{}", self.name, self.idx) + } +} + +impl fmt::Debug for ty::ParamConst { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}/#{}", self.name, self.index) + } +} + +impl fmt::Debug for ty::TraitPredicate<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "TraitPredicate({:?})", self.trait_ref) + } +} + +impl fmt::Debug for ty::ProjectionPredicate<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "ProjectionPredicate({:?}, {:?})", self.projection_ty, self.ty) + } +} + +impl fmt::Debug for ty::Predicate<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + ty::Predicate::Trait(ref a) => a.fmt(f), + ty::Predicate::Subtype(ref pair) => pair.fmt(f), + ty::Predicate::RegionOutlives(ref pair) => pair.fmt(f), + ty::Predicate::TypeOutlives(ref pair) => pair.fmt(f), + ty::Predicate::Projection(ref pair) => pair.fmt(f), + ty::Predicate::WellFormed(ty) => write!(f, "WellFormed({:?})", ty), + ty::Predicate::ObjectSafe(trait_def_id) => { + write!(f, "ObjectSafe({:?})", trait_def_id) + } + ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { + write!(f, "ClosureKind({:?}, {:?}, {:?})", + closure_def_id, closure_substs, kind) + } + ty::Predicate::ConstEvaluatable(def_id, substs) => { + write!(f, "ConstEvaluatable({:?}, {:?})", def_id, substs) + } + } + } +} + /////////////////////////////////////////////////////////////////////////// // Atomic structs // @@ -33,40 +295,47 @@ CloneTypeFoldableAndLiftImpls! { (), bool, usize, - ::ty::layout::VariantIdx, + crate::ty::layout::VariantIdx, u64, - ::middle::region::Scope, + String, + crate::middle::region::Scope, ::syntax::ast::FloatTy, ::syntax::ast::NodeId, ::syntax_pos::symbol::Symbol, - ::hir::def::Def, - ::hir::def_id::DefId, - ::hir::InlineAsm, - ::hir::MatchSource, - ::hir::Mutability, - ::hir::Unsafety, + crate::hir::def::Def, + crate::hir::def_id::DefId, + crate::hir::InlineAsm, + crate::hir::MatchSource, + crate::hir::Mutability, + crate::hir::Unsafety, ::rustc_target::spec::abi::Abi, - ::mir::Local, - ::mir::Promoted, - ::traits::Reveal, - ::ty::adjustment::AutoBorrowMutability, - ::ty::AdtKind, + crate::mir::Local, + crate::mir::Promoted, + crate::traits::Reveal, + crate::ty::adjustment::AutoBorrowMutability, + crate::ty::AdtKind, // Including `BoundRegion` is a *bit* dubious, but direct // references to bound region appear in `ty::Error`, and aren't // really meant to be folded. In general, we can only fold a fully // general `Region`. - ::ty::BoundRegion, - ::ty::ClosureKind, - ::ty::IntVarValue, - ::ty::ParamTy, - ::ty::UniverseIndex, - ::ty::Variance, + crate::ty::BoundRegion, + crate::ty::Placeholder, + crate::ty::ClosureKind, + crate::ty::FreeRegion, + crate::ty::InferTy, + crate::ty::IntVarValue, + crate::ty::ParamConst, + crate::ty::ParamTy, + crate::ty::RegionVid, + crate::ty::UniverseIndex, + crate::ty::Variance, ::syntax_pos::Span, } /////////////////////////////////////////////////////////////////////////// // Lift implementations +// FIXME(eddyb) replace all the uses of `Option::map` with `?`. 
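The `Lift` implementations that follow all have the same shape: rebuild the value piece by piece in the target context and return `None` as soon as any piece fails, which is exactly the pattern the FIXME above suggests writing with `?`. A toy version with an invented `Tcx` interner, purely for illustration:

    struct Tcx { interned: Vec<String> }

    trait Lift {
        type Lifted;
        fn lift_to_tcx(&self, tcx: &Tcx) -> Option<Self::Lifted>;
    }

    // A "leaf" lift: succeed only if the target context already knows the value.
    impl Lift for str {
        type Lifted = usize; // index into the toy interner
        fn lift_to_tcx(&self, tcx: &Tcx) -> Option<usize> {
            tcx.interned.iter().position(|s| s.as_str() == self)
        }
    }

    // Compound values lift component-wise, like the `(A, B)` impl below.
    impl<A: Lift, B: Lift> Lift for (A, B) {
        type Lifted = (A::Lifted, B::Lifted);
        fn lift_to_tcx(&self, tcx: &Tcx) -> Option<Self::Lifted> {
            Some((self.0.lift_to_tcx(tcx)?, self.1.lift_to_tcx(tcx)?))
        }
    }

    fn main() {
        let tcx = Tcx { interned: vec!["i32".to_string(), "u8".to_string()] };
        assert_eq!("u8".lift_to_tcx(&tcx), Some(1));
        assert_eq!("f64".lift_to_tcx(&tcx), None);
    }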
impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { type Lifted = (A::Lifted, B::Lifted); fn lift_to_tcx<'a, 'gcx>(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Option { @@ -163,6 +432,23 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> { } } +impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> { + type Lifted = ty::ExistentialPredicate<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + match self { + ty::ExistentialPredicate::Trait(x) => { + tcx.lift(x).map(ty::ExistentialPredicate::Trait) + } + ty::ExistentialPredicate::Projection(x) => { + tcx.lift(x).map(ty::ExistentialPredicate::Projection) + } + ty::ExistentialPredicate::AutoTrait(def_id) => { + Some(ty::ExistentialPredicate::AutoTrait(*def_id)) + } + } + } +} + impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { type Lifted = ty::TraitPredicate<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) @@ -285,6 +571,7 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { ty::ParamEnv { reveal: self.reveal, caller_bounds, + def_id: self.def_id, } }) } @@ -343,8 +630,8 @@ impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> { Some(ty::adjustment::Adjust::ReifyFnPointer), ty::adjustment::Adjust::UnsafeFnPointer => Some(ty::adjustment::Adjust::UnsafeFnPointer), - ty::adjustment::Adjust::ClosureFnPointer => - Some(ty::adjustment::Adjust::ClosureFnPointer), + ty::adjustment::Adjust::ClosureFnPointer(unsafety) => + Some(ty::adjustment::Adjust::ClosureFnPointer(unsafety)), ty::adjustment::Adjust::MutToConstPointer => Some(ty::adjustment::Adjust::MutToConstPointer), ty::adjustment::Adjust::Unsize => @@ -404,7 +691,7 @@ impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> { tcx.lift(&self.inputs_and_output).map(|x| { ty::FnSig { inputs_and_output: x, - variadic: self.variadic, + c_variadic: self.c_variadic, unsafety: self.unsafety, abi: self.abi, } @@ -429,7 +716,7 @@ impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound { impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { type Lifted = ty::error::TypeError<'tcx>; fn lift_to_tcx<'b, 'gcx>(&self, tcx: TyCtxt<'b, 'gcx, 'tcx>) -> Option { - use ty::error::TypeError::*; + use crate::ty::error::TypeError::*; Some(match *self { Mismatch => Mismatch, @@ -448,6 +735,7 @@ impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { RegionsOverlyPolymorphic(a, b) => { return tcx.lift(&b).map(|b| RegionsOverlyPolymorphic(a, b)) } + RegionsPlaceholderMismatch => RegionsPlaceholderMismatch, IntMismatch(x) => IntMismatch(x), FloatMismatch(x) => FloatMismatch(x), Traits(x) => Traits(x), @@ -485,6 +773,13 @@ impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { } } +BraceStructLiftImpl! { + impl<'a, 'tcx> Lift<'tcx> for ty::TypeAndMut<'a> { + type Lifted = ty::TypeAndMut<'tcx>; + ty, mutbl + } +} + BraceStructLiftImpl! { impl<'a, 'tcx> Lift<'tcx> for ty::Instance<'a> { type Lifted = ty::Instance<'tcx>; @@ -499,6 +794,16 @@ BraceStructLiftImpl! { } } +impl<'a, 'tcx> Lift<'tcx> for ConstVid<'a> { + type Lifted = ConstVid<'tcx>; + fn lift_to_tcx<'b, 'gcx>(&self, _: TyCtxt<'b, 'gcx, 'tcx>) -> Option { + Some(ConstVid { + index: self.index, + phantom: PhantomData, + }) + } +} + /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. // @@ -598,7 +903,7 @@ impl<'tcx, T:TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder { } BraceStructTypeFoldableImpl! 
{ - impl<'tcx> TypeFoldable<'tcx> for ty::ParamEnv<'tcx> { reveal, caller_bounds } + impl<'tcx> TypeFoldable<'tcx> for ty::ParamEnv<'tcx> { reveal, caller_bounds, def_id } } impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { @@ -631,7 +936,7 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { } } -impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { +impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let v = self.iter().map(|t| t.fold_with(folder)).collect::>(); folder.tcx().intern_projs(&v) @@ -644,7 +949,7 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List> { impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - use ty::InstanceDef::*; + use crate::ty::InstanceDef::*; Self { substs: self.substs.fold_with(folder), def: match self.def { @@ -675,7 +980,7 @@ impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { } fn super_visit_with>(&self, visitor: &mut V) -> bool { - use ty::InstanceDef::*; + use crate::ty::InstanceDef::*; self.substs.visit_with(visitor) || match self.def { Item(did) | VtableShim(did) | Intrinsic(did) | Virtual(did, _) => { @@ -819,7 +1124,7 @@ BraceStructTypeFoldableImpl! { BraceStructTypeFoldableImpl! { impl<'tcx> TypeFoldable<'tcx> for ty::FnSig<'tcx> { - inputs_and_output, variadic, unsafety, abi + inputs_and_output, c_variadic, unsafety, abi } } @@ -882,7 +1187,7 @@ EnumTypeFoldableImpl! { (ty::adjustment::Adjust::NeverToAny), (ty::adjustment::Adjust::ReifyFnPointer), (ty::adjustment::Adjust::UnsafeFnPointer), - (ty::adjustment::Adjust::ClosureFnPointer), + (ty::adjustment::Adjust::ClosureFnPointer)(a), (ty::adjustment::Adjust::MutToConstPointer), (ty::adjustment::Adjust::Unsize), (ty::adjustment::Adjust::Deref)(a), @@ -1016,6 +1321,7 @@ EnumTypeFoldableImpl! { (ty::error::TypeError::RegionsDoesNotOutlive)(a, b), (ty::error::TypeError::RegionsInsufficientlyPolymorphic)(a, b), (ty::error::TypeError::RegionsOverlyPolymorphic)(a, b), + (ty::error::TypeError::RegionsPlaceholderMismatch), (ty::error::TypeError::IntMismatch)(x), (ty::error::TypeError::FloatMismatch)(x), (ty::error::TypeError::Traits)(x), @@ -1028,28 +1334,6 @@ EnumTypeFoldableImpl! 
{ } } -impl<'tcx> TypeFoldable<'tcx> for ConstValue<'tcx> { - fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { - match *self { - ConstValue::Scalar(v) => ConstValue::Scalar(v), - ConstValue::ScalarPair(a, b) => ConstValue::ScalarPair(a, b), - ConstValue::ByRef(id, alloc, offset) => ConstValue::ByRef(id, alloc, offset), - ConstValue::Unevaluated(def_id, substs) => { - ConstValue::Unevaluated(def_id, substs.fold_with(folder)) - } - } - } - - fn super_visit_with>(&self, visitor: &mut V) -> bool { - match *self { - ConstValue::Scalar(_) | - ConstValue::ScalarPair(_, _) | - ConstValue::ByRef(_, _, _) => false, - ConstValue::Unevaluated(_, substs) => substs.visit_with(visitor), - } - } -} - impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let ty = self.ty.fold_with(folder); @@ -1072,3 +1356,30 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Const<'tcx> { visitor.visit_const(self) } } + +impl<'tcx> TypeFoldable<'tcx> for ConstValue<'tcx> { + fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { + match *self { + ConstValue::ByRef(ptr, alloc) => ConstValue::ByRef(ptr, alloc), + // FIXME(const_generics): implement TypeFoldable for InferConst + ConstValue::Infer(ic) => ConstValue::Infer(ic), + ConstValue::Param(p) => ConstValue::Param(p.fold_with(folder)), + ConstValue::Scalar(a) => ConstValue::Scalar(a), + ConstValue::Slice(a, b) => ConstValue::Slice(a, b), + ConstValue::Unevaluated(did, substs) + => ConstValue::Unevaluated(did, substs.fold_with(folder)), + } + } + + fn super_visit_with>(&self, visitor: &mut V) -> bool { + match *self { + ConstValue::ByRef(..) => false, + // FIXME(const_generics): implement TypeFoldable for InferConst + ConstValue::Infer(_ic) => false, + ConstValue::Param(p) => p.visit_with(visitor), + ConstValue::Scalar(_) => false, + ConstValue::Slice(..) => false, + ConstValue::Unevaluated(_, substs) => substs.visit_with(visitor), + } + } +} diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index f757f48e987d8..df76e6127e842 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -1,31 +1,23 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module contains `TyKind` and its major components. 
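As a rough orientation for the module this hunk rewrites, here is a drastically reduced, self-contained analogue of a `TyKind`-style representation (illustrative only, not part of the patch; `MiniTy` and `describe` are invented names): a tree-shaped enum of type constructors that code inspects by pattern matching, just as the compiler matches on `ty.sty` in the changes that follow.

enum MiniTy {
    Bool,
    Int(u32),            // bit width, standing in for `ast::IntTy`
    Ref(Box<MiniTy>),    // standing in for `Ref(Region, Ty, Mutability)`
    Tuple(Vec<MiniTy>),
}

fn describe(ty: &MiniTy) -> String {
    match ty {
        MiniTy::Bool => "bool".to_string(),
        MiniTy::Int(bits) => format!("i{}", bits),
        MiniTy::Ref(inner) => format!("&{}", describe(inner)),
        MiniTy::Tuple(elems) => {
            let parts: Vec<String> = elems.iter().map(describe).collect();
            format!("({})", parts.join(", "))
        }
    }
}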
-use hir; -use hir::def_id::DefId; -use infer::canonical::Canonical; -use mir::interpret::ConstValue; -use middle::region; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::infer::canonical::Canonical; +use crate::mir::interpret::{ConstValue, truncate}; +use crate::middle::region; use polonius_engine::Atom; use rustc_data_structures::indexed_vec::Idx; -use ty::subst::{Substs, Subst, Kind, UnpackedKind}; -use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; -use ty::{List, TyS, ParamEnvAnd, ParamEnv}; -use util::captures::Captures; -use mir::interpret::{Scalar, Pointer}; +use rustc_macros::HashStable; +use crate::ty::subst::{InternalSubsts, Subst, SubstsRef, Kind, UnpackedKind}; +use crate::ty::{self, AdtDef, DefIdTree, TypeFlags, Ty, TyCtxt, TypeFoldable}; +use crate::ty::{List, TyS, ParamEnvAnd, ParamEnv}; +use crate::util::captures::Captures; +use crate::mir::interpret::{Scalar, Pointer}; use smallvec::SmallVec; use std::iter; use std::cmp::Ordering; +use std::marker::PhantomData; use rustc_target::spec::abi; use syntax::ast::{self, Ident}; use syntax::symbol::{keywords, InternedString}; @@ -34,14 +26,15 @@ use serialize; use self::InferTy::*; use self::TyKind::*; -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, + Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct TypeAndMut<'tcx> { pub ty: Ty<'tcx>, pub mutbl: hir::Mutability, } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable, Copy)] + RustcEncodable, RustcDecodable, Copy, HashStable)] /// A "free" region `fr` can be interpreted as "some region /// at least as big as the scope `fr.scope`". pub struct FreeRegion { @@ -50,14 +43,14 @@ pub struct FreeRegion { } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, - RustcEncodable, RustcDecodable, Copy)] + RustcEncodable, RustcDecodable, Copy, HashStable)] pub enum BoundRegion { /// An anonymous region parameter for a given fn (&T) BrAnon(u32), /// Named region parameters for functions (a in &'a T) /// - /// The def-id is needed to distinguish free regions in + /// The `DefId` is needed to distinguish free regions in /// the event of shadowing. BrNamed(DefId, InternedString), @@ -91,13 +84,14 @@ impl BoundRegion { /// N.B., if you change this, you'll probably want to change the corresponding /// AST structure in `libsyntax/ast.rs` as well. -#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, + RustcEncodable, RustcDecodable, HashStable)] pub enum TyKind<'tcx> { /// The primitive boolean type. Written as `bool`. Bool, /// The primitive character type; holds a Unicode scalar value - /// (a non-surrogate code point). Written as `char`. + /// (a non-surrogate code point). Written as `char`. Char, /// A primitive signed integer type. For example, `i32`. @@ -111,12 +105,13 @@ pub enum TyKind<'tcx> { /// Structures, enumerations and unions. /// - /// Substs here, possibly against intuition, *may* contain `Param`s. + /// InternalSubsts here, possibly against intuition, *may* contain `Param`s. /// That is, even after substitution it is possible that there are type /// variables. This happens when the `Adt` corresponds to an ADT /// definition and not a concrete use of it. - Adt(&'tcx AdtDef, &'tcx Substs<'tcx>), + Adt(&'tcx AdtDef, SubstsRef<'tcx>), + /// An unsized FFI type that is opaque to Rust. 
Written as `extern type T`. Foreign(DefId), /// The pointee of a string slice. Written as `str`. @@ -125,7 +120,7 @@ pub enum TyKind<'tcx> { /// An array with the given length. Written as `[T; n]`. Array(Ty<'tcx>, &'tcx ty::Const<'tcx>), - /// The pointee of an array slice. Written as `[T]`. + /// The pointee of an array slice. Written as `[T]`. Slice(Ty<'tcx>), /// A raw pointer. Written as `*mut T` or `*const T` @@ -145,9 +140,9 @@ pub enum TyKind<'tcx> { /// fn foo() -> i32 { 1 } /// let bar = foo; // bar: fn() -> i32 {foo} /// ``` - FnDef(DefId, &'tcx Substs<'tcx>), + FnDef(DefId, SubstsRef<'tcx>), - /// A pointer to a function. Written as `fn() -> i32`. + /// A pointer to a function. Written as `fn() -> i32`. /// /// For example the type of `bar` here: /// @@ -175,10 +170,10 @@ pub enum TyKind<'tcx> { /// The never type `!` Never, - /// A tuple type. For example, `(i32, bool)`. + /// A tuple type. For example, `(i32, bool)`. Tuple(&'tcx List>), - /// The projection of an associated type. For example, + /// The projection of an associated type. For example, /// `>::N`. Projection(ProjectionTy<'tcx>), @@ -193,7 +188,7 @@ pub enum TyKind<'tcx> { /// * or the `existential type` declaration /// The substitutions are for the generics of the function in question. /// After typeck, the concrete type can be found in the `types` map. - Opaque(DefId, &'tcx Substs<'tcx>), + Opaque(DefId, SubstsRef<'tcx>), /// A type parameter; for example, `T` in `fn f(x: T) {} Param(ParamTy), @@ -287,7 +282,7 @@ static_assert!(MEM_SIZE_OF_TY_KIND: ::std::mem::size_of::>() == 24); /// /// All right, you say, but why include the type parameters from the /// original function then? The answer is that codegen may need them -/// when monomorphizing, and they may not appear in the upvars. A +/// when monomorphizing, and they may not appear in the upvars. A /// closure could capture no variables but still make use of some /// in-scope type parameter with a bound (e.g., if our example above /// had an extra `U: Default`, and the closure called `U::default()`). @@ -304,21 +299,22 @@ static_assert!(MEM_SIZE_OF_TY_KIND: ::std::mem::size_of::>() == 24); /// ## Generators /// /// Perhaps surprisingly, `ClosureSubsts` are also used for -/// generators. In that case, what is written above is only half-true +/// generators. In that case, what is written above is only half-true /// -- the set of type parameters is similar, but the role of CK and -/// CS are different. CK represents the "yield type" and CS +/// CS are different. CK represents the "yield type" and CS /// represents the "return type" of the generator. /// /// It'd be nice to split this struct into ClosureSubsts and /// GeneratorSubsts, I believe. -nmatsakis -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, + Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ClosureSubsts<'tcx> { /// Lifetime and type parameters from the enclosing function, /// concatenated with the types of the upvars. /// /// These are separated out because codegen wants to pass them around /// when monomorphizing. - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, } /// Struct returned by `split()`. Note that these are subslices of the @@ -387,16 +383,18 @@ impl<'tcx> ClosureSubsts<'tcx> { /// /// If you have an inference context, use `infcx.closure_sig()`. 
pub fn closure_sig(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> { - match self.closure_sig_ty(def_id, tcx).sty { + let ty = self.closure_sig_ty(def_id, tcx); + match ty.sty { ty::FnPtr(sig) => sig, - ref t => bug!("closure_sig_ty is not a fn-ptr: {:?}", t), + _ => bug!("closure_sig_ty is not a fn-ptr: {:?}", ty), } } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, + RustcEncodable, RustcDecodable, HashStable)] pub struct GeneratorSubsts<'tcx> { - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, } struct SplitGeneratorSubsts<'tcx> { @@ -451,17 +449,17 @@ impl<'tcx> GeneratorSubsts<'tcx> { self.split(def_id, tcx).return_ty } - /// Return the "generator signature", which consists of its yield + /// Returns the "generator signature", which consists of its yield /// and return types. /// - /// NB. Some bits of the code prefers to see this wrapped in a + /// N.B., some bits of the code prefers to see this wrapped in a /// binder, but it never contains bound regions. Probably this /// function should be removed. pub fn poly_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> PolyGenSig<'tcx> { ty::Binder::dummy(self.sig(def_id, tcx)) } - /// Return the "generator signature", which consists of its yield + /// Returns the "generator signature", which consists of its yield /// and return types. pub fn sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> GenSig<'tcx> { ty::GenSig { @@ -527,13 +525,14 @@ impl<'tcx> UpvarSubsts<'tcx> { } } -#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, + RustcEncodable, RustcDecodable, HashStable)] pub enum ExistentialPredicate<'tcx> { - /// e.g., Iterator + /// E.g., `Iterator`. Trait(ExistentialTraitRef<'tcx>), - /// e.g., Iterator::Item = T + /// E.g., `Iterator::Item = T`. Projection(ExistentialProjection<'tcx>), - /// e.g., Send + /// E.g., `Send`. AutoTrait(DefId), } @@ -560,7 +559,7 @@ impl<'a, 'gcx, 'tcx> ExistentialPredicate<'tcx> { impl<'a, 'gcx, 'tcx> Binder> { pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> { - use ty::ToPredicate; + use crate::ty::ToPredicate; match *self.skip_binder() { ExistentialPredicate::Trait(tr) => Binder(tr).with_self_ty(tcx, self_ty).to_predicate(), ExistentialPredicate::Projection(p) => @@ -579,13 +578,42 @@ impl<'a, 'gcx, 'tcx> Binder> { impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List> {} impl<'tcx> List> { - pub fn principal(&self) -> ExistentialTraitRef<'tcx> { + /// Returns the "principal def id" of this set of existential predicates. + /// + /// A Rust trait object type consists (in addition to a lifetime bound) + /// of a set of trait bounds, which are separated into any number + /// of auto-trait bounds, and at most 1 non-auto-trait bound. The + /// non-auto-trait bound is called the "principal" of the trait + /// object. + /// + /// Only the principal can have methods or type parameters (because + /// auto traits can have neither of them). This is important, because + /// it means the auto traits can be treated as an unordered set (methods + /// would force an order for the vtable, while relating traits with + /// type parameters without knowing the order to relate them in is + /// a rather non-trivial task). 
+ /// + /// For example, in the trait object `dyn fmt::Debug + Sync`, the + /// principal bound is `Some(fmt::Debug)`, while the auto-trait bounds + /// are the set `{Sync}`. + /// + /// It is also possible to have a "trivial" trait object that + /// consists only of auto traits, with no principal - for example, + /// `dyn Send + Sync`. In that case, the set of auto-trait bounds + /// is `{Send, Sync}`, while there is no principal. These trait objects + /// have a "trivial" vtable consisting of just the size, alignment, + /// and destructor. + pub fn principal(&self) -> Option> { match self[0] { - ExistentialPredicate::Trait(tr) => tr, - other => bug!("first predicate is {:?}", other), + ExistentialPredicate::Trait(tr) => Some(tr), + _ => None } } + pub fn principal_def_id(&self) -> Option { + self.principal().map(|d| d.def_id) + } + #[inline] pub fn projection_bounds<'a>(&'a self) -> impl Iterator> + 'a { @@ -609,8 +637,12 @@ impl<'tcx> List> { } impl<'tcx> Binder<&'tcx List>> { - pub fn principal(&self) -> PolyExistentialTraitRef<'tcx> { - Binder::bind(self.skip_binder().principal()) + pub fn principal(&self) -> Option>> { + self.skip_binder().principal().map(Binder::bind) + } + + pub fn principal_def_id(&self) -> Option { + self.skip_binder().principal_def_id() } #[inline] @@ -631,12 +663,12 @@ impl<'tcx> Binder<&'tcx List>> { } /// A complete reference to a trait. These take numerous guises in syntax, -/// but perhaps the most recognizable form is in a where clause: +/// but perhaps the most recognizable form is in a where-clause: /// /// T: Foo /// -/// This would be represented by a trait-reference where the def-id is the -/// def-id for the trait `Foo` and the substs define `T` as parameter 0, +/// This would be represented by a trait-reference where the `DefId` is the +/// `DefId` for the trait `Foo` and the substs define `T` as parameter 0, /// and `U` as parameter 1. /// /// Trait references also appear in object types like `Foo`, but in @@ -645,14 +677,14 @@ impl<'tcx> Binder<&'tcx List>> { /// Note that a `TraitRef` introduces a level of region binding, to /// account for higher-ranked trait bounds like `T: for<'a> Foo<&'a U>` /// or higher-ranked object types. -#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct TraitRef<'tcx> { pub def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, } impl<'tcx> TraitRef<'tcx> { - pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> { + pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> TraitRef<'tcx> { TraitRef { def_id: def_id, substs: substs } } @@ -661,7 +693,7 @@ impl<'tcx> TraitRef<'tcx> { pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> { TraitRef { def_id, - substs: Substs::identity_for_item(tcx, def_id), + substs: InternalSubsts::identity_for_item(tcx, def_id), } } @@ -680,7 +712,7 @@ impl<'tcx> TraitRef<'tcx> { pub fn from_method(tcx: TyCtxt<'_, '_, 'tcx>, trait_id: DefId, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> ty::TraitRef<'tcx> { let defs = tcx.generics_of(trait_id); @@ -715,10 +747,11 @@ impl<'tcx> PolyTraitRef<'tcx> { /// /// The substitutions don't include the erased `Self`, only trait /// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). 
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, + RustcEncodable, RustcDecodable, HashStable)] pub struct ExistentialTraitRef<'tcx> { pub def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, } impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { @@ -742,9 +775,9 @@ impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { } } - /// Object types don't have a self-type specified. Therefore, when + /// Object types don't have a self type specified. Therefore, when /// we convert the principal trait-ref into a normal trait-ref, - /// you must give *some* self-type. A common choice is `mk_err()` + /// you must give *some* self type. A common choice is `mk_err()` /// or some placeholder type. pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> { @@ -765,9 +798,9 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> { self.skip_binder().def_id } - /// Object types don't have a self-type specified. Therefore, when + /// Object types don't have a self type specified. Therefore, when /// we convert the principal trait-ref into a normal trait-ref, - /// you must give *some* self-type. A common choice is `mk_err()` + /// you must give *some* self type. A common choice is `mk_err()` /// or some placeholder type. pub fn with_self_ty(&self, tcx: TyCtxt<'_, '_, 'tcx>, self_ty: Ty<'tcx>) @@ -805,7 +838,7 @@ impl Binder { /// Skips the binder and returns the "bound" value. This is a /// risky thing to do because it's easy to get confused about - /// debruijn indices and the like. It is usually better to + /// De Bruijn indices and the like. It is usually better to /// discharge the binder using `no_bound_vars` or /// `replace_late_bound_regions` or something like /// that. `skip_binder` is only valid when you are either @@ -816,7 +849,7 @@ impl Binder { /// /// Some examples where `skip_binder` is reasonable: /// - /// - extracting the def-id from a PolyTraitRef; + /// - extracting the `DefId` from a PolyTraitRef; /// - comparing the self type of a PolyTraitRef to see if it is equal to /// a type parameter `X`, since the type `X` does not reference any regions pub fn skip_binder(&self) -> &T { @@ -860,8 +893,8 @@ impl Binder { } /// Given two things that have the same binder level, - /// and an operation that wraps on their contents, execute the operation - /// and then wrap its result. + /// and an operation that wraps on their contents, executes the operation + /// and then wraps its result. /// /// `f` should consider bound regions at depth 1 to be free, and /// anything it produces with bound regions at depth 1 will be @@ -872,7 +905,7 @@ impl Binder { Binder(f(self.0, u.0)) } - /// Split the contents into two things that share the same binder + /// Splits the contents into two things that share the same binder /// level as the original, returning two distinct binders. /// /// `f` should consider bound regions at depth 1 to be free, and @@ -888,10 +921,11 @@ impl Binder { /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `>::N`. -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, + Hash, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ProjectionTy<'tcx> { /// The parameters of the associated item. 
- pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, /// The `DefId` of the `TraitItem` for the associated type `N`. /// @@ -933,7 +967,7 @@ impl<'a, 'tcx> ProjectionTy<'tcx> { } } -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct GenSig<'tcx> { pub yield_ty: Ty<'tcx>, pub return_ty: Ty<'tcx>, @@ -953,13 +987,14 @@ impl<'tcx> PolyGenSig<'tcx> { /// Signature of a function type, which I have arbitrarily /// decided to use to refer to the input/output types. /// -/// - `inputs` is the list of arguments and their modes. -/// - `output` is the return type. -/// - `variadic` indicates whether this is a variadic function. (only true for foreign fns) -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +/// - `inputs`: is the list of arguments and their modes. +/// - `output`: is the return type. +/// - `c_variadic`: indicates whether this is a C-variadic function. +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, + Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct FnSig<'tcx> { pub inputs_and_output: &'tcx List>, - pub variadic: bool, + pub c_variadic: bool, pub unsafety: hir::Unsafety, pub abi: abi::Abi, } @@ -992,8 +1027,8 @@ impl<'tcx> PolyFnSig<'tcx> { pub fn output(&self) -> ty::Binder> { self.map_bound_ref(|fn_sig| fn_sig.output()) } - pub fn variadic(&self) -> bool { - self.skip_binder().variadic + pub fn c_variadic(&self) -> bool { + self.skip_binder().c_variadic } pub fn unsafety(&self) -> hir::Unsafety { self.skip_binder().unsafety @@ -1006,7 +1041,8 @@ impl<'tcx> PolyFnSig<'tcx> { pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder>>; -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, + Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct ParamTy { pub idx: u32, pub name: InternedString, @@ -1037,46 +1073,67 @@ impl<'a, 'gcx, 'tcx> ParamTy { } } -/// A [De Bruijn index][dbi] is a standard means of representing -/// regions (and perhaps later types) in a higher-ranked setting. In -/// particular, imagine a type like this: -/// -/// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) -/// ^ ^ | | | -/// | | | | | -/// | +------------+ 0 | | -/// | | | -/// +--------------------------------+ 1 | -/// | | -/// +------------------------------------------+ 0 -/// -/// In this type, there are two binders (the outer fn and the inner -/// fn). We need to be able to determine, for any given region, which -/// fn type it is bound by, the inner or the outer one. There are -/// various ways you can do this, but a De Bruijn index is one of the -/// more convenient and has some nice properties. The basic idea is to -/// count the number of binders, inside out. Some examples should help -/// clarify what I mean. -/// -/// Let's start with the reference type `&'b isize` that is the first -/// argument to the inner function. This region `'b` is assigned a De -/// Bruijn index of 0, meaning "the innermost binder" (in this case, a -/// fn). The region `'a` that appears in the second argument type (`&'a -/// isize`) would then be assigned a De Bruijn index of 1, meaning "the -/// second-innermost binder". (These indices are written on the arrays -/// in the diagram). 
-/// -/// What is interesting is that De Bruijn index attached to a particular -/// variable will vary depending on where it appears. For example, -/// the final type `&'a char` also refers to the region `'a` declared on -/// the outermost fn. But this time, this reference is not nested within -/// any other binders (i.e., it is not an argument to the inner fn, but -/// rather the outer one). Therefore, in this case, it is assigned a -/// De Bruijn index of 0, because the innermost binder in that location -/// is the outer fn. -/// -/// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index +#[derive(Copy, Clone, Hash, RustcEncodable, RustcDecodable, + Eq, PartialEq, Ord, PartialOrd, HashStable)] +pub struct ParamConst { + pub index: u32, + pub name: InternedString, +} + +impl<'a, 'gcx, 'tcx> ParamConst { + pub fn new(index: u32, name: InternedString) -> ParamConst { + ParamConst { index, name } + } + + pub fn for_def(def: &ty::GenericParamDef) -> ParamConst { + ParamConst::new(def.index, def.name) + } + + pub fn to_const(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> &'tcx Const<'tcx> { + tcx.mk_const_param(self.index, self.name, ty) + } +} + newtype_index! { + /// A [De Bruijn index][dbi] is a standard means of representing + /// regions (and perhaps later types) in a higher-ranked setting. In + /// particular, imagine a type like this: + /// + /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) + /// ^ ^ | | | + /// | | | | | + /// | +------------+ 0 | | + /// | | | + /// +--------------------------------+ 1 | + /// | | + /// +------------------------------------------+ 0 + /// + /// In this type, there are two binders (the outer fn and the inner + /// fn). We need to be able to determine, for any given region, which + /// fn type it is bound by, the inner or the outer one. There are + /// various ways you can do this, but a De Bruijn index is one of the + /// more convenient and has some nice properties. The basic idea is to + /// count the number of binders, inside out. Some examples should help + /// clarify what I mean. + /// + /// Let's start with the reference type `&'b isize` that is the first + /// argument to the inner function. This region `'b` is assigned a De + /// Bruijn index of 0, meaning "the innermost binder" (in this case, a + /// fn). The region `'a` that appears in the second argument type (`&'a + /// isize`) would then be assigned a De Bruijn index of 1, meaning "the + /// second-innermost binder". (These indices are written on the arrays + /// in the diagram). + /// + /// What is interesting is that De Bruijn index attached to a particular + /// variable will vary depending on where it appears. For example, + /// the final type `&'a char` also refers to the region `'a` declared on + /// the outermost fn. But this time, this reference is not nested within + /// any other binders (i.e., it is not an argument to the inner fn, but + /// rather the outer one). Therefore, in this case, it is assigned a + /// De Bruijn index of 0, because the innermost binder in that location + /// is the outer fn. + /// + /// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index pub struct DebruijnIndex { DEBUG_FORMAT = "DebruijnIndex({})", const INNERMOST = 0, @@ -1094,14 +1151,14 @@ pub type Region<'tcx> = &'tcx RegionKind; /// ## Bound Regions /// /// These are regions that are stored behind a binder and must be substituted -/// with some concrete region before being used. 
There are 2 kind of -/// bound regions: early-bound, which are bound in an item's Generics, -/// and are substituted by a Substs, and late-bound, which are part of -/// higher-ranked types (e.g., `for<'a> fn(&'a ())`) and are substituted by +/// with some concrete region before being used. There are two kind of +/// bound regions: early-bound, which are bound in an item's `Generics`, +/// and are substituted by a `InternalSubsts`, and late-bound, which are part of +/// higher-ranked types (e.g., `for<'a> fn(&'a ())`), and are substituted by /// the likes of `liberate_late_bound_regions`. The distinction exists /// because higher-ranked lifetimes aren't supported in all places. See [1][2]. /// -/// Unlike Param-s, bound regions are not supposed to exist "in the wild" +/// Unlike `Param`s, bound regions are not supposed to exist "in the wild" /// outside their binder, e.g., in types passed to type inference, and /// should first be substituted (by placeholder regions, free regions, /// or region variables). @@ -1117,7 +1174,7 @@ pub type Region<'tcx> = &'tcx RegionKind; /// To do this, we replace the bound regions with placeholder markers, /// which don't satisfy any relation not explicitly provided. /// -/// There are 2 kinds of placeholder regions in rustc: `ReFree` and +/// There are two kinds of placeholder regions in rustc: `ReFree` and /// `RePlaceholder`. When checking an item's body, `ReFree` is supposed /// to be used. These also support explicit bounds: both the internally-stored /// *scope*, which the region is assumed to outlive, as well as other @@ -1143,13 +1200,13 @@ pub type Region<'tcx> = &'tcx RegionKind; /// [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/hrtb.html #[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable, PartialOrd, Ord)] pub enum RegionKind { - // Region bound in a type or fn declaration which will be - // substituted 'early' -- that is, at the same time when type - // parameters are substituted. + /// Region bound in a type or fn declaration which will be + /// substituted 'early' -- that is, at the same time when type + /// parameters are substituted. ReEarlyBound(EarlyBoundRegion), - // Region bound in a function scope, which will be substituted when the - // function is called. + /// Region bound in a function scope, which will be substituted when the + /// function is called. ReLateBound(DebruijnIndex, BoundRegion), /// When checking a function body, the types of all arguments and so forth @@ -1165,7 +1222,7 @@ pub enum RegionKind { /// Static data that has an "infinite" lifetime. Top in the region lattice. ReStatic, - /// A region variable. Should not exist after typeck. + /// A region variable. Should not exist after typeck. ReVar(RegionVid), /// A placeholder region - basically the higher-ranked version of ReFree. 
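The `ReLateBound(DebruijnIndex, BoundRegion)` variant above relies on De Bruijn indices; here is a tiny self-contained sketch of the inside-out counting scheme that the module's De Bruijn documentation walks through (illustrative only, not part of the patch; `Region`, `Ty`, and `example` are invented names).

enum Region {
    Bound(u32), // De Bruijn index: 0 = innermost enclosing binder
}

enum Ty {
    Ref(Region),
    // Each `Fn` node introduces one binder level, like `for<'x> fn(_, _)`.
    Fn(Box<Ty>, Box<Ty>),
}

// `for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)` is then encoded as:
fn example() -> Ty {
    Ty::Fn(
        Box::new(Ty::Fn(
            Box::new(Ty::Ref(Region::Bound(0))), // `'b`: bound by the innermost fn
            Box::new(Ty::Ref(Region::Bound(1))), // `'a`: bound one binder further out
        )),
        Box::new(Ty::Ref(Region::Bound(0))),     // `'a` again, but here the outer fn is the innermost binder
    )
}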
@@ -1205,6 +1262,12 @@ pub struct TyVid { pub index: u32, } +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +pub struct ConstVid<'tcx> { + pub index: u32, + pub phantom: PhantomData<&'tcx ()>, +} + #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct IntVid { pub index: u32, @@ -1227,7 +1290,8 @@ impl Atom for RegionVid { } } -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, + Hash, RustcEncodable, RustcDecodable, HashStable)] pub enum InferTy { TyVar(TyVid), IntVar(IntVid), @@ -1270,10 +1334,11 @@ impl From for BoundTy { } /// A `ProjectionPredicate` for an `ExistentialTraitRef`. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, + Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ExistentialProjection<'tcx> { pub item_def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, pub ty: Ty<'tcx>, } @@ -1322,11 +1387,11 @@ impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { impl DebruijnIndex { /// Returns the resulting index when this value is moved into - /// `amount` number of new binders. So e.g., if you had + /// `amount` number of new binders. So, e.g., if you had /// /// for<'a> fn(&'a x) /// - /// and you wanted to change to + /// and you wanted to change it to /// /// for<'a> fn(for<'b> fn(&'a x)) /// @@ -1354,7 +1419,7 @@ impl DebruijnIndex { *self = self.shifted_out(amount); } - /// Adjusts any Debruijn Indices so as to make `to_binder` the + /// Adjusts any De Bruijn indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. This is an appropriate thing to do /// when moving a region out from inside binders: @@ -1364,12 +1429,12 @@ impl DebruijnIndex { /// // Binder: D3 D2 D1 ^^ /// ``` /// - /// Here, the region `'a` would have the debruijn index D3, + /// Here, the region `'a` would have the De Bruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the - /// debruijn index of `'a` to D1 (the innermost binder). + /// De Bruijn index of `'a` to D1 (the innermost binder). /// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an @@ -1406,6 +1471,13 @@ impl RegionKind { } } + pub fn is_placeholder(&self) -> bool { + match *self { + ty::RePlaceholder(..) => true, + _ => false, + } + } + pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool { match *self { ty::ReLateBound(debruijn, _) => debruijn >= index, @@ -1413,7 +1485,7 @@ impl RegionKind { } } - /// Adjusts any Debruijn Indices so as to make `to_binder` the + /// Adjusts any De Bruijn indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. 
This is an appropriate thing to do /// when moving a region out from inside binders: @@ -1423,12 +1495,12 @@ impl RegionKind { /// // Binder: D3 D2 D1 ^^ /// ``` /// - /// Here, the region `'a` would have the debruijn index D3, + /// Here, the region `'a` would have the De Bruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the - /// debruijn index of `'a` to D1 (the innermost binder). + /// De Bruijn index of `'a` to D1 (the innermost binder). /// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an @@ -1497,7 +1569,7 @@ impl RegionKind { flags } - /// Given an early-bound or free region, returns the def-id where it was bound. + /// Given an early-bound or free region, returns the `DefId` where it was bound. /// For example, consider the regions in this snippet of code: /// /// ``` @@ -1512,14 +1584,14 @@ impl RegionKind { /// } /// ``` /// - /// Here, `free_region_binding_scope('a)` would return the def-id + /// Here, `free_region_binding_scope('a)` would return the `DefId` /// of the impl, and for all the other highlighted regions, it - /// would return the def-id of the function. In other cases (not shown), this - /// function might return the def-id of a closure. + /// would return the `DefId` of the function. In other cases (not shown), this + /// function might return the `DefId` of a closure. pub fn free_region_binding_scope(&self, tcx: TyCtxt<'_, '_, '_>) -> DefId { match self { ty::ReEarlyBound(br) => { - tcx.parent_def_id(br.def_id).unwrap() + tcx.parent(br.def_id).unwrap() } ty::ReFree(fr) => fr.scope, _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self), @@ -1543,6 +1615,51 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } + /// Checks whether a type is definitely uninhabited. This is + /// conservative: for some types that are uninhabited we return `false`, + /// but we only return `true` for types that are definitely uninhabited. + /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty` + /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero + /// size, to account for partial initialisation. See #49298 for details.) + pub fn conservative_is_privately_uninhabited(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { + // FIXME(varkor): we can make this less conversative by substituting concrete + // type arguments. + match self.sty { + ty::Never => true, + ty::Adt(def, _) if def.is_union() => { + // For now, `union`s are never considered uninhabited. + false + } + ty::Adt(def, _) => { + // Any ADT is uninhabited if either: + // (a) It has no variants (i.e. an empty `enum`); + // (b) Each of its variants (a single one in the case of a `struct`) has at least + // one uninhabited field. + def.variants.iter().all(|var| { + var.fields.iter().any(|field| { + tcx.type_of(field.did).conservative_is_privately_uninhabited(tcx) + }) + }) + } + ty::Tuple(tys) => tys.iter().any(|ty| ty.conservative_is_privately_uninhabited(tcx)), + ty::Array(ty, len) => { + match len.assert_usize(tcx) { + // If the array is definitely non-empty, it's uninhabited if + // the type of its elements is uninhabited. + Some(n) if n != 0 => ty.conservative_is_privately_uninhabited(tcx), + _ => false + } + } + ty::Ref(..) 
=> { + // References to uninitialised memory is valid for any type, including + // uninhabited types, in unsafe code, so we treat all references as + // inhabited. + false + } + _ => false, + } + } + pub fn is_primitive(&self) -> bool { match self.sty { Bool | Char | Int(_) | Uint(_) | Float(_) => true, @@ -1696,7 +1813,7 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } } - /// Returns true if this type is a floating point type and false otherwise. + /// Returns `true` if this type is a floating point type. pub fn is_floating_point(&self) -> bool { match self.sty { Float(_) | @@ -1794,7 +1911,6 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_machine(&self) -> bool { match self.sty { - Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => false, Int(..) | Uint(..) | Float(..) => true, _ => false, } @@ -1875,7 +1991,9 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } Dynamic(ref obj, region) => { out.push(region); - out.extend(obj.principal().skip_binder().substs.regions()); + if let Some(principal) = obj.principal() { + out.extend(principal.skip_binder().substs.regions()); + } } Adt(_, substs) | Opaque(_, substs) => { out.extend(substs.regions()) @@ -1979,45 +2097,27 @@ impl<'a, 'gcx, 'tcx> TyS<'tcx> { } /// Typed constant value. -#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq, Ord, PartialOrd)] +#[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, + Eq, PartialEq, Ord, PartialOrd, HashStable)] pub struct Const<'tcx> { pub ty: Ty<'tcx>, pub val: ConstValue<'tcx>, } -impl<'tcx> Const<'tcx> { - pub fn unevaluated( - tcx: TyCtxt<'_, '_, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, - ty: Ty<'tcx>, - ) -> &'tcx Self { - tcx.mk_const(Const { - val: ConstValue::Unevaluated(def_id, substs), - ty, - }) - } - - #[inline] - pub fn from_const_value( - tcx: TyCtxt<'_, '_, 'tcx>, - val: ConstValue<'tcx>, - ty: Ty<'tcx>, - ) -> &'tcx Self { - tcx.mk_const(Const { - val, - ty, - }) - } +#[cfg(target_arch = "x86_64")] +static_assert!(CONST_SIZE: ::std::mem::size_of::>() == 48); +impl<'tcx> Const<'tcx> { #[inline] pub fn from_scalar( - tcx: TyCtxt<'_, '_, 'tcx>, val: Scalar, ty: Ty<'tcx>, - ) -> &'tcx Self { - Self::from_const_value(tcx, ConstValue::Scalar(val), ty) + ) -> Self { + Self { + val: ConstValue::Scalar(val), + ty, + } } #[inline] @@ -2025,29 +2125,28 @@ impl<'tcx> Const<'tcx> { tcx: TyCtxt<'_, '_, 'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, - ) -> &'tcx Self { + ) -> Self { let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).unwrap_or_else(|e| { panic!("could not compute layout for {:?}: {:?}", ty, e) }).size; - let shift = 128 - size.bits(); - let truncated = (bits << shift) >> shift; + let truncated = truncate(bits, size); assert_eq!(truncated, bits, "from_bits called with untruncated value"); - Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) + Self::from_scalar(Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) } #[inline] - pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { - Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty) + pub fn zero_sized(ty: Ty<'tcx>) -> Self { + Self::from_scalar(Scalar::Bits { bits: 0, size: 0 }, ty) } #[inline] - pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> &'tcx Self { + pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> Self { Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool)) } #[inline] - pub fn from_usize(tcx: TyCtxt<'_, '_, 'tcx>, n: u64) -> &'tcx Self { + pub fn from_usize(tcx: TyCtxt<'_, 
'_, 'tcx>, n: u64) -> Self { Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize)) } @@ -2114,3 +2213,15 @@ impl<'tcx> Const<'tcx> { } impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Const<'tcx> {} + +/// An inference variable for a const, for use in const generics. +#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, + Ord, RustcEncodable, RustcDecodable, Hash, HashStable)] +pub enum InferConst<'tcx> { + /// Infer the value of the const. + Var(ConstVid<'tcx>), + /// A fresh const variable. See `infer::freshen` for more details. + Fresh(u32), + /// Canonicalized const variable, used only when preparing a trait query. + Canonical(DebruijnIndex, BoundVar), +} diff --git a/src/librustc/ty/subst.rs b/src/librustc/ty/subst.rs index cda281e053a34..3ba2c4cbf6c8c 100644 --- a/src/librustc/ty/subst.rs +++ b/src/librustc/ty/subst.rs @@ -1,51 +1,44 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Type substitutions. -use hir::def_id::DefId; -use infer::canonical::Canonical; -use ty::{self, BoundVar, Lift, List, Ty, TyCtxt}; -use ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use crate::hir::def_id::DefId; +use crate::infer::canonical::Canonical; +use crate::ty::{self, Lift, List, Ty, TyCtxt, InferConst, ParamConst}; +use crate::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor}; +use crate::mir::interpret::ConstValue; use serialize::{self, Encodable, Encoder, Decodable, Decoder}; use syntax_pos::{Span, DUMMY_SP}; -use rustc_data_structures::indexed_vec::Idx; use smallvec::SmallVec; +use rustc_macros::HashStable; use core::intrinsics; -use std::cmp::Ordering; use std::fmt; +use std::cmp::Ordering; use std::marker::PhantomData; use std::mem; use std::num::NonZeroUsize; /// An entity in the Rust type system, which can be one of -/// several kinds (only types and lifetimes for now). +/// several kinds (types, lifetimes, and consts). /// To reduce memory usage, a `Kind` is a interned pointer, /// with the lowest 2 bits being reserved for a tag to -/// indicate the type (`Ty` or `Region`) it points to. +/// indicate the type (`Ty`, `Region`, or `Const`) it points to. #[derive(Copy, Clone, PartialEq, Eq, Hash)] pub struct Kind<'tcx> { ptr: NonZeroUsize, - marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>)> + marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, &'tcx ty::Const<'tcx>)> } const TAG_MASK: usize = 0b11; const TYPE_TAG: usize = 0b00; const REGION_TAG: usize = 0b01; +const CONST_TAG: usize = 0b10; -#[derive(Debug, RustcEncodable, RustcDecodable, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, RustcEncodable, RustcDecodable, PartialEq, Eq, PartialOrd, Ord, HashStable)] pub enum UnpackedKind<'tcx> { Lifetime(ty::Region<'tcx>), Type(Ty<'tcx>), + Const(&'tcx ty::Const<'tcx>), } impl<'tcx> UnpackedKind<'tcx> { @@ -61,6 +54,11 @@ impl<'tcx> UnpackedKind<'tcx> { assert_eq!(mem::align_of_val(ty) & TAG_MASK, 0); (TYPE_TAG, ty as *const _ as usize) } + UnpackedKind::Const(ct) => { + // Ensure we can use the tag bits. 
+ assert_eq!(mem::align_of_val(ct) & TAG_MASK, 0); + (CONST_TAG, ct as *const _ as usize) + } }; Kind { @@ -72,6 +70,16 @@ impl<'tcx> UnpackedKind<'tcx> { } } +impl fmt::Debug for Kind<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.unpack() { + UnpackedKind::Lifetime(lt) => lt.fmt(f), + UnpackedKind::Type(ty) => ty.fmt(f), + UnpackedKind::Const(ct) => ct.fmt(f), + } + } +} + impl<'tcx> Ord for Kind<'tcx> { fn cmp(&self, other: &Kind<'_>) -> Ordering { self.unpack().cmp(&other.unpack()) @@ -96,6 +104,12 @@ impl<'tcx> From> for Kind<'tcx> { } } +impl<'tcx> From<&'tcx ty::Const<'tcx>> for Kind<'tcx> { + fn from(c: &'tcx ty::Const<'tcx>) -> Kind<'tcx> { + UnpackedKind::Const(c).pack() + } +} + impl<'tcx> Kind<'tcx> { #[inline] pub fn unpack(self) -> UnpackedKind<'tcx> { @@ -104,37 +118,21 @@ impl<'tcx> Kind<'tcx> { match ptr & TAG_MASK { REGION_TAG => UnpackedKind::Lifetime(&*((ptr & !TAG_MASK) as *const _)), TYPE_TAG => UnpackedKind::Type(&*((ptr & !TAG_MASK) as *const _)), + CONST_TAG => UnpackedKind::Const(&*((ptr & !TAG_MASK) as *const _)), _ => intrinsics::unreachable() } } } } -impl<'tcx> fmt::Debug for Kind<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.unpack() { - UnpackedKind::Lifetime(lt) => write!(f, "{:?}", lt), - UnpackedKind::Type(ty) => write!(f, "{:?}", ty), - } - } -} - -impl<'tcx> fmt::Display for Kind<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.unpack() { - UnpackedKind::Lifetime(lt) => write!(f, "{}", lt), - UnpackedKind::Type(ty) => write!(f, "{}", ty), - } - } -} - impl<'a, 'tcx> Lift<'tcx> for Kind<'a> { type Lifted = Kind<'tcx>; fn lift_to_tcx<'cx, 'gcx>(&self, tcx: TyCtxt<'cx, 'gcx, 'tcx>) -> Option { match self.unpack() { - UnpackedKind::Lifetime(a) => a.lift_to_tcx(tcx).map(|a| a.into()), - UnpackedKind::Type(a) => a.lift_to_tcx(tcx).map(|a| a.into()), + UnpackedKind::Lifetime(lt) => tcx.lift(<).map(|lt| lt.into()), + UnpackedKind::Type(ty) => tcx.lift(&ty).map(|ty| ty.into()), + UnpackedKind::Const(ct) => tcx.lift(&ct).map(|ct| ct.into()), } } } @@ -144,6 +142,7 @@ impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> { match self.unpack() { UnpackedKind::Lifetime(lt) => lt.fold_with(folder).into(), UnpackedKind::Type(ty) => ty.fold_with(folder).into(), + UnpackedKind::Const(ct) => ct.fold_with(folder).into(), } } @@ -151,6 +150,7 @@ impl<'tcx> TypeFoldable<'tcx> for Kind<'tcx> { match self.unpack() { UnpackedKind::Lifetime(lt) => lt.visit_with(visitor), UnpackedKind::Type(ty) => ty.visit_with(visitor), + UnpackedKind::Const(ct) => ct.visit_with(visitor), } } } @@ -168,26 +168,28 @@ impl<'tcx> Decodable for Kind<'tcx> { } /// A substitution mapping generic parameters to new values. -pub type Substs<'tcx> = List>; +pub type InternalSubsts<'tcx> = List>; + +pub type SubstsRef<'tcx> = &'tcx InternalSubsts<'tcx>; -impl<'a, 'gcx, 'tcx> Substs<'tcx> { - /// Creates a `Substs` that maps each generic parameter to itself. +impl<'a, 'gcx, 'tcx> InternalSubsts<'tcx> { + /// Creates a `InternalSubsts` that maps each generic parameter to itself. pub fn identity_for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) - -> &'tcx Substs<'tcx> { - Substs::for_item(tcx, def_id, |param, _| { + -> SubstsRef<'tcx> { + Self::for_item(tcx, def_id, |param, _| { tcx.mk_param_from_def(param) }) } - /// Creates a `Substs` that maps each generic parameter to a higher-ranked + /// Creates a `InternalSubsts` that maps each generic parameter to a higher-ranked /// var bound at index `0`. 
For types, we use a `BoundVar` index equal to /// the type parameter index. For regions, we use the `BoundRegion::BrNamed` - /// variant (which has a def-id). + /// variant (which has a `DefId`). pub fn bound_vars_for_item( tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId - ) -> &'tcx Substs<'tcx> { - Substs::for_item(tcx, def_id, |param, _| { + ) -> SubstsRef<'tcx> { + Self::for_item(tcx, def_id, |param, _| { match param.kind { ty::GenericParamDefKind::Type { .. } => { tcx.mk_ty( @@ -204,25 +206,34 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { ty::BoundRegion::BrNamed(param.def_id, param.name) )).into() } + + ty::GenericParamDefKind::Const => { + tcx.mk_const(ty::Const { + val: ConstValue::Infer( + InferConst::Canonical(ty::INNERMOST, ty::BoundVar::from(param.index)) + ), + ty: tcx.type_of(def_id), + }).into() + } } }) } - /// Creates a `Substs` for generic parameter definitions, + /// Creates a `InternalSubsts` for generic parameter definitions, /// by calling closures to obtain each kind. - /// The closures get to observe the `Substs` as they're + /// The closures get to observe the `InternalSubsts` as they're /// being built, which can be used to correctly /// substitute defaults of generic parameters. pub fn for_item(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId, mut mk_kind: F) - -> &'tcx Substs<'tcx> + -> SubstsRef<'tcx> where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> { let defs = tcx.generics_of(def_id); let count = defs.count(); let mut substs = SmallVec::with_capacity(count); - Substs::fill_item(&mut substs, tcx, defs, &mut mk_kind); + Self::fill_item(&mut substs, tcx, defs, &mut mk_kind); tcx.intern_substs(&substs) } @@ -230,10 +241,10 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId, mut mk_kind: F) - -> &'tcx Substs<'tcx> + -> SubstsRef<'tcx> where F: FnMut(&ty::GenericParamDef, &[Kind<'tcx>]) -> Kind<'tcx> { - Substs::for_item(tcx, def_id, |param, substs| { + Self::for_item(tcx, def_id, |param, substs| { self.get(param.index as usize) .cloned() .unwrap_or_else(|| mk_kind(param, substs)) @@ -248,9 +259,9 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { { if let Some(def_id) = defs.parent { let parent_defs = tcx.generics_of(def_id); - Substs::fill_item(substs, tcx, parent_defs, mk_kind); + Self::fill_item(substs, tcx, parent_defs, mk_kind); } - Substs::fill_single(substs, defs, mk_kind) + Self::fill_single(substs, defs, mk_kind) } fn fill_single(substs: &mut SmallVec<[Kind<'tcx>; 8]>, @@ -292,6 +303,29 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { }) } + #[inline] + pub fn consts(&'a self) -> impl DoubleEndedIterator> + 'a { + self.iter().filter_map(|k| { + if let UnpackedKind::Const(ct) = k.unpack() { + Some(ct) + } else { + None + } + }) + } + + #[inline] + pub fn non_erasable_generics( + &'a self + ) -> impl DoubleEndedIterator> + 'a { + self.iter().filter_map(|k| { + match k.unpack() { + UnpackedKind::Lifetime(_) => None, + generic => Some(generic), + } + }) + } + #[inline] pub fn type_at(&self, i: usize) -> Ty<'tcx> { if let UnpackedKind::Type(ty) = self[i].unpack() { @@ -310,6 +344,15 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { } } + #[inline] + pub fn const_at(&self, i: usize) -> &'tcx ty::Const<'tcx> { + if let UnpackedKind::Const(ct) = self[i].unpack() { + ct + } else { + bug!("expected const for param #{} in {:?}", i, self); + } + } + #[inline] pub fn type_for_def(&self, def: &ty::GenericParamDef) -> Kind<'tcx> { self.type_at(def.index as usize).into() @@ -322,19 +365,19 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> { /// parameters (e.g., method parameters) on 
top of that base. pub fn rebase_onto(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, source_ancestor: DefId, - target_substs: &Substs<'tcx>) - -> &'tcx Substs<'tcx> { + target_substs: SubstsRef<'tcx>) + -> SubstsRef<'tcx> { let defs = tcx.generics_of(source_ancestor); tcx.mk_substs(target_substs.iter().chain(&self[defs.params.len()..]).cloned()) } pub fn truncate_to(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, generics: &ty::Generics) - -> &'tcx Substs<'tcx> { + -> SubstsRef<'tcx> { tcx.mk_substs(self.iter().take(generics.count()).cloned()) } } -impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> { +impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> { fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self { let params: SmallVec<[_; 8]> = self.iter().map(|k| k.fold_with(folder)).collect(); @@ -352,7 +395,7 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> { } } -impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Substs<'tcx> {} +impl<'tcx> serialize::UseSpecializedDecodable for SubstsRef<'tcx> {} /////////////////////////////////////////////////////////////////////////// // Public trait `Subst` @@ -478,6 +521,18 @@ impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for SubstFolder<'a, 'gcx, 'tcx> { return t1; } + + fn fold_const(&mut self, c: &'tcx ty::Const<'tcx>) -> &'tcx ty::Const<'tcx> { + if !c.needs_subst() { + return c; + } + + if let ConstValue::Param(p) = c.val { + self.const_for_param(p, c) + } else { + c.super_fold_with(self) + } + } } impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { @@ -503,7 +558,35 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { self.shift_vars_through_binders(ty) } - /// It is sometimes necessary to adjust the debruijn indices during substitution. This occurs + fn const_for_param( + &self, + p: ParamConst, + source_cn: &'tcx ty::Const<'tcx> + ) -> &'tcx ty::Const<'tcx> { + // Look up the const in the substitutions. It really should be in there. + let opt_cn = self.substs.get(p.index as usize).map(|k| k.unpack()); + let cn = match opt_cn { + Some(UnpackedKind::Const(cn)) => cn, + _ => { + let span = self.span.unwrap_or(DUMMY_SP); + span_bug!( + span, + "Const parameter `{:?}` ({:?}/{}) out of range \ + when substituting (root type={:?}) substs={:?}", + p, + source_cn, + p.index, + self.root_ty, + self.substs, + ); + } + }; + + // FIXME(const_generics): shift const through binders + cn + } + + /// It is sometimes necessary to adjust the De Bruijn indices during substitution. This occurs /// when we are substituting a type with escaping bound vars into a context where we have /// passed through binders. That's quite a mouthful. Let's see an example: /// @@ -522,9 +605,9 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { /// /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip - /// over the inner binder (remember that we count Debruijn indices from 1). However, in the + /// over the inner binder (remember that we count De Bruijn indices from 1). However, in the /// definition of `MetaFunc`, the binder is not visible, so the type `&'a int` will have a - /// debruijn index of 1. It's only during the substitution that we can see we must increase the + /// De Bruijn index of 1. It's only during the substitution that we can see we must increase the /// depth by 1 to account for the binder that we passed through. 
/// /// As a second example, consider this twist: @@ -543,7 +626,7 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { /// DebruijnIndex of 2 /// /// As indicated in the diagram, here the same type `&'a int` is substituted once, but in the - /// first case we do not increase the Debruijn index and in the second case we do. The reason + /// first case we do not increase the De Bruijn index and in the second case we do. The reason /// is that only in the second case have we passed through a fn binder. fn shift_vars_through_binders(&self, ty: Ty<'tcx>) -> Ty<'tcx> { debug!("shift_vars(ty={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})", @@ -569,51 +652,14 @@ impl<'a, 'gcx, 'tcx> SubstFolder<'a, 'gcx, 'tcx> { pub type CanonicalUserSubsts<'tcx> = Canonical<'tcx, UserSubsts<'tcx>>; -impl CanonicalUserSubsts<'tcx> { - /// True if this represents a substitution like - /// - /// ```text - /// [?0, ?1, ?2] - /// ``` - /// - /// i.e., each thing is mapped to a canonical variable with the same index. - pub fn is_identity(&self) -> bool { - if self.value.user_self_ty.is_some() { - return false; - } - - self.value.substs.iter().zip(BoundVar::new(0)..).all(|(kind, cvar)| { - match kind.unpack() { - UnpackedKind::Type(ty) => match ty.sty { - ty::Bound(debruijn, b) => { - // We only allow a `ty::INNERMOST` index in substitutions. - assert_eq!(debruijn, ty::INNERMOST); - cvar == b.var - } - _ => false, - }, - - UnpackedKind::Lifetime(r) => match r { - ty::ReLateBound(debruijn, br) => { - // We only allow a `ty::INNERMOST` index in substitutions. - assert_eq!(*debruijn, ty::INNERMOST); - cvar == br.assert_bound_var() - } - _ => false, - }, - } - }) - } -} - /// Stores the user-given substs to reach some fully qualified path /// (e.g., `::Item` or `::Item`). -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UserSubsts<'tcx> { /// The substitutions for the item as given by the user. - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, - /// The self-type, in the case of a `::Item` path (when applied + /// The self type, in the case of a `::Item` path (when applied /// to an inherent impl). See `UserSelfTy` below. pub user_self_ty: Option>, } @@ -633,8 +679,8 @@ BraceStructLiftImpl! { } } -/// Specifies the user-given self-type. In the case of a path that -/// refers to a member in an inherent impl, this self-type is +/// Specifies the user-given self type. In the case of a path that +/// refers to a member in an inherent impl, this self type is /// sometimes needed to constrain the type parameters on the impl. For /// example, in this code: /// @@ -644,12 +690,12 @@ BraceStructLiftImpl! { /// ``` /// /// when you then have a path like `>::method`, -/// this struct would carry the def-id of the impl along with the -/// self-type `Foo`. Then we can instantiate the parameters of +/// this struct would carry the `DefId` of the impl along with the +/// self type `Foo`. Then we can instantiate the parameters of /// the impl (with the substs from `UserSubsts`) and apply those to -/// the self-type, giving `Foo`. Finally, we unify that with -/// the self-type here, which contains `?A` to be `&'static u32` -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] +/// the self type, giving `Foo`. 
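A plain-Rust illustration of the situation `UserSelfTy` describes. The generics elided in the doc comment above are assumed here to be along the lines of `Foo<A>` and the path `<Foo<&'static u32>>::method`; the user-written self type is what pins down the impl's type parameter.

```rust
use std::marker::PhantomData;

struct Foo<A>(PhantomData<A>);

impl<A> Foo<A> {
    fn method() -> &'static str {
        std::any::type_name::<A>()
    }
}

fn main() {
    // The user-written self type `Foo<&'static u32>` is what constrains the
    // impl parameter `A` to `&'static u32` when this path is resolved.
    println!("{}", <Foo<&'static u32>>::method());
}
```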
Finally, we unify that with +/// the self type here, which contains `?A` to be `&'static u32` +#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, HashStable)] pub struct UserSelfTy<'tcx> { pub impl_def_id: DefId, pub self_ty: Ty<'tcx>, diff --git a/src/librustc/ty/trait_def.rs b/src/librustc/ty/trait_def.rs index fadb1a1cf6ee6..58f21893de143 100644 --- a/src/librustc/ty/trait_def.rs +++ b/src/librustc/ty/trait_def.rs @@ -1,29 +1,23 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir; -use hir::def_id::DefId; -use hir::map::DefPathHash; -use ich::{self, StableHashingContext}; -use traits::specialization_graph; -use ty::fast_reject; -use ty::fold::TypeFoldable; -use ty::{Ty, TyCtxt}; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::hir::map::DefPathHash; +use crate::ich::{self, StableHashingContext}; +use crate::traits::specialization_graph; +use crate::ty::fast_reject; +use crate::ty::fold::TypeFoldable; +use crate::ty::{Ty, TyCtxt}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableHasherResult}; use rustc_data_structures::sync::Lrc; +use rustc_macros::HashStable; /// A trait's definition with type information. +#[derive(HashStable)] pub struct TraitDef { + // We already have the def_path_hash below, no need to hash it twice + #[stable_hasher(ignore)] pub def_id: DefId, pub unsafety: hir::Unsafety, @@ -49,7 +43,7 @@ pub struct TraitDef { #[derive(Default)] pub struct TraitImpls { blanket_impls: Vec, - /// Impls indexed by their simplified self-type, for fast lookup. + /// Impls indexed by their simplified self type, for fast lookup. non_blanket_impls: FxHashMap>, } @@ -73,7 +67,7 @@ impl<'a, 'gcx, 'tcx> TraitDef { pub fn ancestors(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, of_impl: DefId) - -> specialization_graph::Ancestors { + -> specialization_graph::Ancestors<'gcx> { specialization_graph::ancestors(tcx, self.def_id, of_impl) } } @@ -94,7 +88,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } /// Iterate over every impl that could possibly match the - /// self-type `self_ty`. + /// self type `self_ty`. pub fn for_each_relevant_impl(self, def_id: DefId, self_ty: Ty<'tcx>, @@ -144,7 +138,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// Return a vector containing all impls + /// Returns a vector containing all impls pub fn all_impls(self, def_id: DefId) -> Vec { let impls = self.trait_impls_of(def_id); @@ -189,8 +183,8 @@ pub(super) fn trait_impls_of_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } - for &node_id in tcx.hir().trait_impls(trait_id) { - add_impl(tcx.hir().local_def_id(node_id)); + for &hir_id in tcx.hir().trait_impls(trait_id) { + add_impl(tcx.hir().local_def_id_from_hir_id(hir_id)); } } diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs index f9ce228a30c23..ccead14e76b23 100644 --- a/src/librustc/ty/util.rs +++ b/src/librustc/ty/util.rs @@ -1,31 +1,24 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! misc. type-system utilities too small to deserve their own file - -use hir::def::Def; -use hir::def_id::DefId; -use hir::map::DefPathData; -use hir::{self, Node}; -use ich::NodeIdHashingMode; -use traits::{self, ObligationCause}; -use ty::{self, Ty, TyCtxt, GenericParamDefKind, TypeFoldable}; -use ty::subst::{Substs, UnpackedKind}; -use ty::query::TyCtxtAt; -use ty::TyKind::*; -use ty::layout::{Integer, IntegerExt}; -use util::common::ErrorReported; -use middle::lang_items; +//! Miscellaneous type-system utilities that are too small to deserve their own modules. + +use crate::hir::def::Def; +use crate::hir::def_id::DefId; +use crate::hir::map::DefPathData; +use crate::hir::{self, Node}; +use crate::mir::interpret::{sign_extend, truncate}; +use crate::ich::NodeIdHashingMode; +use crate::traits::{self, ObligationCause}; +use crate::ty::{self, DefIdTree, Ty, TyCtxt, GenericParamDefKind, TypeFoldable}; +use crate::ty::subst::{Subst, InternalSubsts, SubstsRef, UnpackedKind}; +use crate::ty::query::TyCtxtAt; +use crate::ty::TyKind::*; +use crate::ty::layout::{Integer, IntegerExt}; +use crate::mir::interpret::ConstValue; +use crate::util::common::ErrorReported; +use crate::middle::lang_items; use rustc_data_structures::stable_hasher::{StableHasher, HashStable}; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_macros::HashStable; use std::{cmp, fmt}; use syntax::ast; use syntax::attr::{self, SignedInt, UnsignedInt}; @@ -33,7 +26,7 @@ use syntax_pos::{Span, DUMMY_SP}; #[derive(Copy, Clone, Debug)] pub struct Discr<'tcx> { - /// bit representation of the discriminant, so `-128i8` is `0xFF_u128` + /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`). pub val: u128, pub ty: Ty<'tcx> } @@ -42,12 +35,12 @@ impl<'tcx> fmt::Display for Discr<'tcx> { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match self.ty.sty { ty::Int(ity) => { - let bits = ty::tls::with(|tcx| { - Integer::from_attr(&tcx, SignedInt(ity)).size().bits() + let size = ty::tls::with(|tcx| { + Integer::from_attr(&tcx, SignedInt(ity)).size() }); - let x = self.val as i128; + let x = self.val; // sign extend the raw representation to be an i128 - let x = (x << (128 - bits)) >> (128 - bits); + let x = sign_extend(x, size) as i128; write!(fmt, "{}", x) }, _ => write!(fmt, "{}", self.val), @@ -56,7 +49,7 @@ impl<'tcx> fmt::Display for Discr<'tcx> { } impl<'tcx> Discr<'tcx> { - /// Adds 1 to the value and wraps around if the maximum for the type is reached + /// Adds `1` to the value and wraps around if the maximum for the type is reached. 
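The `Display` impl above now delegates its bit fiddling to `sign_extend` and `truncate`. A minimal standalone sketch of that arithmetic, written against a plain bit width rather than rustc's `Size` type:

```rust
// Illustrative only: the real helpers live in rustc's `mir::interpret` and
// operate on a `Size`, not a raw bit count.
fn sign_extend(value: u128, bits: u32) -> u128 {
    assert!(bits >= 1 && bits <= 128);
    let shift = 128 - bits;
    // Move the value's sign bit into bit 127, then arithmetic-shift back down.
    (((value << shift) as i128) >> shift) as u128
}

fn truncate(value: u128, bits: u32) -> u128 {
    assert!(bits >= 1 && bits <= 128);
    let shift = 128 - bits;
    // Zero out everything above the type's width.
    (value << shift) >> shift
}

fn main() {
    // 0x80 is the 8-bit two's-complement pattern for -128i8.
    let raw = 0x80u128;
    assert_eq!(sign_extend(raw, 8) as i128, -128);
    assert_eq!(truncate(sign_extend(raw, 8), 8), raw);
    println!("ok");
}
```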
pub fn wrap_incr<'a, 'gcx>(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Self { self.checked_add(tcx, 1).0 } @@ -67,12 +60,12 @@ impl<'tcx> Discr<'tcx> { _ => bug!("non integer discriminant"), }; + let size = int.size(); let bit_size = int.size().bits(); let shift = 128 - bit_size; if signed { let sext = |u| { - let i = u as i128; - (i << shift) >> shift + sign_extend(u, size) as i128 }; let min = sext(1_u128 << (bit_size - 1)); let max = i128::max_value() >> shift; @@ -87,7 +80,7 @@ impl<'tcx> Discr<'tcx> { }; // zero the upper bits let val = val as u128; - let val = (val << shift) >> shift; + let val = truncate(val, size); (Self { val: val as u128, ty: self.ty, @@ -213,7 +206,7 @@ impl<'tcx> ty::ParamEnv<'tcx> { let cause = ObligationCause { span, ..ObligationCause::dummy() }; let ctx = traits::FulfillmentContext::new(); match traits::fully_normalize(&infcx, ctx, cause, self, &ty) { - Ok(ty) => if infcx.type_moves_by_default(self, ty, span) { + Ok(ty) => if !infcx.type_is_copy_modulo_regions(self, ty, span) { infringing.push(field); } Err(errors) => { @@ -352,9 +345,9 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { /// /// Requires that trait definitions have been processed so that we can /// elaborate predicates and walk supertraits. - /// - /// FIXME callers may only have a &[Predicate], not a Vec, so that's - /// what this code should accept. + // + // FIXME: callers may only have a `&[Predicate]`, not a `Vec`, so that's + // what this code should accept. pub fn required_region_bounds(self, erased_self_ty: Ty<'tcx>, predicates: Vec>) @@ -412,7 +405,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { return None; }; - ty::query::queries::coherent_trait::ensure(self, drop_trait); + self.ensure().coherent_trait(drop_trait); let mut dtor_did = None; let ty = self.type_of(adt_did); @@ -427,7 +420,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Some(ty::Destructor { did: dtor_did? }) } - /// Return the set of types that are required to be alive in + /// Returns the set of types that are required to be alive in /// order to run the destructor of `def` (see RFCs 769 and /// 1238). /// @@ -504,8 +497,16 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { }) => { !impl_generics.type_param(pt, self).pure_wrt_drop } - UnpackedKind::Lifetime(_) | UnpackedKind::Type(_) => { - // not a type or region param - this should be reported + UnpackedKind::Const(&ty::Const { + val: ConstValue::Param(ref pc), + .. + }) => { + !impl_generics.const_param(pc, self).pure_wrt_drop + } + UnpackedKind::Lifetime(_) | + UnpackedKind::Type(_) | + UnpackedKind::Const(_) => { + // Not a type, const or region param: this should be reported // as an error. false } @@ -517,17 +518,17 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { result } - /// True if `def_id` refers to a closure (e.g., `|x| x * 2`). Note - /// that closures have a def-id, but the closure *expression* also + /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note + /// that closures have a `DefId`, but the closure *expression* also /// has a `HirId` that is located within the context where the /// closure appears (and, sadly, a corresponding `NodeId`, since /// those are not yet phased out). The parent of the closure's - /// def-id will also be the context where it appears. + /// `DefId` will also be the context where it appears. pub fn is_closure(self, def_id: DefId) -> bool { self.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr } - /// True if `def_id` refers to a trait (i.e., `trait Foo { ... }`). 
+ /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`). pub fn is_trait(self, def_id: DefId) -> bool { if let DefPathData::Trait(_) = self.def_key(def_id).disambiguated_data.data { true @@ -536,33 +537,43 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } - /// True if this def-id refers to the implicit constructor for - /// a tuple struct like `struct Foo(u32)`. - pub fn is_struct_constructor(self, def_id: DefId) -> bool { - self.def_key(def_id).disambiguated_data.data == DefPathData::StructCtor + /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`), + /// and `false` otherwise. + pub fn is_trait_alias(self, def_id: DefId) -> bool { + if let DefPathData::TraitAlias(_) = self.def_key(def_id).disambiguated_data.data { + true + } else { + false + } + } + + /// Returns `true` if this `DefId` refers to the implicit constructor for + /// a tuple struct like `struct Foo(u32)`, and `false` otherwise. + pub fn is_constructor(self, def_id: DefId) -> bool { + self.def_key(def_id).disambiguated_data.data == DefPathData::Ctor } /// Given the `DefId` of a fn or closure, returns the `DefId` of /// the innermost fn item that the closure is contained within. - /// This is a significant def-id because, when we do + /// This is a significant `DefId` because, when we do /// type-checking, we type-check this fn item and all of its - /// (transitive) closures together. Therefore, when we fetch the + /// (transitive) closures together. Therefore, when we fetch the /// `typeck_tables_of` the closure, for example, we really wind up /// fetching the `typeck_tables_of` the enclosing fn item. pub fn closure_base_def_id(self, def_id: DefId) -> DefId { let mut def_id = def_id; while self.is_closure(def_id) { - def_id = self.parent_def_id(def_id).unwrap_or_else(|| { + def_id = self.parent(def_id).unwrap_or_else(|| { bug!("closure {:?} has no parent", def_id); }); } def_id } - /// Given the def-id and substs a closure, creates the type of + /// Given the `DefId` and substs a closure, creates the type of /// `self` argument that the closure expects. For example, for a /// `Fn` closure, this would return a reference type `&T` where - /// `T=closure_ty`. + /// `T = closure_ty`. /// /// Returns `None` if this closure's kind has not yet been inferred. /// This should only be possible during type checking. @@ -586,20 +597,23 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { Some(ty::Binder::bind(env_ty)) } - /// Given the def-id of some item that has no type parameters, make + /// Given the `DefId` of some item that has no type or const parameters, make /// a suitable "empty substs" for it. - pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> &'tcx Substs<'tcx> { - Substs::for_item(self, item_def_id, |param, _| { + pub fn empty_substs_for_def_id(self, item_def_id: DefId) -> SubstsRef<'tcx> { + InternalSubsts::for_item(self, item_def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => self.types.re_erased.into(), - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } => { bug!("empty_substs_for_def_id: {:?} has type parameters", item_def_id) } + GenericParamDefKind::Const { .. } => { + bug!("empty_substs_for_def_id: {:?} has const parameters", item_def_id) + } } }) } - /// Return whether the node pointed to by def_id is a static item, and its mutability + /// Returns `true` if the node pointed to by `def_id` is a static item, and its mutability. 
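For `is_constructor`, a small reminder of what the implicit constructor of a tuple struct is: a function item in its own right (with its own `DefId`), separate from the type it builds.

```rust
#[derive(Debug)]
struct Wrapper(u32);

fn main() {
    // `Wrapper` names the implicit constructor function, so it can be used as
    // an ordinary `fn` value, e.g. stored in a variable or passed to `map`.
    let make: fn(u32) -> Wrapper = Wrapper;
    let wrapped: Vec<Wrapper> = (0..3u32).map(Wrapper).collect();
    println!("{:?} {:?}", make(7), wrapped);
}
```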
pub fn is_static(&self, def_id: DefId) -> Option { if let Some(node) = self.hir().get_if_local(def_id) { match node { @@ -628,17 +642,100 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> { } } } + + /// Expands the given impl trait type, stopping if the type is recursive. + pub fn try_expand_impl_trait_type( + self, + def_id: DefId, + substs: SubstsRef<'tcx>, + ) -> Result, Ty<'tcx>> { + use crate::ty::fold::TypeFolder; + + struct OpaqueTypeExpander<'a, 'gcx, 'tcx> { + // Contains the DefIds of the opaque types that are currently being + // expanded. When we expand an opaque type we insert the DefId of + // that type, and when we finish expanding that type we remove the + // its DefId. + seen_opaque_tys: FxHashSet, + primary_def_id: DefId, + found_recursion: bool, + tcx: TyCtxt<'a, 'gcx, 'tcx>, + } + + impl<'a, 'gcx, 'tcx> OpaqueTypeExpander<'a, 'gcx, 'tcx> { + fn expand_opaque_ty( + &mut self, + def_id: DefId, + substs: SubstsRef<'tcx>, + ) -> Option> { + if self.found_recursion { + None + } else if self.seen_opaque_tys.insert(def_id) { + let generic_ty = self.tcx.type_of(def_id); + let concrete_ty = generic_ty.subst(self.tcx, substs); + let expanded_ty = self.fold_ty(concrete_ty); + self.seen_opaque_tys.remove(&def_id); + Some(expanded_ty) + } else { + // If another opaque type that we contain is recursive, then it + // will report the error, so we don't have to. + self.found_recursion = def_id == self.primary_def_id; + None + } + } + } + + impl<'a, 'gcx, 'tcx> TypeFolder<'gcx, 'tcx> for OpaqueTypeExpander<'a, 'gcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx> { + self.tcx + } + + fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> { + if let ty::Opaque(def_id, substs) = t.sty { + self.expand_opaque_ty(def_id, substs).unwrap_or(t) + } else { + t.super_fold_with(self) + } + } + } + + let mut visitor = OpaqueTypeExpander { + seen_opaque_tys: FxHashSet::default(), + primary_def_id: def_id, + found_recursion: false, + tcx: self, + }; + let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap(); + if visitor.found_recursion { + Err(expanded_type) + } else { + Ok(expanded_type) + } + } } impl<'a, 'tcx> ty::TyS<'tcx> { - pub fn moves_by_default(&'tcx self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - span: Span) - -> bool { - !tcx.at(span).is_copy_raw(param_env.and(self)) + /// Checks whether values of this type `T` are *moved* or *copied* + /// when referenced -- this amounts to a check for whether `T: + /// Copy`, but note that we **don't** consider lifetimes when + /// doing this check. This means that we may generate MIR which + /// does copies even when the type actually doesn't satisfy the + /// full requirements for the `Copy` trait (cc #29149) -- this + /// winds up being reported as an error during NLL borrow check. + pub fn is_copy_modulo_regions(&'tcx self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + span: Span) + -> bool { + tcx.at(span).is_copy_raw(param_env.and(self)) } + /// Checks whether values of this type `T` have a size known at + /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored + /// for the purposes of this check, so it can be an + /// over-approximation in generic contexts, where one can have + /// strange rules like `>::Bar: Sized` that + /// actually carry lifetime requirements. 
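The `OpaqueTypeExpander` above detects recursion by inserting a `DefId` into `seen_opaque_tys` before expanding it and removing it afterwards. The same insert/recurse/remove pattern, sketched standalone on a toy alias table (the names here are illustrative, not rustc APIs):

```rust
use std::collections::{HashMap, HashSet};

// Toy "type" language: either a leaf or a reference to a named alias.
#[derive(Clone, Debug)]
enum Ty {
    Leaf(&'static str),
    Alias(&'static str),
}

/// Expands `ty` against `defs`, using `in_progress` to detect cycles, in the
/// same insert / recurse / remove style as `OpaqueTypeExpander` above.
fn expand(
    defs: &HashMap<&'static str, Ty>,
    in_progress: &mut HashSet<&'static str>,
    found_recursion: &mut bool,
    ty: &Ty,
) -> Ty {
    match ty {
        Ty::Leaf(_) => ty.clone(),
        Ty::Alias(name) => {
            let name = *name;
            if in_progress.insert(name) {
                // First time this alias appears on the current expansion stack.
                let expanded = expand(defs, in_progress, found_recursion, &defs[name]);
                in_progress.remove(name);
                expanded
            } else {
                // The alias is already being expanded: we found a cycle.
                *found_recursion = true;
                ty.clone()
            }
        }
    }
}

fn main() {
    let mut defs = HashMap::new();
    defs.insert("A", Ty::Alias("B"));
    defs.insert("B", Ty::Leaf("u32"));
    defs.insert("C", Ty::Alias("C")); // directly self-referential

    let mut recursion = false;
    println!("{:?}", expand(&defs, &mut HashSet::new(), &mut recursion, &Ty::Alias("A")));
    assert!(!recursion);

    expand(&defs, &mut HashSet::new(), &mut recursion, &Ty::Alias("C"));
    assert!(recursion);
}
```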
pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>)-> bool @@ -646,6 +743,13 @@ impl<'a, 'tcx> ty::TyS<'tcx> { tcx_at.is_sized_raw(param_env.and(self)) } + /// Checks whether values of this type `T` implement the `Freeze` + /// trait -- frozen types are those that do not contain a + /// `UnsafeCell` anywhere. This is a language concept used to + /// distinguish "true immutability", which is relevant to + /// optimization as well as the rules around static values. Note + /// that the `Freeze` trait is not exposed to end users and is + /// effectively an implementation detail. pub fn is_freeze(&'tcx self, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, @@ -665,7 +769,20 @@ impl<'a, 'tcx> ty::TyS<'tcx> { tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { - tcx.needs_drop_raw(param_env.and(self)) + tcx.needs_drop_raw(param_env.and(self)).0 + } + + pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { + match (&a.sty, &b.sty) { + (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => { + if did_a != did_b { + return false; + } + + substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b)) + } + _ => a == b, + } } /// Check whether a type is representable. This means it cannot contain unboxed @@ -740,19 +857,6 @@ impl<'a, 'tcx> ty::TyS<'tcx> { } } - fn same_type<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match (&a.sty, &b.sty) { - (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => { - if did_a != did_b { - return false; - } - - substs_a.types().zip(substs_b.types()).all(|(a, b)| same_type(a, b)) - } - _ => a == b, - } - } - // Does the type `ty` directly (without indirection through a pointer) // contain any types on stack `seen`? fn is_type_structurally_recursive<'a, 'tcx>( @@ -817,7 +921,7 @@ impl<'a, 'tcx> ty::TyS<'tcx> { // struct Foo { Option> } for &seen_type in iter { - if same_type(ty, seen_type) { + if ty::TyS::same_type(ty, seen_type) { debug!("ContainsRecursive: {:?} contains {:?}", seen_type, ty); @@ -861,11 +965,13 @@ fn is_copy_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (param_env, ty) = query.into_parts(); let trait_def_id = tcx.require_lang_item(lang_items::CopyTraitLangItem); tcx.infer_ctxt() - .enter(|infcx| traits::type_known_to_meet_bound(&infcx, - param_env, - ty, - trait_def_id, - DUMMY_SP)) + .enter(|infcx| traits::type_known_to_meet_bound_modulo_regions( + &infcx, + param_env, + ty, + trait_def_id, + DUMMY_SP, + )) } fn is_sized_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -875,11 +981,13 @@ fn is_sized_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (param_env, ty) = query.into_parts(); let trait_def_id = tcx.require_lang_item(lang_items::SizedTraitLangItem); tcx.infer_ctxt() - .enter(|infcx| traits::type_known_to_meet_bound(&infcx, - param_env, - ty, - trait_def_id, - DUMMY_SP)) + .enter(|infcx| traits::type_known_to_meet_bound_modulo_regions( + &infcx, + param_env, + ty, + trait_def_id, + DUMMY_SP, + )) } fn is_freeze_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -889,36 +997,31 @@ fn is_freeze_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let (param_env, ty) = query.into_parts(); let trait_def_id = tcx.require_lang_item(lang_items::FreezeTraitLangItem); tcx.infer_ctxt() - .enter(|infcx| traits::type_known_to_meet_bound(&infcx, - param_env, - ty, - trait_def_id, - DUMMY_SP)) + .enter(|infcx| traits::type_known_to_meet_bound_modulo_regions( + &infcx, + param_env, + ty, + trait_def_id, + DUMMY_SP, + )) } +#[derive(Clone, HashStable)] +pub struct NeedsDrop(pub bool); + fn needs_drop_raw<'a, 
'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) - -> bool + -> NeedsDrop { let (param_env, ty) = query.into_parts(); let needs_drop = |ty: Ty<'tcx>| -> bool { - tcx.try_needs_drop_raw(DUMMY_SP, param_env.and(ty)).unwrap_or_else(|mut bug| { - // Cycles should be reported as an error by `check_representable`. - // - // Consider the type as not needing drop in the meanwhile to - // avoid further errors. - // - // In case we forgot to emit a bug elsewhere, delay our - // diagnostic to get emitted as a compiler bug. - bug.delay_as_bug(); - false - }) + tcx.needs_drop_raw(param_env.and(ty)).0 }; assert!(!ty.needs_infer()); - match ty.sty { + NeedsDrop(match ty.sty { // Fast-path for primitive types ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) | ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never | @@ -931,11 +1034,11 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // `ManuallyDrop` doesn't have a destructor regardless of field types. ty::Adt(def, _) if Some(def.did) == tcx.lang_items().manually_drop() => false, - // Issue #22536: We first query type_moves_by_default. It sees a + // Issue #22536: We first query `is_copy_modulo_regions`. It sees a // normalized version of the type, and therefore will definitely // know whether the type implements Copy (and thus needs no // cleanup/drop/zeroing) ... - _ if !ty.moves_by_default(tcx, param_env, DUMMY_SP) => false, + _ if ty.is_copy_modulo_regions(tcx, param_env, DUMMY_SP) => false, // ... (issue #22536 continued) but as an optimization, still use // prior logic of asking for the structural "may drop". @@ -976,7 +1079,7 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def.variants.iter().any( |variant| variant.fields.iter().any( |field| needs_drop(field.ty(tcx, substs)))), - } + }) } pub enum ExplicitSelf<'tcx> { diff --git a/src/librustc/ty/walk.rs b/src/librustc/ty/walk.rs index 82b95b9df6031..fa1eadf34ac89 100644 --- a/src/librustc/ty/walk.rs +++ b/src/librustc/ty/walk.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! An iterator over the type substructure. //! WARNING: this does not keep track of the region depth. -use mir::interpret::ConstValue; -use ty::{self, Ty}; +use crate::ty::{self, Ty}; use smallvec::{self, SmallVec}; +use crate::mir::interpret::ConstValue; // The TypeWalker's stack is hot enough that it's worth going to some effort to // avoid heap allocations. @@ -85,7 +75,10 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { ty::Placeholder(..) | ty::Bound(..) | ty::Foreign(..) 
=> { } ty::Array(ty, len) => { - push_const(stack, len); + if let ConstValue::Unevaluated(_, substs) = len.val { + stack.extend(substs.types().rev()); + } + stack.push(len.ty); stack.push(ty); } ty::Slice(ty) => { @@ -108,7 +101,7 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { (p.substs, Some(p.ty)), ty::ExistentialPredicate::AutoTrait(_) => // Empty iterator - (ty::Substs::empty(), None), + (ty::InternalSubsts::empty(), None), }; substs.types().rev().chain(opt_ty) @@ -138,10 +131,3 @@ fn push_subtypes<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent_ty: Ty<'tcx>) { } } } - -fn push_const<'tcx>(stack: &mut TypeWalkerStack<'tcx>, constant: &'tcx ty::Const<'tcx>) { - if let ConstValue::Unevaluated(_, substs) = constant.val { - stack.extend(substs.types().rev()); - } - stack.push(constant.ty); -} diff --git a/src/librustc/ty/wf.rs b/src/librustc/ty/wf.rs index 6ae0793d92471..7bfda6a6557a3 100644 --- a/src/librustc/ty/wf.rs +++ b/src/librustc/ty/wf.rs @@ -1,23 +1,13 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::DefId; -use mir::interpret::ConstValue; -use infer::InferCtxt; -use ty::subst::Substs; -use traits; -use ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable}; +use crate::hir; +use crate::hir::def_id::DefId; +use crate::infer::InferCtxt; +use crate::ty::subst::SubstsRef; +use crate::traits; +use crate::ty::{self, ToPredicate, Ty, TyCtxt, TypeFoldable}; use std::iter::once; -use syntax::ast; use syntax_pos::Span; -use middle::lang_items; +use crate::middle::lang_items; +use crate::mir::interpret::ConstValue; /// Returns the set of obligations needed to make `ty` well-formed. /// If `ty` contains unresolved inference variables, this may include @@ -27,7 +17,7 @@ use middle::lang_items; /// say "$0 is WF if $0 is WF". pub fn obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, + body_id: hir::HirId, ty: Ty<'tcx>, span: Span) -> Option>> @@ -53,7 +43,7 @@ pub fn obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, /// if `Bar: Eq`. pub fn trait_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, + body_id: hir::HirId, trait_ref: &ty::TraitRef<'tcx>, span: Span) -> Vec> @@ -65,7 +55,7 @@ pub fn trait_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, + body_id: hir::HirId, predicate: &ty::Predicate<'tcx>, span: Span) -> Vec> @@ -114,7 +104,7 @@ pub fn predicate_obligations<'a, 'gcx, 'tcx>(infcx: &InferCtxt<'a, 'gcx, 'tcx>, struct WfPredicates<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId, + body_id: hir::HirId, span: Span, out: Vec>, } @@ -212,10 +202,9 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } - /// Pushes the obligations required for a constant value to be WF + /// Pushes the obligations required for an array length to be WF /// into `self.out`. 
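As context for `compute_array_len`: an array length can still be an unevaluated constant at this point, for example when it names an associated constant, and its well-formedness then drags in the obligations of that constant's definition. A small stable-Rust example of such a length (the names are only for illustration):

```rust
struct Header;

impl Header {
    // An associated constant used as an array length: before evaluation, the
    // length is an unevaluated constant referring back to this definition.
    const LEN: usize = 4;
}

fn checksum(buf: &[u8; Header::LEN + 2]) -> u32 {
    buf.iter().map(|&b| u32::from(b)).sum()
}

fn main() {
    let buf = [1u8; Header::LEN + 2];
    println!("{}", checksum(&buf));
}
```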
- fn compute_const(&mut self, constant: &'tcx ty::Const<'tcx>) { - self.require_sized(constant.ty, traits::ConstSized); + fn compute_array_len(&mut self, constant: ty::Const<'tcx>) { if let ConstValue::Unevaluated(def_id, substs) = constant.val { let obligations = self.nominal_obligations(def_id, substs); self.out.extend(obligations); @@ -239,7 +228,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } - /// Push new obligations into `out`. Returns true if it was able + /// Pushes new obligations into `out`. Returns `true` if it was able /// to generate all the predicates needed to validate that `ty0` /// is WF. Returns false if `ty0` is an unresolved type variable, /// in which case we are not able to simplify at all. @@ -270,8 +259,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { ty::Array(subty, len) => { self.require_sized(subty, traits::SliceOrArrayElem); - assert_eq!(len.ty, self.infcx.tcx.types.usize); - self.compute_const(len); + self.compute_array_len(*len); } ty::Tuple(ref tys) => { @@ -299,6 +287,11 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { self.out.extend(obligations); } + ty::FnDef(did, substs) => { + let obligations = self.nominal_obligations(did, substs); + self.out.extend(obligations); + } + ty::Ref(r, rty, _) => { // WfReference if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() { @@ -359,7 +352,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } - ty::FnDef(..) | ty::FnPtr(_) => { + ty::FnPtr(_) => { // let the loop iterate into the argument/return // types appearing in the fn signature } @@ -389,7 +382,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { let cause = self.cause(traits::MiscObligation); let component_traits = - data.auto_traits().chain(once(data.principal().def_id())); + data.auto_traits().chain(data.principal_def_id()); self.out.extend( component_traits.map(|did| traits::Obligation::new( cause.clone(), @@ -440,7 +433,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { fn nominal_obligations(&mut self, def_id: DefId, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> Vec> { let predicates = @@ -490,8 +483,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { // // Note: in fact we only permit builtin traits, not `Bar<'d>`, I // am looking forward to the future here. - - if !data.has_escaping_bound_vars() { + if !data.has_escaping_bound_vars() && !region.has_escaping_bound_vars() { let implicit_bounds = object_region_bounds(self.infcx.tcx, data); @@ -510,7 +502,7 @@ impl<'a, 'gcx, 'tcx> WfPredicates<'a, 'gcx, 'tcx> { } } -/// Given an object type like `SomeTrait+Send`, computes the lifetime +/// Given an object type like `SomeTrait + Send`, computes the lifetime /// bounds that must hold on the elided self type. These are derived /// from the declarations of `SomeTrait`, `Send`, and friends -- if /// they declare `trait SomeTrait : 'static`, for example, then diff --git a/src/librustc/util/bug.rs b/src/librustc/util/bug.rs index 863b70c3df3f7..02ddfab6d826e 100644 --- a/src/librustc/util/bug.rs +++ b/src/librustc/util/bug.rs @@ -1,16 +1,6 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // These functions are used by macro expansion for bug! and span_bug! 
-use ty::tls; +use crate::ty::tls; use std::fmt; use syntax_pos::{Span, MultiSpan}; diff --git a/src/librustc/util/captures.rs b/src/librustc/util/captures.rs index b68cfd278fa9e..09d576b23c0f5 100644 --- a/src/librustc/util/captures.rs +++ b/src/librustc/util/captures.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// "Signaling" trait used in impl trait to tag lifetimes that you may /// need to capture but don't really need for other reasons. /// Basically a workaround; see [this comment] for details. diff --git a/src/librustc/util/common.rs b/src/librustc/util/common.rs index 68e197849b03e..26194176350ac 100644 --- a/src/librustc/util/common.rs +++ b/src/librustc/util/common.rs @@ -1,38 +1,28 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_camel_case_types)] -use rustc_data_structures::sync::Lock; +use rustc_data_structures::{fx::FxHashMap, sync::Lock}; use std::cell::{RefCell, Cell}; -use std::collections::HashMap; use std::fmt::Debug; -use std::hash::{Hash, BuildHasher}; +use std::hash::Hash; use std::panic; use std::env; use std::time::{Duration, Instant}; use std::sync::mpsc::{Sender}; use syntax_pos::{SpanData}; -use ty::TyCtxt; -use dep_graph::{DepNode}; +use rustc_macros::HashStable; +use crate::ty::TyCtxt; +use crate::dep_graph::{DepNode}; use lazy_static; -use session::Session; +use crate::session::Session; // The name of the associated type for `Fn` return types pub const FN_OUTPUT_NAME: &str = "Output"; // Useful type to use with `Result<>` indicate that an error has already // been reported to the user, so no need to continue checking. -#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable)] +#[derive(Clone, Copy, Debug, RustcEncodable, RustcDecodable, HashStable)] pub struct ErrorReported; thread_local!(static TIME_DEPTH: Cell = Cell::new(0)); @@ -73,11 +63,11 @@ pub fn install_panic_hook() { /// Parameters to the `Dump` variant of type `ProfileQueriesMsg`. #[derive(Clone,Debug)] pub struct ProfQDumpParams { - /// A base path for the files we will dump + /// A base path for the files we will dump. pub path:String, - /// To ensure that the compiler waits for us to finish our dumps + /// To ensure that the compiler waits for us to finish our dumps. pub ack:Sender<()>, - /// toggle dumping a log file with every `ProfileQueriesMsg` + /// Toggle dumping a log file with every `ProfileQueriesMsg`. pub dump_profq_msg_log:bool, } @@ -141,7 +131,7 @@ pub fn time_depth() -> usize { TIME_DEPTH.with(|slot| slot.get()) } -/// Set the current depth of `time()` calls. The idea is to call +/// Sets the current depth of `time()` calls. The idea is to call /// `set_time_depth()` with the result from `time_depth()` in the /// parent thread. 
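A minimal sketch of the pattern the `set_time_depth` comment describes: the depth lives in a thread-local, so a child thread starts at zero unless the value read in the parent thread is explicitly re-installed (the names below mirror the rustc functions but the code is standalone):

```rust
use std::cell::Cell;
use std::thread;

thread_local!(static TIME_DEPTH: Cell<usize> = Cell::new(0));

fn time_depth() -> usize {
    TIME_DEPTH.with(|d| d.get())
}

fn set_time_depth(depth: usize) {
    TIME_DEPTH.with(|d| d.set(depth));
}

fn timed(what: &str, f: impl FnOnce()) {
    let depth = time_depth();
    set_time_depth(depth + 1);
    f();
    set_time_depth(depth);
    println!("{}time: {}", "  ".repeat(depth), what);
}

fn main() {
    timed("outer", || {
        // Capture the depth in the parent thread and re-install it in the
        // child, since thread-locals start fresh in every thread.
        let depth = time_depth();
        thread::spawn(move || {
            set_time_depth(depth);
            timed("in child thread", || {});
        })
        .join()
        .unwrap();
    });
}
```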
pub fn set_time_depth(depth: usize) { @@ -350,8 +340,8 @@ pub trait MemoizationMap { where OP: FnOnce() -> Self::Value; } -impl MemoizationMap for RefCell> - where K: Hash+Eq+Clone, V: Clone, S: BuildHasher +impl MemoizationMap for RefCell> + where K: Hash+Eq+Clone, V: Clone { type Key = K; type Value = V; diff --git a/src/librustc/util/nodemap.rs b/src/librustc/util/nodemap.rs index 6adfe2cde6c3b..63c7b76d1b6a5 100644 --- a/src/librustc/util/nodemap.rs +++ b/src/librustc/util/nodemap.rs @@ -1,17 +1,7 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +//! An efficient hash map for `NodeId`s. -//! An efficient hash map for node IDs - -use hir::def_id::DefId; -use hir::{HirId, ItemLocalId}; +use crate::hir::def_id::DefId; +use crate::hir::{HirId, ItemLocalId}; use syntax::ast; pub use rustc_data_structures::fx::FxHashMap; diff --git a/src/librustc/util/ppaux.rs b/src/librustc/util/ppaux.rs deleted file mode 100644 index e248f6b42bea2..0000000000000 --- a/src/librustc/util/ppaux.rs +++ /dev/null @@ -1,1439 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hir::def_id::DefId; -use hir::map::definitions::DefPathData; -use mir::interpret::ConstValue; -use middle::region; -use ty::subst::{self, Subst}; -use ty::{BrAnon, BrEnv, BrFresh, BrNamed}; -use ty::{Bool, Char, Adt}; -use ty::{Error, Str, Array, Slice, Float, FnDef, FnPtr}; -use ty::{Param, Bound, RawPtr, Ref, Never, Tuple}; -use ty::{Closure, Generator, GeneratorWitness, Foreign, Projection, Opaque}; -use ty::{Placeholder, UnnormalizedProjection, Dynamic, Int, Uint, Infer}; -use ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, GenericParamCount, GenericParamDefKind}; -use util::nodemap::FxHashSet; - -use std::cell::Cell; -use std::fmt; -use std::usize; - -use rustc_target::spec::abi::Abi; -use syntax::ast::CRATE_NODE_ID; -use syntax::symbol::{Symbol, InternedString}; -use hir; - -thread_local! { - /// Mechanism for highlighting of specific regions for display in NLL region inference errors. - /// Contains region to highlight and counter for number to use when highlighting. - static HIGHLIGHT_REGION_FOR_REGIONVID: Cell> = Cell::new(None) -} - -thread_local! { - /// Mechanism for highlighting of specific regions for display in NLL's 'borrow does not live - /// long enough' errors. Contains a region to highlight and a counter to use. - static HIGHLIGHT_REGION_FOR_BOUND_REGION: Cell> = - Cell::new(None) -} - -macro_rules! gen_display_debug_body { - ( $with:path ) => { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut cx = PrintContext::new(); - $with(self, f, &mut cx) - } - }; -} -macro_rules! gen_display_debug { - ( ($($x:tt)+) $target:ty, display yes ) => { - impl<$($x)+> fmt::Display for $target { - gen_display_debug_body! { Print::print_display } - } - }; - ( () $target:ty, display yes ) => { - impl fmt::Display for $target { - gen_display_debug_body! 
{ Print::print_display } - } - }; - ( ($($x:tt)+) $target:ty, debug yes ) => { - impl<$($x)+> fmt::Debug for $target { - gen_display_debug_body! { Print::print_debug } - } - }; - ( () $target:ty, debug yes ) => { - impl fmt::Debug for $target { - gen_display_debug_body! { Print::print_debug } - } - }; - ( $generic:tt $target:ty, $t:ident no ) => {}; -} -macro_rules! gen_print_impl { - ( ($($x:tt)+) $target:ty, ($self:ident, $f:ident, $cx:ident) $disp:block $dbg:block ) => { - impl<$($x)+> Print for $target { - fn print(&$self, $f: &mut F, $cx: &mut PrintContext) -> fmt::Result { - if $cx.is_debug $dbg - else $disp - } - } - }; - ( () $target:ty, ($self:ident, $f:ident, $cx:ident) $disp:block $dbg:block ) => { - impl Print for $target { - fn print(&$self, $f: &mut F, $cx: &mut PrintContext) -> fmt::Result { - if $cx.is_debug $dbg - else $disp - } - } - }; - ( $generic:tt $target:ty, - $vars:tt $gendisp:ident $disp:block $gendbg:ident $dbg:block ) => { - gen_print_impl! { $generic $target, $vars $disp $dbg } - gen_display_debug! { $generic $target, display $gendisp } - gen_display_debug! { $generic $target, debug $gendbg } - } -} -macro_rules! define_print { - ( $generic:tt $target:ty, - $vars:tt { display $disp:block debug $dbg:block } ) => { - gen_print_impl! { $generic $target, $vars yes $disp yes $dbg } - }; - ( $generic:tt $target:ty, - $vars:tt { debug $dbg:block display $disp:block } ) => { - gen_print_impl! { $generic $target, $vars yes $disp yes $dbg } - }; - ( $generic:tt $target:ty, - $vars:tt { debug $dbg:block } ) => { - gen_print_impl! { $generic $target, $vars no { - bug!(concat!("display not implemented for ", stringify!($target))); - } yes $dbg } - }; - ( $generic:tt $target:ty, - ($self:ident, $f:ident, $cx:ident) { display $disp:block } ) => { - gen_print_impl! { $generic $target, ($self, $f, $cx) yes $disp no { - write!($f, "{:?}", $self) - } } - }; -} -macro_rules! define_print_multi { - ( [ $($generic:tt $target:ty),* ] $vars:tt $def:tt ) => { - $(define_print! { $generic $target, $vars $def })* - }; -} -macro_rules! print_inner { - ( $f:expr, $cx:expr, write ($($data:expr),+) ) => { - write!($f, $($data),+) - }; - ( $f:expr, $cx:expr, $kind:ident ($data:expr) ) => { - $data.$kind($f, $cx) - }; -} -macro_rules! 
print { - ( $f:expr, $cx:expr $(, $kind:ident $data:tt)+ ) => { - Ok(())$(.and_then(|_| print_inner!($f, $cx, $kind $data)))+ - }; -} - - -struct LateBoundRegionNameCollector(FxHashSet); -impl<'tcx> ty::fold::TypeVisitor<'tcx> for LateBoundRegionNameCollector { - fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { - match *r { - ty::ReLateBound(_, ty::BrNamed(_, name)) => { - self.0.insert(name); - }, - _ => {}, - } - r.super_visit_with(self) - } -} - -#[derive(Debug)] -pub struct PrintContext { - is_debug: bool, - is_verbose: bool, - identify_regions: bool, - used_region_names: Option>, - region_index: usize, - binder_depth: usize, -} -impl PrintContext { - fn new() -> Self { - ty::tls::with_opt(|tcx| { - let (is_verbose, identify_regions) = tcx.map( - |tcx| (tcx.sess.verbose(), tcx.sess.opts.debugging_opts.identify_regions) - ).unwrap_or((false, false)); - PrintContext { - is_debug: false, - is_verbose: is_verbose, - identify_regions: identify_regions, - used_region_names: None, - region_index: 0, - binder_depth: 0, - } - }) - } - fn prepare_late_bound_region_info<'tcx, T>(&mut self, value: &ty::Binder) - where T: TypeFoldable<'tcx> - { - let mut collector = LateBoundRegionNameCollector(Default::default()); - value.visit_with(&mut collector); - self.used_region_names = Some(collector.0); - self.region_index = 0; - } -} - -pub trait Print { - fn print(&self, f: &mut F, cx: &mut PrintContext) -> fmt::Result; - fn print_to_string(&self, cx: &mut PrintContext) -> String { - let mut result = String::new(); - let _ = self.print(&mut result, cx); - result - } - fn print_display(&self, f: &mut F, cx: &mut PrintContext) -> fmt::Result { - let old_debug = cx.is_debug; - cx.is_debug = false; - let result = self.print(f, cx); - cx.is_debug = old_debug; - result - } - fn print_display_to_string(&self, cx: &mut PrintContext) -> String { - let mut result = String::new(); - let _ = self.print_display(&mut result, cx); - result - } - fn print_debug(&self, f: &mut F, cx: &mut PrintContext) -> fmt::Result { - let old_debug = cx.is_debug; - cx.is_debug = true; - let result = self.print(f, cx); - cx.is_debug = old_debug; - result - } - fn print_debug_to_string(&self, cx: &mut PrintContext) -> String { - let mut result = String::new(); - let _ = self.print_debug(&mut result, cx); - result - } -} - -impl PrintContext { - fn fn_sig(&mut self, - f: &mut F, - inputs: &[Ty<'_>], - variadic: bool, - output: Ty<'_>) - -> fmt::Result { - write!(f, "(")?; - let mut inputs = inputs.iter(); - if let Some(&ty) = inputs.next() { - print!(f, self, print_display(ty))?; - for &ty in inputs { - print!(f, self, write(", "), print_display(ty))?; - } - if variadic { - write!(f, ", ...")?; - } - } - write!(f, ")")?; - if !output.is_unit() { - print!(f, self, write(" -> "), print_display(output))?; - } - - Ok(()) - } - - fn parameterized(&mut self, - f: &mut F, - substs: &subst::Substs<'_>, - did: DefId, - projections: &[ty::ProjectionPredicate<'_>]) - -> fmt::Result { - let key = ty::tls::with(|tcx| tcx.def_key(did)); - - let verbose = self.is_verbose; - let mut num_supplied_defaults = 0; - let mut has_self = false; - let mut own_counts: GenericParamCount = Default::default(); - let mut is_value_path = false; - let mut item_name = Some(key.disambiguated_data.data.as_interned_str()); - let fn_trait_kind = ty::tls::with(|tcx| { - // Unfortunately, some kinds of items (e.g., closures) don't have - // generics. So walk back up the find the closest parent that DOES - // have them. 
- let mut item_def_id = did; - loop { - let key = tcx.def_key(item_def_id); - match key.disambiguated_data.data { - DefPathData::AssocTypeInTrait(_) | - DefPathData::AssocTypeInImpl(_) | - DefPathData::AssocExistentialInImpl(_) | - DefPathData::Trait(_) | - DefPathData::Impl | - DefPathData::TypeNs(_) => { - break; - } - DefPathData::ValueNs(_) | - DefPathData::EnumVariant(_) => { - is_value_path = true; - break; - } - DefPathData::CrateRoot | - DefPathData::Misc | - DefPathData::Module(_) | - DefPathData::MacroDef(_) | - DefPathData::ClosureExpr | - DefPathData::TypeParam(_) | - DefPathData::LifetimeParam(_) | - DefPathData::Field(_) | - DefPathData::StructCtor | - DefPathData::AnonConst | - DefPathData::ImplTrait | - DefPathData::GlobalMetaData(_) => { - // if we're making a symbol for something, there ought - // to be a value or type-def or something in there - // *somewhere* - item_def_id.index = key.parent.unwrap_or_else(|| { - bug!("finding type for {:?}, encountered def-id {:?} with no \ - parent", did, item_def_id); - }); - } - } - } - let mut generics = tcx.generics_of(item_def_id); - let child_own_counts = generics.own_counts(); - let mut path_def_id = did; - has_self = generics.has_self; - - let mut child_types = 0; - if let Some(def_id) = generics.parent { - // Methods. - assert!(is_value_path); - child_types = child_own_counts.types; - generics = tcx.generics_of(def_id); - own_counts = generics.own_counts(); - - if has_self { - print!(f, self, write("<"), print_display(substs.type_at(0)), write(" as "))?; - } - - path_def_id = def_id; - } else { - item_name = None; - - if is_value_path { - // Functions. - assert_eq!(has_self, false); - } else { - // Types and traits. - own_counts = child_own_counts; - } - } - - if !verbose { - let mut type_params = - generics.params.iter().rev().filter_map(|param| match param.kind { - GenericParamDefKind::Lifetime => None, - GenericParamDefKind::Type { has_default, .. } => { - Some((param.def_id, has_default)) - } - }).peekable(); - let has_default = { - let has_default = type_params.peek().map(|(_, has_default)| has_default); - *has_default.unwrap_or(&false) - }; - if has_default { - if let Some(substs) = tcx.lift(&substs) { - let types = substs.types().rev().skip(child_types); - for ((def_id, has_default), actual) in type_params.zip(types) { - if !has_default { - break; - } - if tcx.type_of(def_id).subst(tcx, substs) != actual { - break; - } - num_supplied_defaults += 1; - } - } - } - } - - print!(f, self, write("{}", tcx.item_path_str(path_def_id)))?; - Ok(tcx.lang_items().fn_trait_kind(path_def_id)) - })?; - - if !verbose && fn_trait_kind.is_some() && projections.len() == 1 { - let projection_ty = projections[0].ty; - if let Tuple(ref args) = substs.type_at(1).sty { - return self.fn_sig(f, args, false, projection_ty); - } - } - - let empty = Cell::new(true); - let start_or_continue = |f: &mut F, start: &str, cont: &str| { - if empty.get() { - empty.set(false); - write!(f, "{}", start) - } else { - write!(f, "{}", cont) - } - }; - - let print_regions = |f: &mut F, start: &str, skip, count| { - // Don't print any regions if they're all erased. 
- let regions = || substs.regions().skip(skip).take(count); - if regions().all(|r: ty::Region<'_>| *r == ty::ReErased) { - return Ok(()); - } - - for region in regions() { - let region: ty::Region<'_> = region; - start_or_continue(f, start, ", ")?; - if verbose { - write!(f, "{:?}", region)?; - } else { - let s = region.to_string(); - if s.is_empty() { - // This happens when the value of the region - // parameter is not easily serialized. This may be - // because the user omitted it in the first place, - // or because it refers to some block in the code, - // etc. I'm not sure how best to serialize this. - write!(f, "'_")?; - } else { - write!(f, "{}", s)?; - } - } - } - - Ok(()) - }; - - print_regions(f, "<", 0, own_counts.lifetimes)?; - - let tps = substs.types() - .take(own_counts.types - num_supplied_defaults) - .skip(has_self as usize); - - for ty in tps { - start_or_continue(f, "<", ", ")?; - ty.print_display(f, self)?; - } - - for projection in projections { - start_or_continue(f, "<", ", ")?; - ty::tls::with(|tcx| - print!(f, self, - write("{}=", - tcx.associated_item(projection.projection_ty.item_def_id).ident), - print_display(projection.ty)) - )?; - } - - start_or_continue(f, "", ">")?; - - // For values, also print their name and type parameters. - if is_value_path { - empty.set(true); - - if has_self { - write!(f, ">")?; - } - - if let Some(item_name) = item_name { - write!(f, "::{}", item_name)?; - } - - print_regions(f, "::<", own_counts.lifetimes, usize::MAX)?; - - // FIXME: consider being smart with defaults here too - for ty in substs.types().skip(own_counts.types) { - start_or_continue(f, "::<", ", ")?; - ty.print_display(f, self)?; - } - - start_or_continue(f, "", ">")?; - } - - Ok(()) - } - - fn in_binder<'a, 'gcx, 'tcx, T, U, F>(&mut self, - f: &mut F, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - original: &ty::Binder, - lifted: Option>) -> fmt::Result - where T: Print, U: Print + TypeFoldable<'tcx>, F: fmt::Write - { - fn name_by_region_index(index: usize) -> InternedString { - match index { - 0 => Symbol::intern("'r"), - 1 => Symbol::intern("'s"), - i => Symbol::intern(&format!("'t{}", i-2)), - }.as_interned_str() - } - - // Replace any anonymous late-bound regions with named - // variants, using gensym'd identifiers, so that we can - // clearly differentiate between named and unnamed regions in - // the output. We'll probably want to tweak this over time to - // decide just how much information to give. 
- let value = if let Some(v) = lifted { - v - } else { - return original.skip_binder().print_display(f, self); - }; - - if self.binder_depth == 0 { - self.prepare_late_bound_region_info(&value); - } - - let mut empty = true; - let mut start_or_continue = |f: &mut F, start: &str, cont: &str| { - if empty { - empty = false; - write!(f, "{}", start) - } else { - write!(f, "{}", cont) - } - }; - - let old_region_index = self.region_index; - let mut region_index = old_region_index; - let new_value = tcx.replace_late_bound_regions(&value, |br| { - let _ = start_or_continue(f, "for<", ", "); - let br = match br { - ty::BrNamed(_, name) => { - let _ = write!(f, "{}", name); - br - } - ty::BrAnon(_) | - ty::BrFresh(_) | - ty::BrEnv => { - let name = loop { - let name = name_by_region_index(region_index); - region_index += 1; - if !self.is_name_used(&name) { - break name; - } - }; - let _ = write!(f, "{}", name); - ty::BrNamed(tcx.hir().local_def_id(CRATE_NODE_ID), name) - } - }; - tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)) - }).0; - start_or_continue(f, "", "> ")?; - - // Push current state to gcx, and restore after writing new_value. - self.binder_depth += 1; - self.region_index = region_index; - let result = new_value.print_display(f, self); - self.region_index = old_region_index; - self.binder_depth -= 1; - result - } - - fn is_name_used(&self, name: &InternedString) -> bool { - match self.used_region_names { - Some(ref names) => names.contains(name), - None => false, - } - } -} - -pub fn verbose() -> bool { - ty::tls::with(|tcx| tcx.sess.verbose()) -} - -pub fn identify_regions() -> bool { - ty::tls::with(|tcx| tcx.sess.opts.debugging_opts.identify_regions) -} - -pub fn parameterized(f: &mut F, - substs: &subst::Substs<'_>, - did: DefId, - projections: &[ty::ProjectionPredicate<'_>]) - -> fmt::Result { - PrintContext::new().parameterized(f, substs, did, projections) -} - -fn get_highlight_region_for_regionvid() -> Option<(RegionVid, usize)> { - HIGHLIGHT_REGION_FOR_REGIONVID.with(|hr| hr.get()) -} - -pub fn with_highlight_region_for_regionvid( - r: RegionVid, - counter: usize, - op: impl FnOnce() -> R -) -> R { - HIGHLIGHT_REGION_FOR_REGIONVID.with(|hr| { - assert_eq!(hr.get(), None); - hr.set(Some((r, counter))); - let r = op(); - hr.set(None); - r - }) -} - -fn get_highlight_region_for_bound_region() -> Option<(ty::BoundRegion, usize)> { - HIGHLIGHT_REGION_FOR_BOUND_REGION.with(|hr| hr.get()) -} - -pub fn with_highlight_region_for_bound_region( - r: ty::BoundRegion, - counter: usize, - op: impl Fn() -> R -) -> R { - HIGHLIGHT_REGION_FOR_BOUND_REGION.with(|hr| { - assert_eq!(hr.get(), None); - hr.set(Some((r, counter))); - let r = op(); - hr.set(None); - r - }) -} - -impl<'a, T: Print> Print for &'a T { - fn print(&self, f: &mut F, cx: &mut PrintContext) -> fmt::Result { - (*self).print(f, cx) - } -} - -define_print! { - ('tcx) &'tcx ty::List>, (self, f, cx) { - display { - // Generate the main trait ref, including associated types. - ty::tls::with(|tcx| { - // Use a type that can't appear in defaults of type parameters. - let dummy_self = tcx.mk_infer(ty::FreshTy(0)); - - let principal = tcx - .lift(&self.principal()) - .expect("could not lift TraitRef for printing") - .with_self_ty(tcx, dummy_self); - let projections = self.projection_bounds().map(|p| { - tcx.lift(&p) - .expect("could not lift projection for printing") - .with_self_ty(tcx, dummy_self) - }).collect::>(); - cx.parameterized(f, principal.substs, principal.def_id, &projections)?; - - // Builtin bounds. 
- for did in self.auto_traits() { - write!(f, " + {}", tcx.item_path_str(did))?; - } - - Ok(()) - })?; - - Ok(()) - } - } -} - -impl fmt::Debug for ty::GenericParamDef { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let type_name = match self.kind { - ty::GenericParamDefKind::Lifetime => "Lifetime", - ty::GenericParamDefKind::Type {..} => "Type", - }; - write!(f, "{}({}, {:?}, {})", - type_name, - self.name, - self.def_id, - self.index) - } -} - -impl fmt::Debug for ty::TraitDef { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ty::tls::with(|tcx| { - write!(f, "{}", tcx.item_path_str(self.def_id)) - }) - } -} - -impl fmt::Debug for ty::AdtDef { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ty::tls::with(|tcx| { - write!(f, "{}", tcx.item_path_str(self.did)) - }) - } -} - -impl<'tcx> fmt::Debug for ty::ClosureUpvar<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "ClosureUpvar({:?},{:?})", - self.def, - self.ty) - } -} - -impl fmt::Debug for ty::UpvarId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UpvarId({:?};`{}`;{:?})", - self.var_path.hir_id, - ty::tls::with(|tcx| tcx.hir().name(tcx.hir().hir_to_node_id(self.var_path.hir_id))), - self.closure_expr_id) - } -} - -impl<'tcx> fmt::Debug for ty::UpvarBorrow<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "UpvarBorrow({:?}, {:?})", - self.kind, self.region) - } -} - -define_print! { - ('tcx) &'tcx ty::List>, (self, f, cx) { - display { - write!(f, "{{")?; - let mut tys = self.iter(); - if let Some(&ty) = tys.next() { - print!(f, cx, print(ty))?; - for &ty in tys { - print!(f, cx, write(", "), print(ty))?; - } - } - write!(f, "}}") - } - } -} - -define_print! { - ('tcx) ty::TypeAndMut<'tcx>, (self, f, cx) { - display { - print!(f, cx, - write("{}", if self.mutbl == hir::MutMutable { "mut " } else { "" }), - print(self.ty)) - } - } -} - -define_print! { - ('tcx) ty::ExistentialTraitRef<'tcx>, (self, f, cx) { - debug { - ty::tls::with(|tcx| { - let dummy_self = tcx.mk_infer(ty::FreshTy(0)); - - let trait_ref = *tcx.lift(&ty::Binder::bind(*self)) - .expect("could not lift TraitRef for printing") - .with_self_ty(tcx, dummy_self).skip_binder(); - cx.parameterized(f, trait_ref.substs, trait_ref.def_id, &[]) - }) - } - } -} - -define_print! { - ('tcx) ty::adjustment::Adjustment<'tcx>, (self, f, cx) { - debug { - print!(f, cx, write("{:?} -> ", self.kind), print(self.target)) - } - } -} - -define_print! { - () ty::BoundRegion, (self, f, cx) { - display { - if cx.is_verbose { - return self.print_debug(f, cx); - } - - if let Some((region, counter)) = get_highlight_region_for_bound_region() { - if *self == region { - return match *self { - BrNamed(_, name) => write!(f, "{}", name), - BrAnon(_) | BrFresh(_) | BrEnv => write!(f, "'{}", counter) - }; - } - } - - match *self { - BrNamed(_, name) => write!(f, "{}", name), - BrAnon(_) | BrFresh(_) | BrEnv => Ok(()) - } - } - debug { - return match *self { - BrAnon(n) => write!(f, "BrAnon({:?})", n), - BrFresh(n) => write!(f, "BrFresh({:?})", n), - BrNamed(did, name) => { - write!(f, "BrNamed({:?}:{:?}, {})", - did.krate, did.index, name) - } - BrEnv => write!(f, "BrEnv"), - }; - } - } -} - -define_print! { - () ty::RegionKind, (self, f, cx) { - display { - if cx.is_verbose || get_highlight_region_for_regionvid().is_some() { - return self.print_debug(f, cx); - } - - // These printouts are concise. 
They do not contain all the information - // the user might want to diagnose an error, but there is basically no way - // to fit that into a short string. Hence the recommendation to use - // `explain_region()` or `note_and_explain_region()`. - match *self { - ty::ReEarlyBound(ref data) => { - write!(f, "{}", data.name) - } - ty::ReLateBound(_, br) | - ty::ReFree(ty::FreeRegion { bound_region: br, .. }) | - ty::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => { - write!(f, "{}", br) - } - ty::ReScope(scope) if cx.identify_regions => { - match scope.data { - region::ScopeData::Node => - write!(f, "'{}s", scope.item_local_id().as_usize()), - region::ScopeData::CallSite => - write!(f, "'{}cs", scope.item_local_id().as_usize()), - region::ScopeData::Arguments => - write!(f, "'{}as", scope.item_local_id().as_usize()), - region::ScopeData::Destruction => - write!(f, "'{}ds", scope.item_local_id().as_usize()), - region::ScopeData::Remainder(first_statement_index) => write!( - f, - "'{}_{}rs", - scope.item_local_id().as_usize(), - first_statement_index.index() - ), - } - } - ty::ReVar(region_vid) if cx.identify_regions => { - write!(f, "'{}rv", region_vid.index()) - } - ty::ReScope(_) | - ty::ReVar(_) | - ty::ReErased => Ok(()), - ty::ReStatic => write!(f, "'static"), - ty::ReEmpty => write!(f, "'"), - - // The user should never encounter these in unsubstituted form. - ty::ReClosureBound(vid) => write!(f, "{:?}", vid), - } - } - debug { - match *self { - ty::ReEarlyBound(ref data) => { - write!(f, "ReEarlyBound({}, {})", - data.index, - data.name) - } - - ty::ReClosureBound(ref vid) => { - write!(f, "ReClosureBound({:?})", - vid) - } - - ty::ReLateBound(binder_id, ref bound_region) => { - write!(f, "ReLateBound({:?}, {:?})", - binder_id, - bound_region) - } - - ty::ReFree(ref fr) => write!(f, "{:?}", fr), - - ty::ReScope(id) => { - write!(f, "ReScope({:?})", id) - } - - ty::ReStatic => write!(f, "ReStatic"), - - ty::ReVar(ref vid) => { - write!(f, "{:?}", vid) - } - - ty::RePlaceholder(placeholder) => { - write!(f, "RePlaceholder({:?})", placeholder) - } - - ty::ReEmpty => write!(f, "ReEmpty"), - - ty::ReErased => write!(f, "ReErased") - } - } - } -} - -define_print! { - () ty::FreeRegion, (self, f, cx) { - debug { - write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region) - } - } -} - -define_print! { - () ty::Variance, (self, f, cx) { - debug { - f.write_str(match *self { - ty::Covariant => "+", - ty::Contravariant => "-", - ty::Invariant => "o", - ty::Bivariant => "*", - }) - } - } -} - -define_print! { - ('tcx) ty::GenericPredicates<'tcx>, (self, f, cx) { - debug { - write!(f, "GenericPredicates({:?})", self.predicates) - } - } -} - -define_print! { - ('tcx) ty::InstantiatedPredicates<'tcx>, (self, f, cx) { - debug { - write!(f, "InstantiatedPredicates({:?})", self.predicates) - } - } -} - -define_print! 
{ - ('tcx) ty::FnSig<'tcx>, (self, f, cx) { - display { - if self.unsafety == hir::Unsafety::Unsafe { - write!(f, "unsafe ")?; - } - - if self.abi != Abi::Rust { - write!(f, "extern {} ", self.abi)?; - } - - write!(f, "fn")?; - cx.fn_sig(f, self.inputs(), self.variadic, self.output()) - } - debug { - write!(f, "({:?}; variadic: {})->{:?}", self.inputs(), self.variadic, self.output()) - } - } -} - -impl fmt::Debug for ty::TyVid { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "_#{}t", self.index) - } -} - -impl fmt::Debug for ty::IntVid { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "_#{}i", self.index) - } -} - -impl fmt::Debug for ty::FloatVid { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "_#{}f", self.index) - } -} - -impl fmt::Debug for ty::RegionVid { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some((region, counter)) = get_highlight_region_for_regionvid() { - debug!("RegionVid.fmt: region={:?} self={:?} counter={:?}", region, self, counter); - return if *self == region { - write!(f, "'{:?}", counter) - } else { - write!(f, "'_") - } - } - - write!(f, "'_#{}r", self.index()) - } -} - -define_print! { - () ty::InferTy, (self, f, cx) { - display { - if cx.is_verbose { - print!(f, cx, print_debug(self)) - } else { - match *self { - ty::TyVar(_) => write!(f, "_"), - ty::IntVar(_) => write!(f, "{}", "{integer}"), - ty::FloatVar(_) => write!(f, "{}", "{float}"), - ty::FreshTy(v) => write!(f, "FreshTy({})", v), - ty::FreshIntTy(v) => write!(f, "FreshIntTy({})", v), - ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v) - } - } - } - debug { - match *self { - ty::TyVar(ref v) => write!(f, "{:?}", v), - ty::IntVar(ref v) => write!(f, "{:?}", v), - ty::FloatVar(ref v) => write!(f, "{:?}", v), - ty::FreshTy(v) => write!(f, "FreshTy({:?})", v), - ty::FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v), - ty::FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v) - } - } - } -} - -impl fmt::Debug for ty::IntVarValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match *self { - ty::IntType(ref v) => v.fmt(f), - ty::UintType(ref v) => v.fmt(f), - } - } -} - -impl fmt::Debug for ty::FloatVarValue { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) - } -} - -// The generic impl doesn't work yet because projections are not -// normalized under HRTB. -/*impl fmt::Display for ty::Binder - where T: fmt::Display + for<'a> ty::Lift<'a>, - for<'a> >::Lifted: fmt::Display + TypeFoldable<'a> -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ty::tls::with(|tcx| in_binder(f, tcx, self, tcx.lift(self))) - } -}*/ - -define_print_multi! { - [ - ('tcx) ty::Binder<&'tcx ty::List>>, - ('tcx) ty::Binder>, - ('tcx) ty::Binder>, - ('tcx) ty::Binder>, - ('tcx) ty::Binder>, - ('tcx) ty::Binder>, - ('tcx) ty::Binder, ty::Region<'tcx>>>, - ('tcx) ty::Binder, ty::Region<'tcx>>> - ] - (self, f, cx) { - display { - ty::tls::with(|tcx| cx.in_binder(f, tcx, self, tcx.lift(self))) - } - } -} - -define_print! { - ('tcx) ty::TraitRef<'tcx>, (self, f, cx) { - display { - cx.parameterized(f, self.substs, self.def_id, &[]) - } - debug { - // when printing out the debug representation, we don't need - // to enumerate the `for<...>` etc because the debruijn index - // tells you everything you need to know. 
- print!(f, cx, - write("<"), - print(self.self_ty()), - write(" as "))?; - cx.parameterized(f, self.substs, self.def_id, &[])?; - write!(f, ">") - } - } -} - -define_print! { - ('tcx) ty::TyKind<'tcx>, (self, f, cx) { - display { - match *self { - Bool => write!(f, "bool"), - Char => write!(f, "char"), - Int(t) => write!(f, "{}", t.ty_to_string()), - Uint(t) => write!(f, "{}", t.ty_to_string()), - Float(t) => write!(f, "{}", t.ty_to_string()), - RawPtr(ref tm) => { - write!(f, "*{} ", match tm.mutbl { - hir::MutMutable => "mut", - hir::MutImmutable => "const", - })?; - tm.ty.print(f, cx) - } - Ref(r, ty, mutbl) => { - write!(f, "&")?; - let s = r.print_to_string(cx); - if s != "'_" { - write!(f, "{}", s)?; - if !s.is_empty() { - write!(f, " ")?; - } - } - ty::TypeAndMut { ty, mutbl }.print(f, cx) - } - Never => write!(f, "!"), - Tuple(ref tys) => { - write!(f, "(")?; - let mut tys = tys.iter(); - if let Some(&ty) = tys.next() { - print!(f, cx, print(ty), write(","))?; - if let Some(&ty) = tys.next() { - print!(f, cx, write(" "), print(ty))?; - for &ty in tys { - print!(f, cx, write(", "), print(ty))?; - } - } - } - write!(f, ")") - } - FnDef(def_id, substs) => { - ty::tls::with(|tcx| { - let mut sig = tcx.fn_sig(def_id); - if let Some(substs) = tcx.lift(&substs) { - sig = sig.subst(tcx, substs); - } - print!(f, cx, print(sig), write(" {{")) - })?; - cx.parameterized(f, substs, def_id, &[])?; - write!(f, "}}") - } - FnPtr(ref bare_fn) => { - bare_fn.print(f, cx) - } - Infer(infer_ty) => write!(f, "{}", infer_ty), - Error => write!(f, "[type error]"), - Param(ref param_ty) => write!(f, "{}", param_ty), - Bound(debruijn, bound_ty) => { - match bound_ty.kind { - ty::BoundTyKind::Anon => { - if debruijn == ty::INNERMOST { - write!(f, "^{}", bound_ty.var.index()) - } else { - write!(f, "^{}_{}", debruijn.index(), bound_ty.var.index()) - } - } - - ty::BoundTyKind::Param(p) => write!(f, "{}", p), - } - } - Adt(def, substs) => cx.parameterized(f, substs, def.did, &[]), - Dynamic(data, r) => { - let r = r.print_to_string(cx); - if !r.is_empty() { - write!(f, "(")?; - } - write!(f, "dyn ")?; - data.print(f, cx)?; - if !r.is_empty() { - write!(f, " + {})", r) - } else { - Ok(()) - } - } - Foreign(def_id) => parameterized(f, subst::Substs::empty(), def_id, &[]), - Projection(ref data) => data.print(f, cx), - UnnormalizedProjection(ref data) => { - write!(f, "Unnormalized(")?; - data.print(f, cx)?; - write!(f, ")") - } - Placeholder(placeholder) => { - write!(f, "Placeholder({:?})", placeholder) - } - Opaque(def_id, substs) => { - if cx.is_verbose { - return write!(f, "Opaque({:?}, {:?})", def_id, substs); - } - - ty::tls::with(|tcx| { - let def_key = tcx.def_key(def_id); - if let Some(name) = def_key.disambiguated_data.data.get_opt_name() { - write!(f, "{}", name)?; - let mut substs = substs.iter(); - if let Some(first) = substs.next() { - write!(f, "::<")?; - write!(f, "{}", first)?; - for subst in substs { - write!(f, ", {}", subst)?; - } - write!(f, ">")?; - } - return Ok(()); - } - // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, - // by looking up the projections associated with the def_id. 
- let predicates_of = tcx.predicates_of(def_id); - let substs = tcx.lift(&substs).unwrap_or_else(|| { - tcx.intern_substs(&[]) - }); - let bounds = predicates_of.instantiate(tcx, substs); - - let mut first = true; - let mut is_sized = false; - write!(f, "impl")?; - for predicate in bounds.predicates { - if let Some(trait_ref) = predicate.to_opt_poly_trait_ref() { - // Don't print +Sized, but rather +?Sized if absent. - if Some(trait_ref.def_id()) == tcx.lang_items().sized_trait() { - is_sized = true; - continue; - } - - print!(f, cx, - write("{}", if first { " " } else { "+" }), - print(trait_ref))?; - first = false; - } - } - if !is_sized { - write!(f, "{}?Sized", if first { " " } else { "+" })?; - } - Ok(()) - }) - } - Str => write!(f, "str"), - Generator(did, substs, movability) => ty::tls::with(|tcx| { - let upvar_tys = substs.upvar_tys(did, tcx); - let witness = substs.witness(did, tcx); - if movability == hir::GeneratorMovability::Movable { - write!(f, "[generator")?; - } else { - write!(f, "[static generator")?; - } - - if let Some(node_id) = tcx.hir().as_local_node_id(did) { - write!(f, "@{:?}", tcx.hir().span(node_id))?; - let mut sep = " "; - tcx.with_freevars(node_id, |freevars| { - for (freevar, upvar_ty) in freevars.iter().zip(upvar_tys) { - print!(f, cx, - write("{}{}:", - sep, - tcx.hir().name(freevar.var_id())), - print(upvar_ty))?; - sep = ", "; - } - Ok(()) - })? - } else { - // cross-crate closure types should only be - // visible in codegen bug reports, I imagine. - write!(f, "@{:?}", did)?; - let mut sep = " "; - for (index, upvar_ty) in upvar_tys.enumerate() { - print!(f, cx, - write("{}{}:", sep, index), - print(upvar_ty))?; - sep = ", "; - } - } - - print!(f, cx, write(" "), print(witness), write("]")) - }), - GeneratorWitness(types) => { - ty::tls::with(|tcx| cx.in_binder(f, tcx, &types, tcx.lift(&types))) - } - Closure(did, substs) => ty::tls::with(|tcx| { - let upvar_tys = substs.upvar_tys(did, tcx); - write!(f, "[closure")?; - - if let Some(node_id) = tcx.hir().as_local_node_id(did) { - if tcx.sess.opts.debugging_opts.span_free_formats { - write!(f, "@{:?}", node_id)?; - } else { - write!(f, "@{:?}", tcx.hir().span(node_id))?; - } - let mut sep = " "; - tcx.with_freevars(node_id, |freevars| { - for (freevar, upvar_ty) in freevars.iter().zip(upvar_tys) { - print!(f, cx, - write("{}{}:", - sep, - tcx.hir().name(freevar.var_id())), - print(upvar_ty))?; - sep = ", "; - } - Ok(()) - })? - } else { - // cross-crate closure types should only be - // visible in codegen bug reports, I imagine. - write!(f, "@{:?}", did)?; - let mut sep = " "; - for (index, upvar_ty) in upvar_tys.enumerate() { - print!(f, cx, - write("{}{}:", sep, index), - print(upvar_ty))?; - sep = ", "; - } - } - - write!(f, "]") - }), - Array(ty, sz) => { - print!(f, cx, write("["), print(ty), write("; "))?; - match sz.val { - ConstValue::Unevaluated(_def_id, _substs) => { - write!(f, "_")?; - } - _ => ty::tls::with(|tcx| { - write!(f, "{}", sz.unwrap_usize(tcx)) - })?, - } - write!(f, "]") - } - Slice(ty) => { - print!(f, cx, write("["), print(ty), write("]")) - } - } - } - } -} - -define_print! { - ('tcx) ty::TyS<'tcx>, (self, f, cx) { - display { - self.sty.print(f, cx) - } - debug { - self.sty.print_display(f, cx) - } - } -} - -define_print! { - () ty::ParamTy, (self, f, cx) { - display { - write!(f, "{}", self.name) - } - debug { - write!(f, "{}/#{}", self.name, self.idx) - } - } -} - -define_print! 
{ - ('tcx, T: Print + fmt::Debug, U: Print + fmt::Debug) ty::OutlivesPredicate, - (self, f, cx) { - display { - print!(f, cx, print(self.0), write(" : "), print(self.1)) - } - } -} - -define_print! { - ('tcx) ty::SubtypePredicate<'tcx>, (self, f, cx) { - display { - print!(f, cx, print(self.a), write(" <: "), print(self.b)) - } - } -} - -define_print! { - ('tcx) ty::TraitPredicate<'tcx>, (self, f, cx) { - debug { - write!(f, "TraitPredicate({:?})", - self.trait_ref) - } - display { - print!(f, cx, print(self.trait_ref.self_ty()), write(": "), print(self.trait_ref)) - } - } -} - -define_print! { - ('tcx) ty::ProjectionPredicate<'tcx>, (self, f, cx) { - debug { - print!(f, cx, - write("ProjectionPredicate("), - print(self.projection_ty), - write(", "), - print(self.ty), - write(")")) - } - display { - print!(f, cx, print(self.projection_ty), write(" == "), print(self.ty)) - } - } -} - -define_print! { - ('tcx) ty::ProjectionTy<'tcx>, (self, f, cx) { - display { - // FIXME(tschottdorf): use something like - // parameterized(f, self.substs, self.item_def_id, &[]) - // (which currently ICEs). - let (trait_ref, item_name) = ty::tls::with(|tcx| - (self.trait_ref(tcx), tcx.associated_item(self.item_def_id).ident) - ); - print!(f, cx, print_debug(trait_ref), write("::{}", item_name)) - } - } -} - -define_print! { - () ty::ClosureKind, (self, f, cx) { - display { - match *self { - ty::ClosureKind::Fn => write!(f, "Fn"), - ty::ClosureKind::FnMut => write!(f, "FnMut"), - ty::ClosureKind::FnOnce => write!(f, "FnOnce"), - } - } - } -} - -define_print! { - ('tcx) ty::Predicate<'tcx>, (self, f, cx) { - display { - match *self { - ty::Predicate::Trait(ref data) => data.print(f, cx), - ty::Predicate::Subtype(ref predicate) => predicate.print(f, cx), - ty::Predicate::RegionOutlives(ref predicate) => predicate.print(f, cx), - ty::Predicate::TypeOutlives(ref predicate) => predicate.print(f, cx), - ty::Predicate::Projection(ref predicate) => predicate.print(f, cx), - ty::Predicate::WellFormed(ty) => print!(f, cx, print(ty), write(" well-formed")), - ty::Predicate::ObjectSafe(trait_def_id) => - ty::tls::with(|tcx| { - write!(f, "the trait `{}` is object-safe", tcx.item_path_str(trait_def_id)) - }), - ty::Predicate::ClosureKind(closure_def_id, _closure_substs, kind) => - ty::tls::with(|tcx| { - write!(f, "the closure `{}` implements the trait `{}`", - tcx.item_path_str(closure_def_id), kind) - }), - ty::Predicate::ConstEvaluatable(def_id, substs) => { - write!(f, "the constant `")?; - cx.parameterized(f, substs, def_id, &[])?; - write!(f, "` can be evaluated") - } - } - } - debug { - match *self { - ty::Predicate::Trait(ref a) => a.print(f, cx), - ty::Predicate::Subtype(ref pair) => pair.print(f, cx), - ty::Predicate::RegionOutlives(ref pair) => pair.print(f, cx), - ty::Predicate::TypeOutlives(ref pair) => pair.print(f, cx), - ty::Predicate::Projection(ref pair) => pair.print(f, cx), - ty::Predicate::WellFormed(ty) => ty.print(f, cx), - ty::Predicate::ObjectSafe(trait_def_id) => { - write!(f, "ObjectSafe({:?})", trait_def_id) - } - ty::Predicate::ClosureKind(closure_def_id, closure_substs, kind) => { - write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind) - } - ty::Predicate::ConstEvaluatable(def_id, substs) => { - write!(f, "ConstEvaluatable({:?}, {:?})", def_id, substs) - } - } - } - } -} diff --git a/src/librustc/util/profiling.rs b/src/librustc/util/profiling.rs index c2bfa62cf9d06..2739a30a29135 100644 --- a/src/librustc/util/profiling.rs +++ b/src/librustc/util/profiling.rs @@ 
-1,133 +1,17 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use session::config::Options; - +use std::borrow::Cow; use std::fs; -use std::io::{self, StderrLock, Write}; -use std::time::{Duration, Instant}; - -macro_rules! define_categories { - ($($name:ident,)*) => { - #[derive(Clone, Copy, Debug, PartialEq, Eq)] - pub enum ProfileCategory { - $($name),* - } - - #[allow(nonstandard_style)] - struct Categories { - $($name: T),* - } - - impl Categories { - fn new() -> Categories { - Categories { - $($name: T::default()),* - } - } - } - - impl Categories { - fn get(&self, category: ProfileCategory) -> &T { - match category { - $(ProfileCategory::$name => &self.$name),* - } - } - - fn set(&mut self, category: ProfileCategory, value: T) { - match category { - $(ProfileCategory::$name => self.$name = value),* - } - } - } - - struct CategoryData { - times: Categories, - query_counts: Categories<(u64, u64)>, - } - - impl CategoryData { - fn new() -> CategoryData { - CategoryData { - times: Categories::new(), - query_counts: Categories::new(), - } - } - - fn print(&self, lock: &mut StderrLock<'_>) { - writeln!(lock, "| Phase | Time (ms) \ - | Time (%) | Queries | Hits (%)") - .unwrap(); - writeln!(lock, "| ---------------- | -------------- \ - | -------- | -------------- | --------") - .unwrap(); - - let total_time = ($(self.times.$name + )* 0) as f32; - - $( - let (hits, total) = self.query_counts.$name; - let (hits, total) = if total > 0 { - (format!("{:.2}", - (((hits as f32) / (total as f32)) * 100.0)), total.to_string()) - } else { - (String::new(), String::new()) - }; - - writeln!( - lock, - "| {0: <16} | {1: <14} | {2: <8.2} | {3: <14} | {4: <8}", - stringify!($name), - self.times.$name / 1_000_000, - ((self.times.$name as f32) / total_time) * 100.0, - total, - hits, - ).unwrap(); - )* - } - - fn json(&self) -> String { - let mut json = String::from("["); - - $( - let (hits, total) = self.query_counts.$name; +use std::io::{BufWriter, Write}; +use std::mem; +use std::process; +use std::thread::ThreadId; +use std::time::{Duration, Instant, SystemTime}; - //normalize hits to 0% - let hit_percent = - if total > 0 { - ((hits as f32) / (total as f32)) * 100.0 - } else { - 0.0 - }; +use crate::session::config::Options; - json.push_str(&format!( - "{{ \"category\": \"{}\", \"time_ms\": {},\ - \"query_count\": {}, \"query_hits\": {} }},", - stringify!($name), - self.times.$name / 1_000_000, - total, - format!("{:.2}", hit_percent) - )); - )* +use rustc_data_structures::fx::FxHashMap; - //remove the trailing ',' character - json.pop(); - - json.push(']'); - - json - } - } - } -} - -define_categories! { +#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)] +pub enum ProfileCategory { Parsing, Expansion, TypeChecking, @@ -137,141 +21,423 @@ define_categories! 
{ Other, } +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum ProfilerEvent { + QueryStart { query_name: &'static str, category: ProfileCategory, time: u64 }, + QueryEnd { query_name: &'static str, category: ProfileCategory, time: u64 }, + GenericActivityStart { category: ProfileCategory, label: Cow<'static, str>, time: u64 }, + GenericActivityEnd { category: ProfileCategory, label: Cow<'static, str>, time: u64 }, + IncrementalLoadResultStart { query_name: &'static str, time: u64 }, + IncrementalLoadResultEnd { query_name: &'static str, time: u64 }, + QueryCacheHit { query_name: &'static str, category: ProfileCategory, time: u64 }, + QueryCount { query_name: &'static str, category: ProfileCategory, count: usize, time: u64 }, + QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: u64 }, + QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: u64 }, +} + +impl ProfilerEvent { + fn timestamp(&self) -> u64 { + use self::ProfilerEvent::*; + + match self { + QueryStart { time, .. } | + QueryEnd { time, .. } | + GenericActivityStart { time, .. } | + GenericActivityEnd { time, .. } | + QueryCacheHit { time, .. } | + QueryCount { time, .. } | + IncrementalLoadResultStart { time, .. } | + IncrementalLoadResultEnd { time, .. } | + QueryBlockedStart { time, .. } | + QueryBlockedEnd { time, .. } => *time + } + } +} + +fn thread_id_to_u64(tid: ThreadId) -> u64 { + unsafe { mem::transmute::(tid) } +} + pub struct SelfProfiler { - timer_stack: Vec, - data: CategoryData, - current_timer: Instant, + events: FxHashMap>, + start_time: SystemTime, + start_instant: Instant, } impl SelfProfiler { pub fn new() -> SelfProfiler { - let mut profiler = SelfProfiler { - timer_stack: Vec::new(), - data: CategoryData::new(), - current_timer: Instant::now(), + let profiler = SelfProfiler { + events: Default::default(), + start_time: SystemTime::now(), + start_instant: Instant::now(), }; - profiler.start_activity(ProfileCategory::Other); - profiler } - pub fn start_activity(&mut self, category: ProfileCategory) { - match self.timer_stack.last().cloned() { - None => { - self.current_timer = Instant::now(); - }, - Some(current_category) if current_category == category => { - //since the current category is the same as the new activity's category, - //we don't need to do anything with the timer, we just need to push it on the stack - } - Some(current_category) => { - let elapsed = self.stop_timer(); - - //record the current category's time - let new_time = self.data.times.get(current_category) + elapsed; - self.data.times.set(current_category, new_time); - } - } - - //push the new category - self.timer_stack.push(category); + #[inline] + pub fn start_activity( + &mut self, + category: ProfileCategory, + label: impl Into>, + ) { + self.record(ProfilerEvent::GenericActivityStart { + category, + label: label.into(), + time: self.get_time_from_start(), + }) } - pub fn record_query(&mut self, category: ProfileCategory) { - let (hits, total) = *self.data.query_counts.get(category); - self.data.query_counts.set(category, (hits, total + 1)); + #[inline] + pub fn end_activity( + &mut self, + category: ProfileCategory, + label: impl Into>, + ) { + self.record(ProfilerEvent::GenericActivityEnd { + category, + label: label.into(), + time: self.get_time_from_start(), + }) } - pub fn record_query_hit(&mut self, category: ProfileCategory) { - let (hits, total) = *self.data.query_counts.get(category); - self.data.query_counts.set(category, (hits + 1, total)); + #[inline] + pub fn 
record_computed_queries( + &mut self, + query_name: &'static str, + category: ProfileCategory, + count: usize) + { + self.record(ProfilerEvent::QueryCount { + query_name, + category, + count, + time: self.get_time_from_start(), + }) } - pub fn end_activity(&mut self, category: ProfileCategory) { - match self.timer_stack.pop() { - None => bug!("end_activity() was called but there was no running activity"), - Some(c) => - assert!( - c == category, - "end_activity() was called but a different activity was running"), - } - - //check if the new running timer is in the same category as this one - //if it is, we don't need to do anything - if let Some(c) = self.timer_stack.last() { - if *c == category { - return; - } - } - - //the new timer is different than the previous, - //so record the elapsed time and start a new timer - let elapsed = self.stop_timer(); - let new_time = self.data.times.get(category) + elapsed; - self.data.times.set(category, new_time); + #[inline] + pub fn record_query_hit(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryCacheHit { + query_name, + category, + time: self.get_time_from_start(), + }) } - fn stop_timer(&mut self) -> u64 { - let elapsed = if cfg!(windows) { - // On Windows, timers don't always appear to be monotonic (see #51648) - // which can lead to panics when calculating elapsed time. - // Work around this by testing to see if the current time is less than - // our recorded time, and if it is, just returning 0. - let now = Instant::now(); - if self.current_timer >= now { - Duration::new(0, 0) - } else { - self.current_timer.elapsed() - } - } else { - self.current_timer.elapsed() - }; - - self.current_timer = Instant::now(); - - (elapsed.as_secs() * 1_000_000_000) + (elapsed.subsec_nanos() as u64) + #[inline] + pub fn start_query(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryStart { + query_name, + category, + time: self.get_time_from_start(), + }); } - pub fn print_results(&mut self, opts: &Options) { - self.end_activity(ProfileCategory::Other); + #[inline] + pub fn end_query(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryEnd { + query_name, + category, + time: self.get_time_from_start(), + }) + } - assert!( - self.timer_stack.is_empty(), - "there were timers running when print_results() was called"); + #[inline] + pub fn incremental_load_result_start(&mut self, query_name: &'static str) { + self.record(ProfilerEvent::IncrementalLoadResultStart { + query_name, + time: self.get_time_from_start(), + }) + } - let out = io::stderr(); - let mut lock = out.lock(); + #[inline] + pub fn incremental_load_result_end(&mut self, query_name: &'static str) { + self.record(ProfilerEvent::IncrementalLoadResultEnd { + query_name, + time: self.get_time_from_start(), + }) + } - let crate_name = - opts.crate_name - .as_ref() - .map(|n| format!(" for {}", n)) - .unwrap_or_default(); + #[inline] + pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryBlockedStart { + query_name, + category, + time: self.get_time_from_start(), + }) + } - writeln!(lock, "Self profiling results{}:", crate_name).unwrap(); - writeln!(lock).unwrap(); + #[inline] + pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) { + self.record(ProfilerEvent::QueryBlockedEnd { + query_name, + category, + time: self.get_time_from_start(), + }) + } - 
self.data.print(&mut lock); + #[inline] + fn record(&mut self, event: ProfilerEvent) { + let thread_id = std::thread::current().id(); + let events = self.events.entry(thread_id).or_default(); - writeln!(lock).unwrap(); - writeln!(lock, "Optimization level: {:?}", opts.optimize).unwrap(); + events.push(event); + } - let incremental = if opts.incremental.is_some() { "on" } else { "off" }; - writeln!(lock, "Incremental: {}", incremental).unwrap(); + #[inline] + fn get_time_from_start(&self) -> u64 { + let duration = Instant::now() - self.start_instant; + duration.as_nanos() as u64 } - pub fn save_results(&self, opts: &Options) { - let category_data = self.data.json(); - let compilation_options = - format!("{{ \"optimization_level\": \"{:?}\", \"incremental\": {} }}", - opts.optimize, - if opts.incremental.is_some() { "true" } else { "false" }); + pub fn dump_raw_events(&self, opts: &Options) { + use self::ProfilerEvent::*; + + let pid = process::id(); + + let filename = + format!("{}.profile_events.json", opts.crate_name.clone().unwrap_or_default()); + + let mut file = BufWriter::new(fs::File::create(filename).unwrap()); + + let threads: Vec<_> = + self.events + .keys() + .into_iter() + .map(|tid| format!("{}", thread_id_to_u64(*tid))) + .collect(); + + write!(file, + "{{\ + \"processes\": {{\ + \"{}\": {{\ + \"threads\": [{}],\ + \"crate_name\": \"{}\",\ + \"opt_level\": \"{:?}\",\ + \"incremental\": {}\ + }}\ + }},\ + \"events\": [\ + ", + pid, + threads.join(","), + opts.crate_name.clone().unwrap_or_default(), + opts.optimize, + if opts.incremental.is_some() { "true" } else { "false" }, + ).unwrap(); + + let mut is_first = true; + for (thread_id, events) in &self.events { + let thread_id = thread_id_to_u64(*thread_id); + + for event in events { + if is_first { + is_first = false; + } else { + writeln!(file, ",").unwrap(); + } - let json = format!("{{ \"category_data\": {}, \"compilation_options\": {} }}", - category_data, - compilation_options); + let (secs, nanos) = { + let time = self.start_time + Duration::from_nanos(event.timestamp()); + let time_since_unix = + time.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default(); + (time_since_unix.as_secs(), time_since_unix.subsec_nanos()) + }; + + match event { + QueryStart { query_name, category, time: _ } => + write!(file, + "{{ \ + \"QueryStart\": {{ \ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + secs, + nanos, + thread_id, + ).unwrap(), + QueryEnd { query_name, category, time: _ } => + write!(file, + "{{\ + \"QueryEnd\": {{\ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + secs, + nanos, + thread_id, + ).unwrap(), + GenericActivityStart { category, label, time: _ } => + write!(file, + "{{ + \"GenericActivityStart\": {{\ + \"category\": \"{:?}\",\ + \"label\": \"{}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + category, + label, + secs, + nanos, + thread_id, + ).unwrap(), + GenericActivityEnd { category, label, time: _ } => + write!(file, + "{{\ + \"GenericActivityEnd\": {{\ + \"category\": \"{:?}\",\ + \"label\": \"{}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + category, + label, + secs, + nanos, + thread_id, + ).unwrap(), + QueryCacheHit { query_name, category, time: _ } => + 
write!(file, + "{{\ + \"QueryCacheHit\": {{\ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + secs, + nanos, + thread_id, + ).unwrap(), + QueryCount { query_name, category, count, time: _ } => + write!(file, + "{{\ + \"QueryCount\": {{\ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"count\": {},\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + count, + secs, + nanos, + thread_id, + ).unwrap(), + IncrementalLoadResultStart { query_name, time: _ } => + write!(file, + "{{\ + \"IncrementalLoadResultStart\": {{\ + \"query_name\": \"{}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + secs, + nanos, + thread_id, + ).unwrap(), + IncrementalLoadResultEnd { query_name, time: _ } => + write!(file, + "{{\ + \"IncrementalLoadResultEnd\": {{\ + \"query_name\": \"{}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + secs, + nanos, + thread_id, + ).unwrap(), + QueryBlockedStart { query_name, category, time: _ } => + write!(file, + "{{\ + \"QueryBlockedStart\": {{\ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + secs, + nanos, + thread_id, + ).unwrap(), + QueryBlockedEnd { query_name, category, time: _ } => + write!(file, + "{{\ + \"QueryBlockedEnd\": {{\ + \"query_name\": \"{}\",\ + \"category\": \"{:?}\",\ + \"time\": {{\ + \"secs\": {},\ + \"nanos\": {}\ + }},\ + \"thread_id\": {}\ + }}\ + }}", + query_name, + category, + secs, + nanos, + thread_id, + ).unwrap(), + } + } + } - fs::write("self_profiler_results.json", json).unwrap(); + write!(file, "] }}").unwrap(); } } diff --git a/src/librustc/util/time_graph.rs b/src/librustc/util/time_graph.rs deleted file mode 100644 index 3ba4e4ddbb13b..0000000000000 --- a/src/librustc/util/time_graph.rs +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use rustc_data_structures::fx::FxHashMap; -use std::fs::File; -use std::io::prelude::*; -use std::marker::PhantomData; -use std::mem; -use std::sync::{Arc, Mutex}; -use std::time::Instant; - -const OUTPUT_WIDTH_IN_PX: u64 = 1000; -const TIME_LINE_HEIGHT_IN_PX: u64 = 20; -const TIME_LINE_HEIGHT_STRIDE_IN_PX: usize = 30; - -#[derive(Clone)] -struct Timing { - start: Instant, - end: Instant, - work_package_kind: WorkPackageKind, - name: String, - events: Vec<(String, Instant)>, -} - -#[derive(Clone, Copy, Hash, Eq, PartialEq, Debug)] -pub struct TimelineId(pub usize); - -#[derive(Clone)] -struct PerThread { - timings: Vec, - open_work_package: Option<(Instant, WorkPackageKind, String)>, -} - -#[derive(Clone)] -pub struct TimeGraph { - data: Arc>>, -} - -#[derive(Clone, Copy)] -pub struct WorkPackageKind(pub &'static [&'static str]); - -pub struct Timeline { - token: Option, -} - -struct RaiiToken { - graph: TimeGraph, - timeline: TimelineId, - events: Vec<(String, Instant)>, - // The token must not be Send: - _marker: PhantomData<*const ()> -} - - -impl Drop for RaiiToken { - fn drop(&mut self) { - self.graph.end(self.timeline, mem::replace(&mut self.events, Vec::new())); - } -} - -impl TimeGraph { - pub fn new() -> TimeGraph { - TimeGraph { - data: Arc::new(Mutex::new(FxHashMap::default())) - } - } - - pub fn start(&self, - timeline: TimelineId, - work_package_kind: WorkPackageKind, - name: &str) -> Timeline { - { - let mut table = self.data.lock().unwrap(); - - let data = table.entry(timeline).or_insert(PerThread { - timings: Vec::new(), - open_work_package: None, - }); - - assert!(data.open_work_package.is_none()); - data.open_work_package = Some((Instant::now(), work_package_kind, name.to_string())); - } - - Timeline { - token: Some(RaiiToken { - graph: self.clone(), - timeline, - events: Vec::new(), - _marker: PhantomData, - }), - } - } - - fn end(&self, timeline: TimelineId, events: Vec<(String, Instant)>) { - let end = Instant::now(); - - let mut table = self.data.lock().unwrap(); - let data = table.get_mut(&timeline).unwrap(); - - if let Some((start, work_package_kind, name)) = data.open_work_package.take() { - data.timings.push(Timing { - start, - end, - work_package_kind, - name, - events, - }); - } else { - bug!("end timing without start?") - } - } - - pub fn dump(&self, output_filename: &str) { - let table = self.data.lock().unwrap(); - - for data in table.values() { - assert!(data.open_work_package.is_none()); - } - - let mut threads: Vec = - table.values().map(|data| data.clone()).collect(); - - threads.sort_by_key(|timeline| timeline.timings[0].start); - - let earliest_instant = threads[0].timings[0].start; - let latest_instant = threads.iter() - .map(|timeline| timeline.timings - .last() - .unwrap() - .end) - .max() - .unwrap(); - let max_distance = distance(earliest_instant, latest_instant); - - let mut file = File::create(format!("{}.html", output_filename)).unwrap(); - - writeln!(file, " - - - - - -

- ").unwrap(); - - let mut idx = 0; - for thread in threads.iter() { - for timing in &thread.timings { - let colors = timing.work_package_kind.0; - let height = TIME_LINE_HEIGHT_STRIDE_IN_PX * timing.events.len(); - writeln!(file, "
", - idx, - colors[idx % colors.len()], - height).unwrap(); - idx += 1; - let max = distance(timing.start, timing.end); - for (i, &(ref event, time)) in timing.events.iter().enumerate() { - let i = i as u64; - let time = distance(timing.start, time); - let at = normalize(time, max, OUTPUT_WIDTH_IN_PX); - writeln!(file, "{}", - at, - TIME_LINE_HEIGHT_IN_PX * i, - event).unwrap(); - } - writeln!(file, "
").unwrap(); - } - } - - writeln!(file, " - - - ").unwrap(); - } -} - -impl Timeline { - pub fn noop() -> Timeline { - Timeline { token: None } - } - - /// Record an event which happened at this moment on this timeline. - /// - /// Events are displayed in the eventual HTML output where you can click on - /// a particular timeline and it'll expand to all of the events that - /// happened on that timeline. This can then be used to drill into a - /// particular timeline and see what events are happening and taking the - /// most time. - pub fn record(&mut self, name: &str) { - if let Some(ref mut token) = self.token { - token.events.push((name.to_string(), Instant::now())); - } - } -} - -fn distance(zero: Instant, x: Instant) -> u64 { - - let duration = x.duration_since(zero); - (duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64) // / div -} - -fn normalize(distance: u64, max: u64, max_pixels: u64) -> u64 { - (max_pixels * distance) / max -} - diff --git a/src/librustc_allocator/Cargo.toml b/src/librustc_allocator/Cargo.toml index 03d33f413c807..cf6c598bfb17b 100644 --- a/src/librustc_allocator/Cargo.toml +++ b/src/librustc_allocator/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_allocator" version = "0.0.0" +edition = "2018" [lib] path = "lib.rs" diff --git a/src/librustc_allocator/expand.rs b/src/librustc_allocator/expand.rs index 5df68d49af5f7..758a0d63886b1 100644 --- a/src/librustc_allocator/expand.rs +++ b/src/librustc_allocator/expand.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use log::debug; use rustc::middle::allocator::AllocatorKind; -use rustc_errors; -use smallvec::SmallVec; +use smallvec::{smallvec, SmallVec}; use syntax::{ ast::{ self, Arg, Attribute, Crate, Expr, FnHeader, Generics, Ident, Item, ItemKind, @@ -26,22 +16,22 @@ use syntax::{ expand::ExpansionConfig, hygiene::{self, Mark, SyntaxContext}, }, - fold::{self, Folder}, + mut_visit::{self, MutVisitor}, parse::ParseSess, ptr::P, symbol::Symbol }; use syntax_pos::Span; -use {AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS}; +use crate::{AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS}; pub fn modify( sess: &ParseSess, resolver: &mut dyn Resolver, - krate: Crate, + krate: &mut Crate, crate_name: String, handler: &rustc_errors::Handler, -) -> ast::Crate { +) { ExpandAllocatorDirectives { handler, sess, @@ -49,7 +39,7 @@ pub fn modify( found: false, crate_name: Some(crate_name), in_submod: -1, // -1 to account for the "root" module - }.fold_crate(krate) + }.visit_crate(krate); } struct ExpandAllocatorDirectives<'a> { @@ -64,14 +54,14 @@ struct ExpandAllocatorDirectives<'a> { in_submod: isize, } -impl<'a> Folder for ExpandAllocatorDirectives<'a> { - fn fold_item(&mut self, item: P) -> SmallVec<[P; 1]> { +impl MutVisitor for ExpandAllocatorDirectives<'_> { + fn flat_map_item(&mut self, item: P) -> SmallVec<[P; 1]> { debug!("in submodule {}", self.in_submod); let name = if attr::contains_name(&item.attrs, "global_allocator") { "global_allocator" } else { - return fold::noop_fold_item(item, self); + return mut_visit::noop_flat_map_item(item, self); }; match item.node { ItemKind::Static(..) 
=> {} @@ -101,7 +91,9 @@ impl<'a> Folder for ExpandAllocatorDirectives<'a> { call_site: item.span, // use the call site of the static def_site: None, format: MacroAttribute(Symbol::intern(name)), - allow_internal_unstable: true, + allow_internal_unstable: Some(vec![ + Symbol::intern("rustc_attrs"), + ].into()), allow_internal_unsafe: false, local_inner_macros: false, edition: hygiene::default_edition(), @@ -149,25 +141,24 @@ impl<'a> Folder for ExpandAllocatorDirectives<'a> { let name = f.kind.fn_name("allocator_abi"); let allocator_abi = Ident::with_empty_ctxt(Symbol::gensym(&name)); let module = f.cx.item_mod(span, span, allocator_abi, Vec::new(), items); - let module = f.cx.monotonic_expander().fold_item(module).pop().unwrap(); + let module = f.cx.monotonic_expander().flat_map_item(module).pop().unwrap(); // Return the item and new submodule smallvec![item, module] } // If we enter a submodule, take note. - fn fold_mod(&mut self, m: Mod) -> Mod { + fn visit_mod(&mut self, m: &mut Mod) { debug!("enter submodule"); self.in_submod += 1; - let ret = fold::noop_fold_mod(m, self); + mut_visit::noop_visit_mod(m, self); self.in_submod -= 1; debug!("exit submodule"); - ret } - // `fold_mac` is disabled by default. Enable it here. - fn fold_mac(&mut self, mac: Mac) -> Mac { - fold::noop_fold_mac(mac, self) + // `visit_mac` is disabled by default. Enable it here. + fn visit_mac(&mut self, mac: &mut Mac) { + mut_visit::noop_visit_mac(mac, self) } } @@ -179,7 +170,7 @@ struct AllocFnFactory<'a> { cx: ExtCtxt<'a>, } -impl<'a> AllocFnFactory<'a> { +impl AllocFnFactory<'_> { fn allocator_fn(&self, method: &AllocatorMethod) -> P { let mut abi_args = Vec::new(); let mut i = 0; diff --git a/src/librustc_allocator/lib.rs b/src/librustc_allocator/lib.rs index a2017a4ed0ff9..a9e422fb238b8 100644 --- a/src/librustc_allocator/lib.rs +++ b/src/librustc_allocator/lib.rs @@ -1,25 +1,8 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![feature(nll)] #![feature(rustc_private)] -#[macro_use] extern crate log; -extern crate rustc; -extern crate rustc_data_structures; -extern crate rustc_errors; -extern crate rustc_target; -extern crate syntax; -extern crate syntax_pos; -#[macro_use] -extern crate smallvec; +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] pub mod expand; diff --git a/src/librustc_apfloat/Cargo.toml b/src/librustc_apfloat/Cargo.toml index 248f2d71f41e5..c7496a9547ea6 100644 --- a/src/librustc_apfloat/Cargo.toml +++ b/src/librustc_apfloat/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_apfloat" version = "0.0.0" +edition = "2018" [lib] name = "rustc_apfloat" diff --git a/src/librustc_apfloat/ieee.rs b/src/librustc_apfloat/ieee.rs index 60ddac1abfd46..9f68d770b9e87 100644 --- a/src/librustc_apfloat/ieee.rs +++ b/src/librustc_apfloat/ieee.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use {Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; -use {Float, FloatConvert, ParseError, Round, Status, StatusAnd}; +use crate::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; +use crate::{Float, FloatConvert, ParseError, Round, Status, StatusAnd}; use smallvec::{SmallVec, smallvec}; use std::cmp::{self, Ordering}; @@ -196,7 +186,7 @@ impl Semantics for X87DoubleExtendedS { /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity") /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN") /// exponent = 0, integer bit 1 ("pseudodenormal") - /// exponent!=0 nor all 1's, integer bit 0 ("unnormal") + /// exponent != 0 nor all 1's, integer bit 0 ("unnormal") /// At the moment, the first two are treated as NaNs, the second two as Normal. fn from_bits(bits: u128) -> IeeeFloat { let sign = bits & (1 << (Self::BITS - 1)); @@ -335,7 +325,7 @@ impl Neg for IeeeFloat { /// 1.01E-2 4 2 0.0101 /// 1.01E-2 4 1 1.01E-2 impl fmt::Display for IeeeFloat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let width = f.width().unwrap_or(3); let alternate = f.alternate(); @@ -624,7 +614,7 @@ impl fmt::Display for IeeeFloat { } impl fmt::Debug for IeeeFloat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}({:?} | {}{:?} * 2^{})", self, self.category, if self.sign { "-" } else { "+" }, @@ -1559,11 +1549,11 @@ impl IeeeFloat { } } - /// Returns TRUE if, when truncating the current number, with BIT the + /// Returns `true` if, when truncating the current number, with `bit` the /// new LSB, with the given lost fraction and rounding mode, the result /// would need to be rounded away from zero (i.e., by increasing the - /// signficand). This routine must work for Category::Zero of both signs, and - /// Category::Normal numbers. + /// signficand). This routine must work for `Category::Zero` of both signs, and + /// `Category::Normal` numbers. fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool { // NaNs and infinities should not have lost fractions. assert!(self.is_finite_non_zero() || self.is_zero()); @@ -2267,7 +2257,7 @@ impl Loss { more_significant } - /// Return the fraction lost were a bignum truncated losing the least + /// Returns the fraction lost were a bignum truncated losing the least /// significant `bits` bits. fn through_truncation(limbs: &[Limb], bits: usize) -> Loss { if bits == 0 { @@ -2330,12 +2320,12 @@ mod sig { Ordering::Equal } - /// Extract the given bit. + /// Extracts the given bit. pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool { limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0 } - /// Set the given bit. + /// Sets the given bit. pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) { limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS); } @@ -2345,13 +2335,13 @@ mod sig { limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS)); } - /// Shift `dst` left `bits` bits, subtract `bits` from its exponent. + /// Shifts `dst` left `bits` bits, subtract `bits` from its exponent. pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) { if bits > 0 { // Our exponent should not underflow. *exp = exp.checked_sub(bits as ExpInt).unwrap(); - // Jump is the inter-limb jump; shift is is intra-limb shift. + // Jump is the inter-limb jump; shift is the intra-limb shift. 
let jump = bits / LIMB_BITS; let shift = bits % LIMB_BITS; @@ -2377,7 +2367,7 @@ mod sig { } } - /// Shift `dst` right `bits` bits noting lost fraction. + /// Shifts `dst` right `bits` bits noting lost fraction. pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss { let loss = Loss::through_truncation(dst, bits); @@ -2385,7 +2375,7 @@ mod sig { // Our exponent should not overflow. *exp = exp.checked_add(bits as ExpInt).unwrap(); - // Jump is the inter-limb jump; shift is is intra-limb shift. + // Jump is the inter-limb jump; shift is the intra-limb shift. let jump = bits / LIMB_BITS; let shift = bits % LIMB_BITS; @@ -2413,7 +2403,7 @@ mod sig { loss } - /// Copy the bit vector of width `src_bits` from `src`, starting at bit SRC_LSB, + /// Copies the bit vector of width `src_bits` from `src`, starting at bit SRC_LSB, /// to `dst`, such that the bit SRC_LSB becomes the least significant bit of `dst`. /// All high bits above `src_bits` in `dst` are zero-filled. pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) { diff --git a/src/librustc_apfloat/lib.rs b/src/librustc_apfloat/lib.rs index c90191716010b..1b0bcdd0b5b48 100644 --- a/src/librustc_apfloat/lib.rs +++ b/src/librustc_apfloat/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Port of LLVM's APFloat software floating-point implementation from the //! following C++ sources (please update commit hash when backporting): //! @@ -40,28 +30,22 @@ //! //! This API is completely unstable and subject to change. -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![forbid(unsafe_code)] +#![deny(rust_2018_idioms)] #![feature(nll)] -#![feature(try_from)] // See librustc_cratesio_shim/Cargo.toml for a comment explaining this. #[allow(unused_extern_crates)] extern crate rustc_cratesio_shim; -#[macro_use] -extern crate bitflags; -extern crate smallvec; - use std::cmp::Ordering; use std::fmt; use std::ops::{Neg, Add, Sub, Mul, Div, Rem}; use std::ops::{AddAssign, SubAssign, MulAssign, DivAssign, RemAssign}; use std::str::FromStr; -bitflags! { +bitflags::bitflags! { /// IEEE-754R 7: Default exception handling. /// /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT. @@ -390,7 +374,7 @@ pub trait Float fn from_str_r(s: &str, round: Round) -> Result, ParseError>; fn to_bits(self) -> u128; - /// Convert a floating point number to an integer according to the + /// Converts a floating point number to an integer according to the /// rounding mode. In case of an invalid operation exception, /// deterministic values are returned, namely zero for NaNs and the /// minimal or maximal value respectively for underflow or overflow. @@ -403,7 +387,7 @@ pub trait Float /// /// The *is_exact output tells whether the result is exact, in the sense /// that converting it back to the original floating point type produces - /// the original value. This is almost equivalent to result==Status::OK, + /// the original value. 
This is almost equivalent to `result == Status::OK`, /// except for negative zeroes. fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd { let status; @@ -473,13 +457,13 @@ pub trait Float } } - /// IEEE-754R isSignMinus: Returns true if and only if the current value is + /// IEEE-754R isSignMinus: Returns whether the current value is /// negative. /// /// This applies to zeros and NaNs as well. fn is_negative(self) -> bool; - /// IEEE-754R isNormal: Returns true if and only if the current value is normal. + /// IEEE-754R isNormal: Returns whether the current value is normal. /// /// This implies that the current value of the float is not zero, subnormal, /// infinite, or NaN following the definition of normality from IEEE-754R. @@ -487,7 +471,7 @@ pub trait Float !self.is_denormal() && self.is_finite_non_zero() } - /// Returns true if and only if the current value is zero, subnormal, or + /// Returns `true` if the current value is zero, subnormal, or /// normal. /// /// This means that the value is not infinite or NaN. @@ -495,26 +479,26 @@ pub trait Float !self.is_nan() && !self.is_infinite() } - /// Returns true if and only if the float is plus or minus zero. + /// Returns `true` if the float is plus or minus zero. fn is_zero(self) -> bool { self.category() == Category::Zero } - /// IEEE-754R isSubnormal(): Returns true if and only if the float is a + /// IEEE-754R isSubnormal(): Returns whether the float is a /// denormal. fn is_denormal(self) -> bool; - /// IEEE-754R isInfinite(): Returns true if and only if the float is infinity. + /// IEEE-754R isInfinite(): Returns whether the float is infinity. fn is_infinite(self) -> bool { self.category() == Category::Infinity } - /// Returns true if and only if the float is a quiet or signaling NaN. + /// Returns `true` if the float is a quiet or signaling NaN. fn is_nan(self) -> bool { self.category() == Category::NaN } - /// Returns true if and only if the float is a signaling NaN. + /// Returns `true` if the float is a signaling NaN. fn is_signaling(self) -> bool; // Simple Queries @@ -533,19 +517,19 @@ pub trait Float self.is_zero() && self.is_negative() } - /// Returns true if and only if the number has the smallest possible non-zero + /// Returns `true` if the number has the smallest possible non-zero /// magnitude in the current semantics. fn is_smallest(self) -> bool { Self::SMALLEST.copy_sign(self).bitwise_eq(self) } - /// Returns true if and only if the number has the largest possible finite + /// Returns `true` if the number has the largest possible finite /// magnitude in the current semantics. fn is_largest(self) -> bool { Self::largest().copy_sign(self).bitwise_eq(self) } - /// Returns true if and only if the number is an exact integer. + /// Returns `true` if the number is an exact integer. fn is_integer(self) -> bool { // This could be made more efficient; I'm going for obviously correct. if !self.is_finite() { @@ -587,11 +571,11 @@ pub trait Float } pub trait FloatConvert: Float { - /// Convert a value of one floating point type to another. + /// Converts a value of one floating point type to another. /// The return value corresponds to the IEEE754 exceptions. 
*loses_info /// records whether the transformation lost information, i.e., whether /// converting the result back to the original type will produce the - /// original value (this is almost the same as return value==Status::OK, + /// original value (this is almost the same as return `value == Status::OK`, /// but there are edge cases where this is not so). fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd; fn convert(self, loses_info: &mut bool) -> StatusAnd { diff --git a/src/librustc_apfloat/ppc.rs b/src/librustc_apfloat/ppc.rs index aaf6b29a99e8c..ddccfd6ca623b 100644 --- a/src/librustc_apfloat/ppc.rs +++ b/src/librustc_apfloat/ppc.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use {Category, ExpInt, Float, FloatConvert, Round, ParseError, Status, StatusAnd}; -use ieee; +use crate::{Category, ExpInt, Float, FloatConvert, Round, ParseError, Status, StatusAnd}; +use crate::ieee; use std::cmp::Ordering; use std::fmt; @@ -134,7 +124,7 @@ impl Neg for DoubleFloat { } impl>> fmt::Display for DoubleFloat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(&Fallback::from(*self), f) } } diff --git a/src/librustc_apfloat/tests/ieee.rs b/src/librustc_apfloat/tests/ieee.rs index 6e06ea858efad..108b2114439d4 100644 --- a/src/librustc_apfloat/tests/ieee.rs +++ b/src/librustc_apfloat/tests/ieee.rs @@ -1,19 +1,7 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[macro_use] -extern crate rustc_apfloat; - use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO}; use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status}; use rustc_apfloat::ieee::{Half, Single, Double, Quad, X87DoubleExtended}; +use rustc_apfloat::unpack; trait SingleExt { fn from_f32(input: f32) -> Self; diff --git a/src/librustc_apfloat/tests/ppc.rs b/src/librustc_apfloat/tests/ppc.rs index 145c3ddc869df..02cdeb90a12be 100644 --- a/src/librustc_apfloat/tests/ppc.rs +++ b/src/librustc_apfloat/tests/ppc.rs @@ -1,15 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -extern crate rustc_apfloat; - use rustc_apfloat::{Category, Float, Round}; use rustc_apfloat::ppc::DoubleDouble; diff --git a/src/librustc_asan/Cargo.toml b/src/librustc_asan/Cargo.toml index 836caf22abfa5..df117de8720e0 100644 --- a/src/librustc_asan/Cargo.toml +++ b/src/librustc_asan/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] build = "build.rs" name = "rustc_asan" version = "0.0.0" +edition = "2018" [lib] name = "rustc_asan" @@ -11,7 +12,7 @@ test = false [build-dependencies] build_helper = { path = "../build_helper" } -cmake = "0.1.18" +cmake = "0.1.38" [dependencies] alloc = { path = "../liballoc" } diff --git a/src/librustc_asan/build.rs b/src/librustc_asan/build.rs index a5905df57c8cf..a2b4b090efb4f 100644 --- a/src/librustc_asan/build.rs +++ b/src/librustc_asan/build.rs @@ -1,16 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern crate build_helper; -extern crate cmake; - use std::env; use build_helper::sanitizer_lib_boilerplate; @@ -18,6 +5,8 @@ use cmake::Config; fn main() { if let Some(llvm_config) = env::var_os("LLVM_CONFIG") { + build_helper::restore_library_path(); + let (native, target) = match sanitizer_lib_boilerplate("asan") { Ok(native) => native, _ => return, diff --git a/src/librustc_asan/lib.rs b/src/librustc_asan/lib.rs index 47f917e40c1ff..3bdb86d313dcb 100644 --- a/src/librustc_asan/lib.rs +++ b/src/librustc_asan/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![sanitizer_runtime] #![feature(nll)] #![feature(sanitizer_runtime)] @@ -16,3 +6,5 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] + +#![deny(rust_2018_idioms)] diff --git a/src/librustc_borrowck/Cargo.toml b/src/librustc_borrowck/Cargo.toml index 3368bbf3855a5..f293739dec727 100644 --- a/src/librustc_borrowck/Cargo.toml +++ b/src/librustc_borrowck/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_borrowck" version = "0.0.0" +edition = "2018" [lib] name = "rustc_borrowck" @@ -13,8 +14,10 @@ test = false log = "0.4" syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } -graphviz = { path = "../libgraphviz" } +# for "clarity", rename the graphviz crate to dot; graphviz within `borrowck` +# refers to the borrowck-specific graphviz adapter traits. 
+dot = { path = "../libgraphviz", package = "graphviz" } rustc = { path = "../librustc" } rustc_mir = { path = "../librustc_mir" } -rustc_errors = { path = "../librustc_errors" } -rustc_data_structures = { path = "../librustc_data_structures" } \ No newline at end of file +errors = { path = "../librustc_errors", package = "rustc_errors" } +rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/librustc_borrowck/borrowck/README.md b/src/librustc_borrowck/borrowck/README.md index a05c56e3629a3..6c47e8784e025 100644 --- a/src/librustc_borrowck/borrowck/README.md +++ b/src/librustc_borrowck/borrowck/README.md @@ -3,7 +3,7 @@ > WARNING: This README is more or less obsolete, and will be removed > soon! The new system is described in the [rustc guide]. -[rustc guide]: https://rust-lang.github.io/rustc-guide/mir/borrowck.html +[rustc guide]: https://rust-lang.github.io/rustc-guide/borrow_check.html This pass has the job of enforcing memory safety. This is a subtle topic. This docs aim to explain both the practice and the theory diff --git a/src/librustc_borrowck/borrowck/check_loans.rs b/src/librustc_borrowck/borrowck/check_loans.rs index 51afb43d973b7..d2d5c4fe85c90 100644 --- a/src/librustc_borrowck/borrowck/check_loans.rs +++ b/src/librustc_borrowck/borrowck/check_loans.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // ---------------------------------------------------------------------- // Checking loans // @@ -17,21 +7,21 @@ // 3. assignments do not affect things loaned out as immutable // 4. 
moves do not affect things loaned out in any way -use self::UseError::*; +use UseError::*; -use borrowck::*; -use borrowck::InteriorKind::{InteriorElement, InteriorField}; +use crate::borrowck::*; +use crate::borrowck::InteriorKind::{InteriorElement, InteriorField}; use rustc::middle::expr_use_visitor as euv; use rustc::middle::expr_use_visitor::MutateMode; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; use rustc::ty::{self, TyCtxt, RegionKind}; -use syntax::ast; use syntax_pos::Span; use rustc::hir; use rustc::hir::Node; use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin}; +use log::debug; use std::rc::Rc; @@ -98,20 +88,19 @@ struct CheckLoanCtxt<'a, 'tcx: 'a> { impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { fn consume(&mut self, - consume_id: ast::NodeId, + consume_id: hir::HirId, consume_span: Span, cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { debug!("consume(consume_id={}, cmt={:?}, mode={:?})", consume_id, cmt, mode); - let hir_id = self.tcx().hir().node_to_hir_id(consume_id); - self.consume_common(hir_id.local_id, consume_span, cmt, mode); + self.consume_common(consume_id.local_id, consume_span, cmt, mode); } fn matched_pat(&mut self, _matched_pat: &hir::Pat, - _cmt: &mc::cmt_, + _cmt: &mc::cmt_<'_>, _mode: euv::MatchMode) { } fn consume_pat(&mut self, @@ -127,7 +116,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { } fn borrow(&mut self, - borrow_id: ast::NodeId, + borrow_id: hir::HirId, borrow_span: Span, cmt: &mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>, @@ -139,22 +128,21 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { borrow_id, cmt, loan_region, bk, loan_cause); - let hir_id = self.tcx().hir().node_to_hir_id(borrow_id); if let Some(lp) = opt_loan_path(cmt) { let moved_value_use_kind = match loan_cause { euv::ClosureCapture(_) => MovedInCapture, _ => MovedInUse, }; - self.check_if_path_is_moved(hir_id.local_id, borrow_span, moved_value_use_kind, &lp); + self.check_if_path_is_moved(borrow_id.local_id, borrow_span, moved_value_use_kind, &lp); } - self.check_for_conflicting_loans(hir_id.local_id); + self.check_for_conflicting_loans(borrow_id.local_id); self.check_for_loans_across_yields(cmt, loan_region, borrow_span); } fn mutate(&mut self, - assignment_id: ast::NodeId, + assignment_id: hir::HirId, assignment_span: Span, assignee_cmt: &mc::cmt_<'tcx>, mode: euv::MutateMode) @@ -185,11 +173,10 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for CheckLoanCtxt<'a, 'tcx> { } } } - self.check_assignment(self.tcx().hir().node_to_hir_id(assignment_id).local_id, - assignment_span, assignee_cmt); + self.check_assignment(assignment_id.local_id, assignment_span, assignee_cmt); } - fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) { } + fn decl_without_init(&mut self, _id: hir::HirId, _span: Span) { } } pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, @@ -197,12 +184,12 @@ pub fn check_loans<'a, 'b, 'c, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &move_data::FlowedMoveData<'c, 'tcx>, all_loans: &[Loan<'tcx>], body: &hir::Body) { - debug!("check_loans(body id={})", body.value.id); + debug!("check_loans(body id={})", body.value.hir_id); let def_id = bccx.tcx.hir().body_owner_def_id(body.id()); - let node_id = bccx.tcx.hir().as_local_node_id(def_id).unwrap(); - let movable_generator = !match bccx.tcx.hir().get(node_id) { + let hir_id = bccx.tcx.hir().as_local_hir_id(def_id).unwrap(); + let movable_generator = !match 
bccx.tcx.hir().get_by_hir_id(hir_id) { Node::Expr(&hir::Expr { node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)), .. @@ -248,7 +235,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { { //! Iterates over each loan that has been issued //! on entrance to `node`, regardless of whether it is - //! actually *in scope* at that point. Sometimes loans + //! actually *in scope* at that point. Sometimes loans //! are issued for future scopes and thus they may have been //! *issued* but not yet be in effect. @@ -567,12 +554,8 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { if new_loan.loan_path.has_fork(&old_loan.loan_path) && common.is_some() { let nl = self.bccx.loan_path_to_string(&common.unwrap()); let ol = nl.clone(); - let new_loan_msg = format!(" (via `{}`)", - self.bccx.loan_path_to_string( - &new_loan.loan_path)); - let old_loan_msg = format!(" (via `{}`)", - self.bccx.loan_path_to_string( - &old_loan.loan_path)); + let new_loan_msg = self.bccx.loan_path_to_string(&new_loan.loan_path); + let old_loan_msg = self.bccx.loan_path_to_string(&old_loan.loan_path); (nl, ol, new_loan_msg, old_loan_msg) } else { (self.bccx.loan_path_to_string(&new_loan.loan_path), @@ -903,11 +886,10 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { // Check for reassignments to (immutable) local variables. This // needs to be done here instead of in check_loans because we // depend on move data. - if let Categorization::Local(local_id) = assignee_cmt.cat { + if let Categorization::Local(hir_id) = assignee_cmt.cat { let lp = opt_loan_path(assignee_cmt).unwrap(); self.move_data.each_assignment_of(assignment_id, &lp, |assign| { if assignee_cmt.mutbl.is_mutable() { - let hir_id = self.bccx.tcx.hir().node_to_hir_id(local_id); self.bccx.used_mut_nodes.borrow_mut().insert(hir_id); } else { self.bccx.report_reassigned_immutable_variable( @@ -924,7 +906,7 @@ impl<'a, 'tcx> CheckLoanCtxt<'a, 'tcx> { pub fn report_illegal_mutation(&self, span: Span, loan_path: &LoanPath<'tcx>, - loan: &Loan) { + loan: &Loan<'_>) { self.bccx.cannot_assign_to_borrowed( span, loan.span, &self.bccx.loan_path_to_string(loan_path), Origin::Ast) .emit(); diff --git a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs index 9fa541cdd07b4..a15d3d10adf0e 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/gather_moves.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Computes moves. 
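The import rewrites in these borrowck files (`use borrowck::*;` becoming `use crate::borrowck::*;`, and `#[macro_use] extern crate log;` replaced by `use log::debug;`) are the mechanical part of the move to the 2018 edition. A hedged side-by-side sketch, with a placeholder module name rather than the crate's real layout:

```rust
// 2015 edition (removed by the diff):
//     #[macro_use] extern crate log;
//     use borrowck::move_data::MoveData;   // path resolved from the crate root
//
// 2018 edition (added by the diff): explicit `crate::` prefix and per-item
// macro imports. `my_module` is a placeholder, not a real borrowck module.
mod my_module {
    pub struct MoveData;
}

use crate::my_module::MoveData;
use log::debug; // assumes `log = "0.4"` in Cargo.toml, as the manifest hunk shows

fn gather(_data: MoveData) {
    debug!("gathering moves"); // macros are now imported like any other item
}
```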
-use borrowck::*; -use borrowck::gather_loans::move_error::MovePlace; -use borrowck::gather_loans::move_error::{MoveError, MoveErrorCollector}; -use borrowck::move_data::*; +use crate::borrowck::*; +use crate::borrowck::gather_loans::move_error::MovePlace; +use crate::borrowck::gather_loans::move_error::{MoveError, MoveErrorCollector}; +use crate::borrowck::move_data::*; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; @@ -21,10 +11,10 @@ use rustc::middle::mem_categorization::InteriorOffsetKind as Kind; use rustc::ty::{self, Ty}; use std::rc::Rc; -use syntax::ast; use syntax_pos::Span; use rustc::hir::*; use rustc::hir::Node; +use log::debug; struct GatherMoveInfo<'c, 'tcx: 'c> { id: hir::ItemLocalId, @@ -57,9 +47,9 @@ pub enum PatternSource<'tcx> { /// with a reference to the let fn get_pattern_source<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &Pat) -> PatternSource<'tcx> { - let parent = tcx.hir().get_parent_node(pat.id); + let parent = tcx.hir().get_parent_node_by_hir_id(pat.hir_id); - match tcx.hir().get(parent) { + match tcx.hir().get_by_hir_id(parent) { Node::Expr(ref e) => { // the enclosing expression must be a `match` or something else assert!(match e.node { @@ -76,11 +66,10 @@ fn get_pattern_source<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pat: &Pat) -> Patte pub fn gather_decl<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, move_data: &MoveData<'tcx>, - var_id: ast::NodeId, + var_id: hir::HirId, var_ty: Ty<'tcx>) { let loan_path = Rc::new(LoanPath::new(LpVar(var_id), var_ty)); - let hir_id = bccx.tcx.hir().node_to_hir_id(var_id); - move_data.add_move(bccx.tcx, loan_path, hir_id.local_id, Declared); + move_data.add_move(bccx.tcx, loan_path, var_id.local_id, Declared); } pub fn gather_move_from_expr<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, diff --git a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs index ccc091a6a1ce6..9680dd4ce2faf 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/lifetime.rs @@ -1,25 +1,16 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module implements the check that the lifetime of a borrow //! does not exceed the lifetime of the value being borrowed. -use borrowck::*; +use crate::borrowck::*; +use rustc::hir::HirId; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; use rustc::ty; -use syntax::ast; use syntax_pos::Span; +use log::debug; type R = Result<(),()>; @@ -60,9 +51,9 @@ struct GuaranteeLifetimeContext<'a, 'tcx: 'a> { } impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { - fn check(&self, cmt: &mc::cmt_<'tcx>, discr_scope: Option) -> R { + fn check(&self, cmt: &mc::cmt_<'tcx>, discr_scope: Option) -> R { //! Main routine. Walks down `cmt` until we find the - //! "guarantor". Reports an error if `self.loan_region` is + //! "guarantor". Reports an error if `self.loan_region` is //! larger than scope of `cmt`. 
debug!("guarantee_lifetime.check(cmt={:?}, loan_region={:?})", cmt, @@ -113,8 +104,7 @@ impl<'a, 'tcx> GuaranteeLifetimeContext<'a, 'tcx> { Categorization::Upvar(..) => { self.bccx.tcx.mk_region(ty::ReScope(self.item_scope)) } - Categorization::Local(local_id) => { - let hir_id = self.bccx.tcx.hir().node_to_hir_id(local_id); + Categorization::Local(hir_id) => { self.bccx.tcx.mk_region(ty::ReScope( self.bccx.region_scope_tree.var_scope(hir_id.local_id))) } diff --git a/src/librustc_borrowck/borrowck/gather_loans/mod.rs b/src/librustc_borrowck/borrowck/gather_loans/mod.rs index 7ed4d4910d71d..3892a18b1400e 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/mod.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // ---------------------------------------------------------------------- // Gathering loans // @@ -16,19 +6,19 @@ // their associated scopes. In phase two, checking loans, we will then make // sure that all of these loans are honored. -use borrowck::*; -use borrowck::move_data::MoveData; +use crate::borrowck::*; +use crate::borrowck::move_data::MoveData; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::region; use rustc::ty::{self, TyCtxt}; -use syntax::ast; use syntax_pos::Span; use rustc::hir; +use log::debug; -use self::restrictions::RestrictionResult; +use restrictions::RestrictionResult; mod lifetime; mod restrictions; @@ -77,7 +67,7 @@ struct GatherLoanCtxt<'a, 'tcx: 'a> { impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { fn consume(&mut self, - consume_id: ast::NodeId, + consume_id: hir::HirId, _consume_span: Span, cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode) { @@ -88,7 +78,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { euv::Move(move_reason) => { gather_moves::gather_move_from_expr( self.bccx, &self.move_data, &mut self.move_error_collector, - self.bccx.tcx.hir().node_to_hir_id(consume_id).local_id, cmt, move_reason); + consume_id.local_id, cmt, move_reason); } euv::Copy => { } } @@ -124,7 +114,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { } fn borrow(&mut self, - borrow_id: ast::NodeId, + borrow_id: hir::HirId, borrow_span: Span, cmt: &mc::cmt_<'tcx>, loan_region: ty::Region<'tcx>, @@ -135,8 +125,8 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { bk={:?}, loan_cause={:?})", borrow_id, cmt, loan_region, bk, loan_cause); - let hir_id = self.bccx.tcx.hir().node_to_hir_id(borrow_id); - self.guarantee_valid(hir_id.local_id, + + self.guarantee_valid(borrow_id.local_id, borrow_span, cmt, bk, @@ -145,7 +135,7 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { } fn mutate(&mut self, - assignment_id: ast::NodeId, + assignment_id: hir::HirId, assignment_span: Span, assignee_cmt: &mc::cmt_<'tcx>, _: euv::MutateMode) @@ -155,12 +145,30 @@ impl<'a, 'tcx> euv::Delegate<'tcx> for GatherLoanCtxt<'a, 'tcx> { assignee_cmt); } - fn decl_without_init(&mut self, id: ast::NodeId, _span: Span) { + fn decl_without_init(&mut self, id: hir::HirId, _span: Span) { let ty = self.bccx .tables - 
.node_id_to_type(self.bccx.tcx.hir().node_to_hir_id(id)); + .node_type(id); gather_moves::gather_decl(self.bccx, &self.move_data, id, ty); } + + fn nested_body(&mut self, body_id: hir::BodyId) { + debug!("nested_body(body_id={:?})", body_id); + // rust-lang/rust#58776: MIR and AST borrow check disagree on where + // certain closure errors are reported. As such migrate borrowck has to + // operate at the level of items, rather than bodies. Check if the + // contained closure had any errors and set `signalled_any_error` if it + // has. + let bccx = self.bccx; + if bccx.tcx.migrate_borrowck() { + if let SignalledError::NoErrorsSeen = bccx.signalled_any_error.get() { + let closure_def_id = bccx.tcx.hir().body_owner_def_id(body_id); + debug!("checking closure: {:?}", closure_def_id); + + bccx.signalled_any_error.set(bccx.tcx.borrowck(closure_def_id).signalled_any_error); + } + } + } } /// Implements the A-* rules in README.md. @@ -246,7 +254,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { /// Guarantees that `cmt` is assignable, or reports an error. fn guarantee_assignment_valid(&mut self, - assignment_id: ast::NodeId, + assignment_id: hir::HirId, assignment_span: Span, cmt: &mc::cmt_<'tcx>) { @@ -280,8 +288,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { self.mark_loan_path_as_mutated(&lp); } gather_moves::gather_assignment(self.bccx, &self.move_data, - self.bccx.tcx.hir().node_to_hir_id(assignment_id) - .local_id, + assignment_id.local_id, assignment_span, lp); } @@ -294,7 +301,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { } /// Guarantees that `addr_of(cmt)` will be valid for the duration of `static_scope_r`, or - /// reports an error. This may entail taking out loans, which will be added to the + /// reports an error. This may entail taking out loans, which will be added to the /// `req_loan_map`. fn guarantee_valid(&mut self, borrow_id: hir::ItemLocalId, @@ -437,7 +444,7 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { // } } - pub fn mark_loan_path_as_mutated(&self, loan_path: &LoanPath) { + pub fn mark_loan_path_as_mutated(&self, loan_path: &LoanPath<'_>) { //! For mutable loans of content whose mutability derives //! from a local variable, mark the mutability decl as necessary. @@ -446,9 +453,8 @@ impl<'a, 'tcx> GatherLoanCtxt<'a, 'tcx> { while let Some(current_path) = wrapped_path { wrapped_path = match current_path.kind { - LpVar(local_id) => { + LpVar(hir_id) => { if !through_borrow { - let hir_id = self.bccx.tcx.hir().node_to_hir_id(local_id); self.bccx.used_mut_nodes.borrow_mut().insert(hir_id); } None diff --git a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs index 08c5b247b2f65..9a00c43be3fbb 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/move_error.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/move_error.rs @@ -1,14 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
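The `nested_body` hook added above works around rust-lang/rust#58776: MIR borrowck and the AST checker can disagree on where a closure's borrow error is reported, so under migrate mode the enclosing item has to pick up the closure's `signalled_any_error` flag. A hedged sketch of the kind of user code involved (not taken from the PR's tests); the conflicting closure is left commented out so the snippet compiles on its own:

```rust
fn enclosing_item() {
    let mut counts = vec![0u32; 3];
    let first = &counts[0]; // immutable borrow that stays live below
    // Uncommenting the next two lines makes the closure capture `counts`
    // mutably while `first` is still alive. That conflict belongs to the
    // nested closure body, and migrate-mode borrowck must surface it when
    // the enclosing item is checked.
    // let mut bump = || counts[1] += 1;
    // bump();
    println!("first = {}", first);
}
```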
- -use borrowck::BorrowckCtxt; +use crate::borrowck::BorrowckCtxt; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::middle::mem_categorization::NoteClosureEnv; @@ -18,7 +8,8 @@ use rustc_mir::util::borrowck_errors::{BorrowckErrors, Origin}; use syntax::ast; use syntax_pos; use errors::{DiagnosticBuilder, Applicability}; -use borrowck::gather_loans::gather_moves::PatternSource; +use crate::borrowck::gather_loans::gather_moves::PatternSource; +use log::debug; pub struct MoveErrorCollector<'tcx> { errors: Vec> @@ -80,7 +71,7 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &[MoveErr let initializer = e.init.as_ref().expect("should have an initializer to get an error"); if let Ok(snippet) = bccx.tcx.sess.source_map().span_to_snippet(initializer.span) { - err.span_suggestion_with_applicability( + err.span_suggestion( initializer.span, "consider using a reference instead", format!("&{}", snippet), @@ -97,8 +88,7 @@ fn report_move_errors<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, errors: &[MoveErr } } if let NoteClosureEnv(upvar_id) = error.move_from.note { - let var_node_id = bccx.tcx.hir().hir_to_node_id(upvar_id.var_path.hir_id); - err.span_label(bccx.tcx.hir().span(var_node_id), + err.span_label(bccx.tcx.hir().span_by_hir_id(upvar_id.var_path.hir_id), "captured outer variable"); } err.emit(); @@ -177,10 +167,10 @@ fn report_cannot_move_out_of<'a, 'tcx>(bccx: &'a BorrowckCtxt<'a, 'tcx>, } } -fn note_move_destination(mut err: DiagnosticBuilder, +fn note_move_destination(mut err: DiagnosticBuilder<'_>, move_to_span: syntax_pos::Span, pat_name: ast::Name, - is_first_note: bool) -> DiagnosticBuilder { + is_first_note: bool) -> DiagnosticBuilder<'_> { if is_first_note { err.span_label( move_to_span, diff --git a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs index 52c7ebb4beb02..9f4c05a6b255f 100644 --- a/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs +++ b/src/librustc_borrowck/borrowck/gather_loans/restrictions.rs @@ -1,23 +1,14 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Computes the restrictions that result from a borrow. -use borrowck::*; +use crate::borrowck::*; use rustc::middle::expr_use_visitor as euv; use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::ty; use syntax_pos::Span; +use log::debug; -use borrowck::ToInteriorKind; +use crate::borrowck::ToInteriorKind; use std::rc::Rc; diff --git a/src/librustc_borrowck/borrowck/mod.rs b/src/librustc_borrowck/borrowck/mod.rs index cb1200f462fb6..fe39e3ae0c6cd 100644 --- a/src/librustc_borrowck/borrowck/mod.rs +++ b/src/librustc_borrowck/borrowck/mod.rs @@ -1,24 +1,14 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See The Book chapter on the borrow checker for more details. 
#![allow(non_camel_case_types)] -pub use self::LoanPathKind::*; -pub use self::LoanPathElem::*; -pub use self::bckerr_code::*; -pub use self::AliasableViolationKind::*; -pub use self::MovedValueUseKind::*; +pub use LoanPathKind::*; +pub use LoanPathElem::*; +pub use bckerr_code::*; +pub use AliasableViolationKind::*; +pub use MovedValueUseKind::*; -use self::InteriorKind::*; +use InteriorKind::*; use rustc::hir::HirId; use rustc::hir::Node; @@ -44,14 +34,14 @@ use std::fmt; use std::rc::Rc; use rustc_data_structures::sync::Lrc; use std::hash::{Hash, Hasher}; -use syntax::ast; +use syntax::source_map::CompilerDesugaringKind; use syntax_pos::{MultiSpan, Span}; use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; +use log::debug; use rustc::hir; -use rustc::hir::intravisit::{self, Visitor}; -use dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, KillFrom}; +use crate::dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, KillFrom}; pub mod check_loans; @@ -68,11 +58,11 @@ pub type LoanDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, LoanDataFlowOperator pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { tcx.par_body_owners(|body_owner_def_id| { - tcx.borrowck(body_owner_def_id); + tcx.ensure().borrowck(body_owner_def_id); }); } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { borrowck, ..*providers @@ -93,11 +83,10 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) debug!("borrowck(body_owner_def_id={:?})", owner_def_id); - let owner_id = tcx.hir().as_local_node_id(owner_def_id).unwrap(); + let owner_id = tcx.hir().as_local_hir_id(owner_def_id).unwrap(); - match tcx.hir().get(owner_id) { - Node::StructCtor(_) | - Node::Variant(_) => { + match tcx.hir().get_by_hir_id(owner_id) { + Node::Ctor(..) => { // We get invoked with anything that has MIR, but some of // those things (notably the synthesized constructors from // tuple structs/variants) do not have an associated body @@ -132,7 +121,7 @@ fn borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, owner_def_id: DefId) // Note that `mir_validated` is a "stealable" result; the // thief, `optimized_mir()`, forces borrowck, so we know that // is not yet stolen. - ty::query::queries::mir_validated::ensure(tcx, owner_def_id); + tcx.ensure().mir_validated(owner_def_id); // option dance because you can't capture an uninitialized variable // by mut-ref. @@ -167,12 +156,6 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc where F: FnOnce(&mut BorrowckCtxt<'a, 'tcx>) -> &'c cfg::CFG { // Check the body of fn items. - let tcx = this.tcx; - let id_range = { - let mut visitor = intravisit::IdRangeComputingVisitor::new(&tcx.hir()); - visitor.visit_body(this.body); - visitor.result() - }; let (all_loans, move_data) = gather_loans::gather_loans_in_fn(this, body_id); @@ -194,7 +177,6 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc Some(this.body), cfg, LoanDataFlowOperator, - id_range, all_loans.len()); for (loan_idx, loan) in all_loans.iter().enumerate() { loan_dfcx.add_gen(loan.gen_scope.item_local_id(), loan_idx); @@ -208,7 +190,6 @@ fn build_borrowck_dataflow_data<'a, 'c, 'tcx, F>(this: &mut BorrowckCtxt<'a, 'tc let flowed_moves = move_data::FlowedMoveData::new(move_data, this, cfg, - id_range, this.body); Some(AnalysisData { all_loans, @@ -314,11 +295,11 @@ pub struct Loan<'tcx> { /// gen_scope indicates where loan is introduced. 
Typically the /// loan is introduced at the point of the borrow, but in some /// cases, notably method arguments, the loan may be introduced - /// only later, once it comes into scope. See also + /// only later, once it comes into scope. See also /// `GatherLoanCtxt::compute_gen_scope`. gen_scope: region::Scope, - /// kill_scope indicates when the loan goes out of scope. This is + /// kill_scope indicates when the loan goes out of scope. This is /// either when the lifetime expires or when the local variable /// which roots the loan-path goes out of scope, whichever happens /// faster. See also `GatherLoanCtxt::compute_kill_scope`. @@ -353,7 +334,7 @@ impl<'tcx> Hash for LoanPath<'tcx> { #[derive(PartialEq, Eq, Hash, Debug)] pub enum LoanPathKind<'tcx> { - LpVar(ast::NodeId), // `x` in README.md + LpVar(hir::HirId), // `x` in README.md LpUpvar(ty::UpvarId), // `x` captured by-value into closure LpDowncast(Rc>, DefId), // `x` downcast to particular enum variant LpExtend(Rc>, mc::MutabilityCategory, LoanPathElem<'tcx>) @@ -417,12 +398,12 @@ pub enum LoanPathElem<'tcx> { } fn closure_to_block(closure_id: LocalDefId, - tcx: TyCtxt) -> ast::NodeId { + tcx: TyCtxt<'_, '_, '_>) -> HirId { let closure_id = tcx.hir().local_def_id_to_node_id(closure_id); match tcx.hir().get(closure_id) { Node::Expr(expr) => match expr.node { hir::ExprKind::Closure(.., body_id, _, _) => { - body_id.node_id + body_id.hir_id } _ => { bug!("encountered non-closure id: {}", closure_id) @@ -435,14 +416,12 @@ fn closure_to_block(closure_id: LocalDefId, impl<'a, 'tcx> LoanPath<'tcx> { pub fn kill_scope(&self, bccx: &BorrowckCtxt<'a, 'tcx>) -> region::Scope { match self.kind { - LpVar(local_id) => { - let hir_id = bccx.tcx.hir().node_to_hir_id(local_id); + LpVar(hir_id) => { bccx.region_scope_tree.var_scope(hir_id.local_id) } LpUpvar(upvar_id) => { let block_id = closure_to_block(upvar_id.closure_expr_id, bccx.tcx); - let hir_id = bccx.tcx.hir().node_to_hir_id(block_id); - region::Scope { id: hir_id.local_id, data: region::ScopeData::Node } + region::Scope { id: block_id.local_id, data: region::ScopeData::Node } } LpDowncast(ref base, _) | LpExtend(ref base, ..) => base.kill_scope(bccx), @@ -700,8 +679,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { Origin::Ast); let need_note = match lp.ty.sty { ty::Closure(id, _) => { - let node_id = self.tcx.hir().as_local_node_id(id).unwrap(); - let hir_id = self.tcx.hir().node_to_hir_id(node_id); + let hir_id = self.tcx.hir().as_local_hir_id(id).unwrap(); if let Some((span, name)) = self.tables.closure_kind_origins().get(hir_id) { err.span_note(*span, &format!( "closure cannot be invoked more than once because \ @@ -721,20 +699,20 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { // Get type of value and span where it was previously // moved. 
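A few hunks below, the move-error reporting also gains a special case for the `ForLoop` compiler desugaring: when the moved value was consumed by a `for` loop, the diagnostic suggests borrowing it instead. At the user level the suggested fix looks like this (illustrative code, not from the PR's test suite):

```rust
fn print_all(words: Vec<String>) {
    // `for w in words { ... }` would move `words` into the loop and make the
    // later use an error; the new suggestion is to borrow the iterated value:
    for w in &words {
        println!("{}", w);
    }
    println!("{} words in total", words.len()); // still valid after the loop
}
```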
- let node_id = self.tcx.hir().hir_to_node_id(hir::HirId { + let hir_id = hir::HirId { owner: self.body.value.hir_id.owner, local_id: the_move.id - }); + }; let (move_span, move_note) = match the_move.kind { move_data::Declared => { unreachable!(); } move_data::MoveExpr | - move_data::MovePat => (self.tcx.hir().span(node_id), ""), + move_data::MovePat => (self.tcx.hir().span_by_hir_id(hir_id), ""), move_data::Captured => - (match self.tcx.hir().expect_expr(node_id).node { + (match self.tcx.hir().expect_expr_by_hir_id(hir_id).node { hir::ExprKind::Closure(.., fn_decl_span, _) => fn_decl_span, ref r => bug!("Captured({:?}) maps to non-closure: {:?}", the_move.id, r), @@ -766,6 +744,19 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { }, moved_lp.ty)); } + if let (Some(CompilerDesugaringKind::ForLoop), Ok(snippet)) = ( + move_span.compiler_desugaring_kind(), + self.tcx.sess.source_map().span_to_snippet(move_span), + ) { + if !snippet.starts_with("&") { + err.span_suggestion( + move_span, + "consider borrowing this to avoid moving it into the for loop", + format!("&{}", snippet), + Applicability::MaybeIncorrect, + ); + } + } // Note: we used to suggest adding a `ref binding` or calling // `clone` but those suggestions have been removed because @@ -846,8 +837,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { MutabilityViolation => { let mut db = self.cannot_assign(error_span, &descr, Origin::Ast); if let mc::NoteClosureEnv(upvar_id) = err.cmt.note { - let node_id = self.tcx.hir().hir_to_node_id(upvar_id.var_path.hir_id); - let sp = self.tcx.hir().span(node_id); + let hir_id = upvar_id.var_path.hir_id; + let sp = self.tcx.hir().span_by_hir_id(hir_id); let fn_closure_msg = "`Fn` closures cannot capture their enclosing \ environment for modifications"; match (self.tcx.sess.source_map().span_to_snippet(sp), &err.cmt.cat) { @@ -869,7 +860,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { }) = cmt.cat { db.note(fn_closure_msg); } else { - db.span_suggestion_with_applicability( + db.span_suggestion( sp, msg, suggestion, @@ -877,7 +868,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { ); } } else { - db.span_suggestion_with_applicability( + db.span_suggestion( sp, msg, suggestion, @@ -937,7 +928,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.note_immutability_blame( &mut db, err.cmt.immutability_blame(), - self.tcx.hir().hir_to_node_id(err.cmt.hir_id) + err.cmt.hir_id ); db.emit(); self.signal_error(); @@ -1138,8 +1129,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } else { "consider changing this closure to take self by mutable reference" }; - let node_id = self.tcx.hir().local_def_id_to_node_id(id); - let help_span = self.tcx.hir().span(node_id); + let hir_id = self.tcx.hir().local_def_id_to_hir_id(id); + let help_span = self.tcx.hir().span_by_hir_id(hir_id); self.cannot_act_on_capture_in_sharable_fn(span, prefix, (help_span, help_msg), @@ -1153,7 +1144,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { self.note_immutability_blame( &mut err, blame, - self.tcx.hir().hir_to_node_id(cmt.hir_id) + cmt.hir_id ); if is_closure { @@ -1193,8 +1184,8 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - fn local_binding_mode(&self, node_id: ast::NodeId) -> ty::BindingMode { - let pat = match self.tcx.hir().get(node_id) { + fn local_binding_mode(&self, hir_id: hir::HirId) -> ty::BindingMode { + let pat = match self.tcx.hir().get_by_hir_id(hir_id) { Node::Binding(pat) => pat, node => bug!("bad node for local: {:?}", node) }; @@ -1210,16 +1201,16 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - fn local_ty(&self, node_id: ast::NodeId) -> 
(Option<&hir::Ty>, bool) { - let parent = self.tcx.hir().get_parent_node(node_id); - let parent_node = self.tcx.hir().get(parent); + fn local_ty(&self, hir_id: hir::HirId) -> (Option<&hir::Ty>, bool) { + let parent = self.tcx.hir().get_parent_node_by_hir_id(hir_id); + let parent_node = self.tcx.hir().get_by_hir_id(parent); // The parent node is like a fn if let Some(fn_like) = FnLikeNode::from_node(parent_node) { // `nid`'s parent's `Body` let fn_body = self.tcx.hir().body(fn_like.body()); // Get the position of `node_id` in the arguments list - let arg_pos = fn_body.arguments.iter().position(|arg| arg.pat.id == node_id); + let arg_pos = fn_body.arguments.iter().position(|arg| arg.pat.hir_id == hir_id); if let Some(i) = arg_pos { // The argument's `Ty` (Some(&fn_like.decl().inputs[i]), @@ -1233,22 +1224,22 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } fn note_immutability_blame(&self, - db: &mut DiagnosticBuilder, - blame: Option, - error_node_id: ast::NodeId) { + db: &mut DiagnosticBuilder<'_>, + blame: Option>, + error_hir_id: hir::HirId) { match blame { None => {} Some(ImmutabilityBlame::ClosureEnv(_)) => {} - Some(ImmutabilityBlame::ImmLocal(node_id)) => { - self.note_immutable_local(db, error_node_id, node_id) + Some(ImmutabilityBlame::ImmLocal(hir_id)) => { + self.note_immutable_local(db, error_hir_id, hir_id) } - Some(ImmutabilityBlame::LocalDeref(node_id)) => { - match self.local_binding_mode(node_id) { + Some(ImmutabilityBlame::LocalDeref(hir_id)) => { + match self.local_binding_mode(hir_id) { ty::BindByReference(..) => { - let let_span = self.tcx.hir().span(node_id); + let let_span = self.tcx.hir().span_by_hir_id(hir_id); let suggestion = suggest_ref_mut(self.tcx, let_span); if let Some(replace_str) = suggestion { - db.span_suggestion_with_applicability( + db.span_suggestion( let_span, "use a mutable reference instead", replace_str, @@ -1262,7 +1253,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } ty::BindByValue(..) => { - if let (Some(local_ty), is_implicit_self) = self.local_ty(node_id) { + if let (Some(local_ty), is_implicit_self) = self.local_ty(hir_id) { if let Some(msg) = self.suggest_mut_for_immutable(local_ty, is_implicit_self) { db.span_label(local_ty.span, msg); @@ -1272,12 +1263,12 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } Some(ImmutabilityBlame::AdtFieldDeref(_, field)) => { - let node_id = match self.tcx.hir().as_local_node_id(field.did) { - Some(node_id) => node_id, + let hir_id = match self.tcx.hir().as_local_hir_id(field.did) { + Some(hir_id) => hir_id, None => return }; - if let Node::Field(ref field) = self.tcx.hir().get(node_id) { + if let Node::Field(ref field) = self.tcx.hir().get_by_hir_id(hir_id) { if let Some(msg) = self.suggest_mut_for_immutable(&field.ty, false) { db.span_label(field.ty.span, msg); } @@ -1290,13 +1281,13 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { // binding: either to make the binding mutable (if its type is // not a mutable reference) or to avoid borrowing altogether fn note_immutable_local(&self, - db: &mut DiagnosticBuilder, - borrowed_node_id: ast::NodeId, - binding_node_id: ast::NodeId) { - let let_span = self.tcx.hir().span(binding_node_id); - if let ty::BindByValue(..) = self.local_binding_mode(binding_node_id) { + db: &mut DiagnosticBuilder<'_>, + borrowed_hir_id: hir::HirId, + binding_hir_id: hir::HirId) { + let let_span = self.tcx.hir().span_by_hir_id(binding_hir_id); + if let ty::BindByValue(..) 
= self.local_binding_mode(binding_hir_id) { if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(let_span) { - let (ty, is_implicit_self) = self.local_ty(binding_node_id); + let (ty, is_implicit_self) = self.local_ty(binding_hir_id); if is_implicit_self && snippet != "self" { // avoid suggesting `mut &self`. return @@ -1309,16 +1300,16 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { }, )) = ty.map(|t| &t.node) { - let borrow_expr_id = self.tcx.hir().get_parent_node(borrowed_node_id); - db.span_suggestion_with_applicability( - self.tcx.hir().span(borrow_expr_id), + let borrow_expr_id = self.tcx.hir().get_parent_node_by_hir_id(borrowed_hir_id); + db.span_suggestion( + self.tcx.hir().span_by_hir_id(borrow_expr_id), "consider removing the `&mut`, as it is an \ immutable binding to a mutable reference", snippet, Applicability::MachineApplicable, ); } else { - db.span_suggestion_with_applicability( + db.span_suggestion( let_span, "make this binding mutable", format!("mut {}", snippet), @@ -1345,7 +1336,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { &cmt_path_or_string, capture_span, Origin::Ast) - .span_suggestion_with_applicability( + .span_suggestion( err.span, &format!("to force the closure to take ownership of {} \ (and any other referenced variables), \ @@ -1368,7 +1359,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { } } - fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder, err: &BckError<'a, 'tcx>, + fn note_and_explain_mutbl_error(&self, db: &mut DiagnosticBuilder<'_>, err: &BckError<'a, 'tcx>, error_span: &Span) { match err.cmt.note { mc::NoteClosureEnv(upvar_id) | mc::NoteUpvarRef(upvar_id) => { @@ -1380,9 +1371,9 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { _ => bug!() }; if *kind == ty::ClosureKind::Fn { - let closure_node_id = - self.tcx.hir().local_def_id_to_node_id(upvar_id.closure_expr_id); - db.span_help(self.tcx.hir().span(closure_node_id), + let closure_hir_id = + self.tcx.hir().local_def_id_to_hir_id(upvar_id.closure_expr_id); + db.span_help(self.tcx.hir().span_by_hir_id(closure_hir_id), "consider changing this closure to take \ self by mutable reference"); } @@ -1391,7 +1382,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { if let Categorization::Deref(..) 
= err.cmt.cat { db.span_label(*error_span, "cannot borrow as mutable"); } else if let Categorization::Local(local_id) = err.cmt.cat { - let span = self.tcx.hir().span(local_id); + let span = self.tcx.hir().span_by_hir_id(local_id); if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) { if snippet.starts_with("ref mut ") || snippet.starts_with("&mut ") { db.span_label(*error_span, "cannot reborrow mutably"); @@ -1415,18 +1406,18 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { loan_path: &LoanPath<'tcx>, out: &mut String) { match loan_path.kind { - LpUpvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id: id}, closure_expr_id: _ }) => { - out.push_str(&self.tcx.hir().name(self.tcx.hir().hir_to_node_id(id)).as_str()); + LpUpvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id: id }, closure_expr_id: _ }) => { + out.push_str(&self.tcx.hir().name_by_hir_id(id).as_str()); } LpVar(id) => { - out.push_str(&self.tcx.hir().name(id).as_str()); + out.push_str(&self.tcx.hir().name_by_hir_id(id).as_str()); } LpDowncast(ref lp_base, variant_def_id) => { out.push('('); self.append_loan_path_to_string(&lp_base, out); out.push_str(DOWNCAST_PRINTED_OPERATOR); - out.push_str(&self.tcx.item_path_str(variant_def_id)); + out.push_str(&self.tcx.def_path_str(variant_def_id)); out.push(')'); } @@ -1463,7 +1454,7 @@ impl<'a, 'tcx> BorrowckCtxt<'a, 'tcx> { out.push('('); self.append_autoderefd_loan_path_to_string(&lp_base, out); out.push_str(DOWNCAST_PRINTED_OPERATOR); - out.push_str(&self.tcx.item_path_str(variant_def_id)); + out.push_str(&self.tcx.def_path_str(variant_def_id)); out.push(')'); } @@ -1506,7 +1497,7 @@ impl DataFlowOperator for LoanDataFlowOperator { } impl<'tcx> fmt::Debug for InteriorKind { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { InteriorField(mc::FieldIndex(_, info)) => write!(f, "{}", info), InteriorElement => write!(f, "[]"), @@ -1515,7 +1506,7 @@ impl<'tcx> fmt::Debug for InteriorKind { } impl<'tcx> fmt::Debug for Loan<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "Loan_{}({:?}, {:?}, {:?}-{:?}, {:?})", self.index, self.loan_path, @@ -1527,10 +1518,10 @@ impl<'tcx> fmt::Debug for Loan<'tcx> { } impl<'tcx> fmt::Debug for LoanPath<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { LpVar(id) => { - write!(f, "$({})", ty::tls::with(|tcx| tcx.hir().node_to_string(id))) + write!(f, "$({})", ty::tls::with(|tcx| tcx.hir().hir_to_string(id))) } LpUpvar(ty::UpvarId{ var_path: ty::UpvarPath {hir_id: var_id}, closure_expr_id }) => { @@ -1543,7 +1534,7 @@ impl<'tcx> fmt::Debug for LoanPath<'tcx> { LpDowncast(ref lp, variant_def_id) => { let variant_str = if variant_def_id.is_local() { - ty::tls::with(|tcx| tcx.item_path_str(variant_def_id)) + ty::tls::with(|tcx| tcx.def_path_str(variant_def_id)) } else { format!("{:?}", variant_def_id) }; @@ -1562,10 +1553,10 @@ impl<'tcx> fmt::Debug for LoanPath<'tcx> { } impl<'tcx> fmt::Display for LoanPath<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { LpVar(id) => { - write!(f, "$({})", ty::tls::with(|tcx| tcx.hir().node_to_user_string(id))) + write!(f, "$({})", ty::tls::with(|tcx| tcx.hir().hir_to_user_string(id))) } LpUpvar(ty::UpvarId{ var_path: ty::UpvarPath { hir_id }, closure_expr_id: _ }) => 
{ @@ -1578,7 +1569,7 @@ impl<'tcx> fmt::Display for LoanPath<'tcx> { LpDowncast(ref lp, variant_def_id) => { let variant_str = if variant_def_id.is_local() { - ty::tls::with(|tcx| tcx.item_path_str(variant_def_id)) + ty::tls::with(|tcx| tcx.def_path_str(variant_def_id)) } else { format!("{:?}", variant_def_id) }; diff --git a/src/librustc_borrowck/borrowck/move_data.rs b/src/librustc_borrowck/borrowck/move_data.rs index 8aa2e4641d4f9..325d3559f0ab6 100644 --- a/src/librustc_borrowck/borrowck/move_data.rs +++ b/src/librustc_borrowck/borrowck/move_data.rs @@ -1,21 +1,11 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Data structures used for tracking moves. Please see the extensive //! comments in the section "Moves and initialization" in `README.md`. -pub use self::MoveKind::*; +pub use MoveKind::*; -use dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, KillFrom}; +use crate::dataflow::{DataFlowContext, BitwiseOperator, DataFlowOperator, KillFrom}; -use borrowck::*; +use crate::borrowck::*; use rustc::cfg; use rustc::ty::{self, TyCtxt}; use rustc::util::nodemap::FxHashMap; @@ -25,7 +15,7 @@ use std::rc::Rc; use std::usize; use syntax_pos::Span; use rustc::hir; -use rustc::hir::intravisit::IdRange; +use log::debug; #[derive(Default)] pub struct MoveData<'tcx> { @@ -124,7 +114,7 @@ pub struct Move { /// Path being moved. pub path: MovePathIndex, - /// id of node that is doing the move. + /// ID of node that is doing the move. pub id: hir::ItemLocalId, /// Kind of move, for error messages. @@ -139,7 +129,7 @@ pub struct Assignment { /// Path being assigned. pub path: MovePathIndex, - /// id where assignment occurs + /// ID where assignment occurs pub id: hir::ItemLocalId, /// span of node where assignment occurs @@ -156,7 +146,7 @@ pub struct AssignDataFlowOperator; pub type AssignDataFlow<'a, 'tcx> = DataFlowContext<'a, 'tcx, AssignDataFlowOperator>; -fn loan_path_is_precise(loan_path: &LoanPath) -> bool { +fn loan_path_is_precise(loan_path: &LoanPath<'_>) -> bool { match loan_path.kind { LpVar(_) | LpUpvar(_) => { true @@ -178,8 +168,8 @@ fn loan_path_is_precise(loan_path: &LoanPath) -> bool { } impl<'a, 'tcx> MoveData<'tcx> { - /// return true if there are no trackable assignments or moves - /// in this move data - that means that there is nothing that + /// Returns `true` if there are no trackable assignments or moves + /// in this move data -- that means that there is nothing that /// could cause a borrow error. pub fn is_empty(&self) -> bool { self.moves.borrow().is_empty() && @@ -439,8 +429,8 @@ impl<'a, 'tcx> MoveData<'tcx> { /// killed by scoping. See `README.md` for more details. 
fn add_gen_kills(&self, bccx: &BorrowckCtxt<'a, 'tcx>, - dfcx_moves: &mut MoveDataFlow, - dfcx_assign: &mut AssignDataFlow) { + dfcx_moves: &mut MoveDataFlow<'_, '_>, + dfcx_assign: &mut AssignDataFlow<'_, '_>) { for (i, the_move) in self.moves.borrow().iter().enumerate() { dfcx_moves.add_gen(the_move.id, i); } @@ -548,7 +538,7 @@ impl<'a, 'tcx> MoveData<'tcx> { path: MovePathIndex, kill_id: hir::ItemLocalId, kill_kind: KillFrom, - dfcx_moves: &mut MoveDataFlow) { + dfcx_moves: &mut MoveDataFlow<'_, '_>) { // We can only perform kills for paths that refer to a unique location, // since otherwise we may kill a move from one location with an // assignment referring to another location. @@ -569,7 +559,6 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { pub fn new(move_data: MoveData<'tcx>, bccx: &BorrowckCtxt<'a, 'tcx>, cfg: &cfg::CFG, - id_range: IdRange, body: &hir::Body) -> FlowedMoveData<'a, 'tcx> { let tcx = bccx.tcx; @@ -580,7 +569,6 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { Some(body), cfg, MoveDataFlowOperator, - id_range, move_data.moves.borrow().len()); let mut dfcx_assign = DataFlowContext::new(tcx, @@ -588,7 +576,6 @@ impl<'a, 'tcx> FlowedMoveData<'a, 'tcx> { Some(body), cfg, AssignDataFlowOperator, - id_range, move_data.var_assignments.borrow().len()); move_data.add_gen_kills(bccx, diff --git a/src/librustc_borrowck/borrowck/unused.rs b/src/librustc_borrowck/borrowck/unused.rs index a9a33f35842ce..60a9c18e95ee9 100644 --- a/src/librustc_borrowck/borrowck/unused.rs +++ b/src/librustc_borrowck/borrowck/unused.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::intravisit::{Visitor, NestedVisitorMap}; use rustc::hir::{self, HirId}; use rustc::lint::builtin::UNUSED_MUT; @@ -17,7 +7,7 @@ use errors::Applicability; use std::slice; use syntax::ptr::P; -use borrowck::BorrowckCtxt; +use crate::borrowck::BorrowckCtxt; pub fn check<'a, 'tcx>(bccx: &BorrowckCtxt<'a, 'tcx>, body: &'tcx hir::Body) { let mut used_mut = bccx.used_mut_nodes.borrow().clone(); @@ -88,11 +78,12 @@ impl<'a, 'tcx> UnusedMutCx<'a, 'tcx> { hir_id, span, "variable does not need to be mutable") - .span_suggestion_short_with_applicability( + .span_suggestion_short( mut_span, "remove this `mut`", String::new(), - Applicability::MachineApplicable) + Applicability::MachineApplicable, + ) .emit(); } } diff --git a/src/librustc_borrowck/dataflow.rs b/src/librustc_borrowck/dataflow.rs index d12c22109c681..de2a3c4cb22a8 100644 --- a/src/librustc_borrowck/dataflow.rs +++ b/src/librustc_borrowck/dataflow.rs @@ -1,14 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - //! A module for propagating forward dataflow information. The analysis //! assumes that the items to be propagated can be represented as bits //! and thus uses bitvectors. 
Your job is simply to specify the so-called @@ -21,12 +10,13 @@ use std::io; use std::mem; use std::usize; use syntax::print::pprust::PrintState; +use log::debug; use rustc_data_structures::graph::implementation::OUTGOING; use rustc::util::nodemap::FxHashMap; use rustc::hir; -use rustc::hir::intravisit::{self, IdRange}; +use rustc::hir::intravisit; use rustc::hir::print as pprust; @@ -91,7 +81,7 @@ pub trait DataFlowOperator : BitwiseOperator { fn initial_value(&self) -> bool; } -struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> { +struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O> { dfcx: &'a mut DataFlowContext<'b, 'tcx, O>, changed: bool } @@ -110,12 +100,12 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { } impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> { - fn nested(&self, state: &mut pprust::State, nested: pprust::Nested) -> io::Result<()> { + fn nested(&self, state: &mut pprust::State<'_>, nested: pprust::Nested) -> io::Result<()> { pprust::PpAnn::nested(self.tcx.hir(), state, nested) } fn pre(&self, - ps: &mut pprust::State, - node: pprust::AnnNode) -> io::Result<()> { + ps: &mut pprust::State<'_>, + node: pprust::AnnNode<'_>) -> io::Result<()> { let id = match node { pprust::AnnNode::Name(_) => return Ok(()), pprust::AnnNode::Expr(expr) => expr.hir_id.local_id, @@ -188,7 +178,7 @@ fn build_local_id_to_index(body: Option<&hir::Body>, return index; - /// Add mappings from the ast nodes for the formal bindings to + /// Adds mappings from the ast nodes for the formal bindings to /// the entry-node in the graph. fn add_entries_from_fn_body(index: &mut FxHashMap>, body: &hir::Body, @@ -241,16 +231,15 @@ impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> { body: Option<&hir::Body>, cfg: &cfg::CFG, oper: O, - id_range: IdRange, bits_per_id: usize) -> DataFlowContext<'a, 'tcx, O> { let usize_bits = mem::size_of::() * 8; let words_per_id = (bits_per_id + usize_bits - 1) / usize_bits; let num_nodes = cfg.graph.all_nodes().len(); - debug!("DataFlowContext::new(analysis_name: {}, id_range={:?}, \ + debug!("DataFlowContext::new(analysis_name: {}, \ bits_per_id={}, words_per_id={}) \ num_nodes: {}", - analysis_name, id_range, bits_per_id, words_per_id, + analysis_name, bits_per_id, words_per_id, num_nodes); let entry = if oper.initial_value() { usize::MAX } else {0}; diff --git a/src/librustc_borrowck/diagnostics.rs b/src/librustc_borrowck/diagnostics.rs index 3fea01443be4b..44d8a23fcb910 100644 --- a/src/librustc_borrowck/diagnostics.rs +++ b/src/librustc_borrowck/diagnostics.rs @@ -1,11 +1 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] diff --git a/src/librustc_borrowck/graphviz.rs b/src/librustc_borrowck/graphviz.rs index 56dd5a846da6b..77056d4d3eb15 100644 --- a/src/librustc_borrowck/graphviz.rs +++ b/src/librustc_borrowck/graphviz.rs @@ -1,27 +1,16 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! This module provides linkage between rustc::middle::graph and //! libgraphviz traits, specialized to attaching borrowck analysis //! data to rendered labels. -pub use self::Variant::*; +pub use Variant::*; pub use rustc::cfg::graphviz::{Node, Edge}; use rustc::cfg::graphviz as cfg_dot; -use borrowck; -use borrowck::{BorrowckCtxt, LoanPath}; -use dot; +use crate::borrowck::{self, BorrowckCtxt, LoanPath}; +use crate::dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit}; +use log::debug; use rustc::cfg::CFGIndex; -use dataflow::{DataFlowOperator, DataFlowContext, EntryOrExit}; use std::rc::Rc; #[derive(Debug, Copy, Clone)] @@ -63,7 +52,7 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> { sets } - fn dataflow_for_variant(&self, e: EntryOrExit, n: &Node, v: Variant) -> String { + fn dataflow_for_variant(&self, e: EntryOrExit, n: &Node<'_>, v: Variant) -> String { let cfgidx = n.0; match v { Loans => self.dataflow_loans_for(e, cfgidx), @@ -99,7 +88,7 @@ impl<'a, 'tcx> DataflowLabeller<'a, 'tcx> { let dfcx = &self.analysis_data.loans; let loan_index_to_path = |loan_index| { let all_loans = &self.analysis_data.all_loans; - let l: &borrowck::Loan = &all_loans[loan_index]; + let l: &borrowck::Loan<'_> = &all_loans[loan_index]; l.loan_path() }; self.build_set(e, cfgidx, dfcx, loan_index_to_path) diff --git a/src/librustc_borrowck/lib.rs b/src/librustc_borrowck/lib.rs index 049c4b2bd92d8..3761a52bcccf2 100644 --- a/src/librustc_borrowck/lib.rs +++ b/src/librustc_borrowck/lib.rs @@ -1,36 +1,15 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![allow(non_camel_case_types)] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] #![feature(nll)] -#![feature(quote)] #![recursion_limit="256"] -#[macro_use] extern crate log; -extern crate syntax; -extern crate syntax_pos; -extern crate rustc_errors as errors; -extern crate rustc_data_structures; - -// for "clarity", rename the graphviz crate to dot; graphviz within `borrowck` -// refers to the borrowck-specific graphviz adapter traits. 
-extern crate graphviz as dot; #[macro_use] extern crate rustc; -extern crate rustc_mir; pub use borrowck::check_crate; pub use borrowck::build_borrowck_dataflow_data_for_fn; diff --git a/src/librustc_codegen_llvm/Cargo.toml b/src/librustc_codegen_llvm/Cargo.toml index b711502b14b7f..841cf98164eb4 100644 --- a/src/librustc_codegen_llvm/Cargo.toml +++ b/src/librustc_codegen_llvm/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_codegen_llvm" version = "0.0.0" +edition = "2018" [lib] name = "rustc_codegen_llvm" @@ -10,7 +11,7 @@ crate-type = ["dylib"] test = false [dependencies] -cc = "1.0.1" +cc = "1.0.1" # Used to locate MSVC num_cpus = "1.0" rustc-demangle = "0.1.4" rustc_llvm = { path = "../librustc_llvm" } diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index b8954dee794f7..60973c43a533c 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::{self, AttributePlace}; +use crate::llvm::{self, AttributePlace}; +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::type_::Type; +use crate::type_of::{LayoutLlvmExt, PointerKind}; +use crate::value::Value; use rustc_codegen_ssa::MemFlags; -use builder::Builder; -use context::CodegenCx; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::mir::operand::OperandValue; -use type_::Type; -use type_of::{LayoutLlvmExt, PointerKind}; -use value::Value; use rustc_target::abi::call::ArgType; use rustc_codegen_ssa::traits::*; @@ -184,13 +174,13 @@ pub trait ArgTypeExt<'ll, 'tcx> { } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { - /// Get the LLVM type for a place of the original Rust type of + /// Gets the LLVM type for a place of the original Rust type of /// this argument/return, i.e., the result of `type_of::type_of`. fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { self.layout.llvm_type(cx) } - /// Store a direct/indirect value described by this ArgType into a + /// Stores a direct/indirect value described by this ArgType into a /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. @@ -268,7 +258,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { val }; match self.mode { - PassMode::Ignore => {}, + PassMode::Ignore(_) => {} PassMode::Pair(..) => { OperandValue::Pair(next(), next()).store(bx, dst); } @@ -276,7 +266,8 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); } PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => { - self.store(bx, next(), dst); + let next_arg = next(); + self.store(bx, next_arg, dst); } } } @@ -425,6 +416,8 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { Msp430Interrupt => Conv::Msp430Intr, X86Interrupt => Conv::X86Intr, AmdGpuKernel => Conv::AmdGpuKernel, + AvrInterrupt => Conv::AvrInterrupt, + AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt, // These API constants ought to be more specific... 
Cdecl => Conv::C, @@ -432,7 +425,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { let mut inputs = sig.inputs(); let extra_args = if sig.abi == RustCall { - assert!(!sig.variadic && extra_args.is_empty()); + assert!(!sig.c_variadic && extra_args.is_empty()); match sig.inputs().last().unwrap().sty { ty::Tuple(ref tupled_arguments) => { @@ -445,7 +438,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } } else { - assert!(sig.variadic || extra_args.is_empty()); + assert!(sig.c_variadic || extra_args.is_empty()); extra_args }; @@ -456,6 +449,9 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { let linux_s390x = target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu"; + let linux_sparc64 = target.target_os == "linux" + && target.arch == "sparc64" + && target.target_env == "gnu"; let rust_abi = match sig.abi { RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true, _ => false @@ -489,12 +485,6 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { attrs.pointee_size = pointee.size; attrs.pointee_align = Some(pointee.align); - // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions - // with align attributes, and those calls later block optimizations. - if !is_return && !cx.tcx.sess.opts.debugging_opts.arg_align_attributes { - attrs.pointee_align = None; - } - // `Box` pointer parameters never alias because ownership is transferred // `&mut` pointer parameters never alias other parameters, // or mutable global data @@ -520,15 +510,47 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } }; + // Store the index of the last argument. This is useful for working with + // C-compatible variadic arguments. + let last_arg_idx = if sig.inputs().is_empty() { + None + } else { + Some(sig.inputs().len() - 1) + }; + let arg_of = |ty: Ty<'tcx>, arg_idx: Option| { let is_return = arg_idx.is_none(); let mut arg = mk_arg_type(ty, arg_idx); if arg.layout.is_zst() { // For some forsaken reason, x86_64-pc-windows-gnu // doesn't ignore zero-sized struct arguments. - // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) { - arg.mode = PassMode::Ignore; + // The same is true for s390x-unknown-linux-gnu + // and sparc64-unknown-linux-gnu. + if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) { + arg.mode = PassMode::Ignore(IgnoreMode::Zst); + } + } + + // If this is a C-variadic function, this is not the return value, + // and there is one or more fixed arguments; ensure that the `VaList` + // is ignored as an argument. + if sig.c_variadic { + match (last_arg_idx, arg_idx) { + (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => { + let va_list_did = match cx.tcx.lang_items().va_list() { + Some(did) => did, + None => bug!("`va_list` lang item required for C-variadic functions"), + }; + match ty.sty { + ty::Adt(def, _) if def.did == va_list_did => { + // This is the "spoofed" `VaList`. Set the arguments mode + // so that it will be ignored. 
+ arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs); + }, + _ => (), + } + } + _ => {} } } @@ -570,7 +592,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { args: inputs.iter().chain(extra_args).enumerate().map(|(i, ty)| { arg_of(ty, Some(i)) }).collect(), - variadic: sig.variadic, + c_variadic: sig.c_variadic, conv, }; fn_ty.adjust_for_abi(cx, sig.abi); @@ -658,7 +680,9 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => cx.type_void(), + PassMode::Ignore(IgnoreMode::Zst) => cx.type_void(), + PassMode::Ignore(IgnoreMode::CVarArgs) => + bug!("`va_list` should never be a return type"), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) } @@ -676,7 +700,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } let llarg_ty = match arg.mode { - PassMode::Ignore => continue, + PassMode::Ignore(_) => continue, PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx), PassMode::Pair(..) => { llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true)); @@ -696,7 +720,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { llargument_tys.push(llarg_ty); } - if self.variadic { + if self.c_variadic { cx.type_variadic_func(&llargument_tys, llreturn_ty) } else { cx.type_func(&llargument_tys, llreturn_ty) @@ -714,6 +738,8 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { match self.conv { Conv::C => llvm::CCallConv, Conv::AmdGpuKernel => llvm::AmdGpuKernel, + Conv::AvrInterrupt => llvm::AvrInterrupt, + Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, Conv::ArmAapcs => llvm::ArmAapcsCallConv, Conv::Msp430Intr => llvm::Msp430Intr, Conv::PtxKernel => llvm::PtxKernel, @@ -745,7 +771,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { apply(&ArgAttributes::new()); } match arg.mode { - PassMode::Ignore => {} + PassMode::Ignore(_) => {} PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => apply(attrs), PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { @@ -792,7 +818,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { apply(&ArgAttributes::new()); } match arg.mode { - PassMode::Ignore => {} + PassMode::Ignore(_) => {} PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => apply(attrs), PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => { @@ -838,4 +864,8 @@ impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { ) { ty.apply_attrs_callsite(self, callsite) } + + fn get_param(&self, index: usize) -> Self::Value { + llvm::get_param(self.llfn(), index as c_uint) + } } diff --git a/src/librustc_codegen_llvm/allocator.rs b/src/librustc_codegen_llvm/allocator.rs index 7c237407c8f54..1fe020561dde1 100644 --- a/src/librustc_codegen_llvm/allocator.rs +++ b/src/librustc_codegen_llvm/allocator.rs @@ -1,25 +1,15 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
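The variadic handling above boils down to one rule: in a c_variadic signature the last fixed argument is the compiler-inserted VaList (recognised via the va_list lang item) and must not be lowered as a real parameter, hence PassMode::Ignore(IgnoreMode::CVarArgs). A condensed, standalone restatement of that check (plain booleans stand in for the lang-item and type queries the real code performs):

// Condensed sketch of the check added above; the real code matches `ty.sty`
// against the DefId of the `va_list` lang item instead of taking a bool.
fn should_ignore_as_va_list(
    c_variadic: bool,
    arg_idx: Option<usize>,      // None for the return value
    last_arg_idx: Option<usize>, // None when there are no fixed arguments
    ty_is_va_list: bool,
) -> bool {
    c_variadic && arg_idx.is_some() && arg_idx == last_arg_idx && ty_is_va_list
}

fn main() {
    // Last fixed argument of a variadic function, with the VaList type: ignored.
    assert!(should_ignore_as_va_list(true, Some(2), Some(2), true));
    // Ordinary argument of the same function: lowered normally.
    assert!(!should_ignore_as_va_list(true, Some(1), Some(2), false));
}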
- use std::ffi::CString; -use attributes; +use crate::attributes; use libc::c_uint; use rustc::middle::allocator::AllocatorKind; use rustc::ty::TyCtxt; use rustc_allocator::{ALLOCATOR_METHODS, AllocatorTy}; -use ModuleLlvm; -use llvm::{self, False, True}; +use crate::ModuleLlvm; +use crate::llvm::{self, False, True}; -pub(crate) unsafe fn codegen(tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { +pub(crate) unsafe fn codegen(tcx: TyCtxt<'_, '_, '_>, mods: &mut ModuleLlvm, kind: AllocatorKind) { let llcx = &*mods.llcx; let llmod = mods.llmod(); let usize = match &tcx.sess.target.target.target_pointer_width[..] { diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index 294596cea5f15..100a896ea0c7d 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -1,18 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm; -use context::CodegenCx; -use type_of::LayoutLlvmExt; -use builder::Builder; -use value::Value; +use crate::llvm; +use crate::context::CodegenCx; +use crate::type_of::LayoutLlvmExt; +use crate::builder::Builder; +use crate::value::Value; use rustc::hir; use rustc_codegen_ssa::traits::*; @@ -20,7 +10,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::mir::operand::OperandValue; -use std::ffi::CString; +use std::ffi::{CStr, CString}; use libc::{c_uint, c_char}; @@ -83,7 +73,8 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = self.inline_asm_call( + let r = inline_asm_call( + self, &asm, &constraint_cstr, &inputs, @@ -129,3 +120,46 @@ impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } } + +fn inline_asm_call( + bx: &mut Builder<'a, 'll, 'tcx>, + asm: &CStr, + cons: &CStr, + inputs: &[&'ll Value], + output: &'ll llvm::Type, + volatile: bool, + alignstack: bool, + dia: ::syntax::ast::AsmDialect, +) -> Option<&'ll Value> { + let volatile = if volatile { llvm::True } + else { llvm::False }; + let alignstack = if alignstack { llvm::True } + else { llvm::False }; + + let argtys = inputs.iter().map(|v| { + debug!("Asm Input Type: {:?}", *v); + bx.cx.val_ty(*v) + }).collect::>(); + + debug!("Asm Output Type: {:?}", output); + let fty = bx.cx.type_func(&argtys[..], output); + unsafe { + // Ask LLVM to verify that the constraints are well-formed. + let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); + debug!("Constraint verification result: {:?}", constraints_ok); + if constraints_ok { + let v = llvm::LLVMRustInlineAsm( + fty, + asm.as_ptr(), + cons.as_ptr(), + volatile, + alignstack, + llvm::AsmDialect::from_generic(dia), + ); + Some(bx.call(v, inputs, None)) + } else { + // LLVM has detected an issue with our constraints, bail out + None + } + } +} diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index 48e0a3a12c963..77fa34e74dd70 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -1,12 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. //! Set and unset common attributes on LLVM values. use std::ffi::CString; @@ -14,7 +5,7 @@ use std::ffi::CString; use rustc::hir::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::session::Session; -use rustc::session::config::Sanitizer; +use rustc::session::config::{Sanitizer, OptLevel}; use rustc::ty::{self, TyCtxt, PolyFnSig}; use rustc::ty::layout::HasTyCtxt; use rustc::ty::query::Providers; @@ -24,15 +15,15 @@ use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; use rustc_codegen_ssa::traits::*; -use abi::Abi; -use attributes; -use llvm::{self, Attribute}; -use llvm::AttributePlace::Function; -use llvm_util; -pub use syntax::attr::{self, InlineAttr}; +use crate::abi::Abi; +use crate::attributes; +use crate::llvm::{self, Attribute}; +use crate::llvm::AttributePlace::Function; +use crate::llvm_util; +pub use syntax::attr::{self, InlineAttr, OptimizeAttr}; -use context::CodegenCx; -use value::Value; +use crate::context::CodegenCx; +use crate::value::Value; /// Mark LLVM function to use provided inline heuristic. #[inline] @@ -66,13 +57,6 @@ fn unwind(val: &'ll Value, can_unwind: bool) { Attribute::NoUnwind.toggle_llfn(Function, val, !can_unwind); } -/// Tell LLVM whether it should optimize function for size. -#[inline] -#[allow(dead_code)] // possibly useful function -pub fn set_optimize_for_size(val: &'ll Value, optimize: bool) { - Attribute::OptimizeForSize.toggle_llfn(Function, val, optimize); -} - /// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue. #[inline] pub fn naked(val: &'ll Value, is_naked: bool) { @@ -87,6 +71,24 @@ pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) } } +/// Tell LLVM what instrument function to insert. +#[inline] +pub fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { + if cx.sess().instrument_mcount() { + // Similar to `clang -pg` behavior. Handled by the + // `post-inline-ee-instrument` LLVM pass. + + // The function name varies on platforms. + // See test/CodeGen/mcount.c in clang. 
+ let mcount_name = CString::new( + cx.sess().target.target.options.target_mcount.as_str().as_bytes()).unwrap(); + + llvm::AddFunctionAttrStringValue( + llfn, llvm::AttributePlace::Function, + const_cstr!("instrument-function-entry-inlined"), &mcount_name); + } +} + pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { // Only use stack probes if the target specification indicates that we // should be using stack probes @@ -148,6 +150,28 @@ pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) { } } +pub(crate) fn default_optimisation_attrs(sess: &Session, llfn: &'ll Value) { + match sess.opts.optimize { + OptLevel::Size => { + llvm::Attribute::MinSize.unapply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn); + }, + OptLevel::SizeMin => { + llvm::Attribute::MinSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn); + } + OptLevel::No => { + llvm::Attribute::MinSize.unapply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn); + llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn); + } + _ => {} + } +} + + /// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`) /// attributes. pub fn from_fn_attrs( @@ -159,6 +183,22 @@ pub fn from_fn_attrs( let codegen_fn_attrs = id.map(|id| cx.tcx.codegen_fn_attrs(id)) .unwrap_or_else(|| CodegenFnAttrs::new()); + match codegen_fn_attrs.optimize { + OptimizeAttr::None => { + default_optimisation_attrs(cx.tcx.sess, llfn); + } + OptimizeAttr::Speed => { + llvm::Attribute::MinSize.unapply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn); + llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn); + } + OptimizeAttr::Size => { + llvm::Attribute::MinSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); + llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn); + } + } + inline(cx, llfn, codegen_fn_attrs.inline); // The `uwtable` attribute according to LLVM is: @@ -183,11 +223,15 @@ pub fn from_fn_attrs( } set_frame_pointer_elimination(cx, llfn); + set_instrument_function(cx, llfn); set_probestack(cx, llfn); if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) { Attribute::Cold.apply_llfn(Function, llfn); } + if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) { + Attribute::ReturnsTwice.apply_llfn(Function, llfn); + } if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { naked(llfn, true); } @@ -270,7 +314,7 @@ pub fn from_fn_attrs( } } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { providers.target_features_whitelist = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); if tcx.sess.opts.actually_rustdoc { @@ -290,7 +334,7 @@ pub fn provide(providers: &mut Providers) { provide_extern(providers); } -pub fn provide_extern(providers: &mut Providers) { +pub fn provide_extern(providers: &mut Providers<'_>) { providers.wasm_import_module_map = |tcx, cnum| { // Build up a map from DefId to a `NativeLibrary` structure, where // `NativeLibrary` internally contains information about @@ -324,7 +368,7 @@ pub fn provide_extern(providers: &mut Providers) { }; } -fn wasm_import_module(tcx: TyCtxt, id: DefId) -> Option { +fn wasm_import_module(tcx: TyCtxt<'_, '_, '_>, id: DefId) -> Option { 
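Taken together, default_optimisation_attrs translates the session's opt-level into LLVM's OptimizeForSize/MinSize function attributes, and from_fn_attrs lets a per-function OptimizeAttr override that default. On the source side this corresponds to the #[optimize(..)] attribute; the feature-gate name in the sketch is an assumption (RFC 2412), not something this hunk shows, and a nightly toolchain is required:

#![feature(optimize_attribute)] // assumed gate name for the #[optimize] attribute

#[optimize(size)]   // OptimizeAttr::Size -> MinSize + OptimizeForSize in LLVM
pub fn rarely_hot() -> u32 { 42 }

#[optimize(speed)]  // OptimizeAttr::Speed -> strips the size-oriented attributes
pub fn hot_loop(xs: &[u32]) -> u32 { xs.iter().sum() }

fn main() {
    assert_eq!(rarely_hot(), 42);
    assert_eq!(hot_loop(&[1, 2, 3]), 6);
}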
tcx.wasm_import_module_map(id.krate) .get(&id) .map(|s| CString::new(&s[..]).unwrap()) diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs index 76c50711639a4..3fb9d4b5b776b 100644 --- a/src/librustc_codegen_llvm/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A helper class for dealing with static archives use std::ffi::{CString, CStr}; @@ -17,12 +7,11 @@ use std::path::{Path, PathBuf}; use std::ptr; use std::str; -use back::bytecode::RLIB_BYTECODE_EXTENSION; +use crate::back::bytecode::RLIB_BYTECODE_EXTENSION; +use crate::llvm::archive_ro::{ArchiveRO, Child}; +use crate::llvm::{self, ArchiveKind}; +use crate::metadata::METADATA_FILENAME; use rustc_codegen_ssa::back::archive::find_library; -use libc; -use llvm::archive_ro::{ArchiveRO, Child}; -use llvm::{self, ArchiveKind}; -use metadata::METADATA_FILENAME; use rustc::session::Session; pub struct ArchiveConfig<'a> { @@ -53,7 +42,7 @@ enum Addition { }, } -fn is_relevant_child(c: &Child) -> bool { +fn is_relevant_child(c: &Child<'_>) -> bool { match c.name() { Some(name) => !name.contains("SYMDEF"), None => false, @@ -61,7 +50,7 @@ fn is_relevant_child(c: &Child) -> bool { } impl<'a> ArchiveBuilder<'a> { - /// Create a new static archive, ready for modifying the archive specified + /// Creates a new static archive, ready for modifying the archive specified /// by `config`. pub fn new(config: ArchiveConfig<'a>) -> ArchiveBuilder<'a> { ArchiveBuilder { diff --git a/src/librustc_codegen_llvm/back/bytecode.rs b/src/librustc_codegen_llvm/back/bytecode.rs index 0b264de18c124..8b288c45336b7 100644 --- a/src/librustc_codegen_llvm/back/bytecode.rs +++ b/src/librustc_codegen_llvm/back/bytecode.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Management of the encoding of LLVM bytecode into rlibs //! //! This module contains the management of encoding LLVM bytecode into rlibs, diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index f1c0464f5f25b..19419a72b94dd 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -1,23 +1,15 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
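The wasm_import_module_map query assembled here records, per extern definition, which wasm module its symbols are imported from, i.e. the information expressed in source with #[link(wasm_import_module = "...")]. A small usage sketch (module and symbol names are invented, the code is meant for a wasm32 target, and the attribute's stabilization status at the time of this diff is not something this hunk shows):

// "host_env" and `host_log` are made-up names for illustration only.
#[link(wasm_import_module = "host_env")]
extern "C" {
    fn host_log(ptr: *const u8, len: usize);
}

pub fn log(msg: &str) {
    unsafe { host_log(msg.as_ptr(), msg.len()) }
}

fn main() {
    log("hello from wasm");
}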
- -use back::wasm; use super::archive::{ArchiveBuilder, ArchiveConfig}; use super::bytecode::RLIB_BYTECODE_EXTENSION; +use super::rpath::RPathConfig; +use super::rpath; +use crate::back::wasm; +use crate::metadata::METADATA_FILENAME; +use crate::context::get_reloc_model; +use crate::llvm; use rustc_codegen_ssa::back::linker::Linker; use rustc_codegen_ssa::back::link::{remove, ignored_for_lto, each_linked_rlib, linker_and_flavor, get_linker}; use rustc_codegen_ssa::back::command::Command; -use super::rpath::RPathConfig; -use super::rpath; -use metadata::METADATA_FILENAME; use rustc::session::config::{self, DebugInfo, OutputFilenames, OutputType, PrintRequest}; use rustc::session::config::{RUST_CGU_EXT, Lto, Sanitizer}; use rustc::session::filesearch; @@ -26,14 +18,12 @@ use rustc::session::Session; use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; use rustc_codegen_ssa::CodegenResults; -use rustc::util::common::time; +use rustc::util::common::{time, time_ext}; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; use tempfile::{Builder as TempFileBuilder, TempDir}; use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor}; use rustc_data_structures::fx::FxHashSet; -use context::get_reloc_model; -use llvm; use std::ascii; use std::char; @@ -52,7 +42,7 @@ pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default out_filename, check_file_is_writeable}; -/// Perform the linkage portion of the compilation phase. This will generate all +/// Performs the linkage portion of the compilation phase. This will generate all /// of the requested outputs for this compilation session. pub(crate) fn link_binary(sess: &Session, codegen_results: &CodegenResults, @@ -457,6 +447,21 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { } } +fn get_file_path(sess: &Session, name: &str) -> PathBuf { + let fs = sess.target_filesearch(PathKind::Native); + let file_path = fs.get_lib_path().join(name); + if file_path.exists() { + return file_path + } + for search_path in fs.search_paths() { + let file_path = search_path.dir.join(name); + if file_path.exists() { + return file_path + } + } + PathBuf::from(name) +} + // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. 
This @@ -472,7 +477,6 @@ fn link_natively(sess: &Session, // The invocations of cc share some flags across platforms let (pname, mut cmd) = get_linker(sess, &linker, flavor); - let root = sess.target_filesearch(PathKind::Native).get_lib_path(); if let Some(args) = sess.target.target.options.pre_link_args.get(&flavor) { cmd.args(args); } @@ -500,12 +504,12 @@ fn link_natively(sess: &Session, &sess.target.target.options.pre_link_objects_dll }; for obj in pre_link_objects { - cmd.arg(root.join(obj)); + cmd.arg(get_file_path(sess, obj)); } if crate_type == config::CrateType::Executable && sess.crt_static() { for obj in &sess.target.target.options.pre_link_objects_exe_crt { - cmd.arg(root.join(obj)); + cmd.arg(get_file_path(sess, obj)); } } @@ -519,7 +523,7 @@ fn link_natively(sess: &Session, } { - let target_cpu = ::llvm_util::target_cpu(sess); + let target_cpu = crate::llvm_util::target_cpu(sess); let mut linker = codegen_results.linker_info.to_linker(cmd, &sess, flavor, target_cpu); link_args(&mut *linker, flavor, sess, crate_type, tmpdir, out_filename, codegen_results); @@ -529,11 +533,11 @@ fn link_natively(sess: &Session, cmd.args(args); } for obj in &sess.target.target.options.post_link_objects { - cmd.arg(root.join(obj)); + cmd.arg(get_file_path(sess, obj)); } if sess.crt_static() { for obj in &sess.target.target.options.post_link_objects_crt { - cmd.arg(root.join(obj)); + cmd.arg(get_file_path(sess, obj)); } } if let Some(args) = sess.target.target.options.post_link_args.get(&flavor) { @@ -694,7 +698,6 @@ fn link_natively(sess: &Session, } if sess.opts.target_triple.triple() == "wasm32-unknown-unknown" { - wasm::rewrite_imports(&out_filename, &codegen_results.crate_info.wasm_imports); wasm::add_producer_section( &out_filename, &sess.edition().to_string(), @@ -804,7 +807,7 @@ fn exec_linker(sess: &Session, cmd: &mut Command, out_filename: &Path, tmpdir: & } impl<'a> fmt::Display for Escape<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.is_like_msvc { // This is "documented" at // https://msdn.microsoft.com/en-us/library/4xdcbak7.aspx @@ -853,7 +856,7 @@ fn link_args(cmd: &mut dyn Linker, codegen_results: &CodegenResults) { // Linker plugins should be specified early in the list of arguments - cmd.cross_lang_lto(); + cmd.linker_plugin_lto(); // The default library location, we need this to find the runtime. // The location of crates will be determined as needed. @@ -1316,7 +1319,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, let name = cratepath.file_name().unwrap().to_str().unwrap(); let name = &name[3..name.len() - 5]; // chop off lib/.rlib - time(sess, &format!("altering {}.rlib", name), || { + time_ext(sess.time_extended(), Some(sess), &format!("altering {}.rlib", name), || { let cfg = archive_config(sess, &dst, Some(cratepath)); let mut archive = ArchiveBuilder::new(cfg); archive.update_symbols(); @@ -1393,10 +1396,6 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // Same thing as above, but for dynamic crates instead of static crates. fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) { - // If we're performing LTO, then it should have been previously required - // that all upstream rust dependencies were available in an rlib format. 
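get_file_path, added a few hunks above, replaces the single sysroot lookup (root.join(obj)) used for the pre/post-link objects: it tries the sysroot native lib dir first, then each search path, and finally falls back to the bare file name for the linker to resolve. A standalone restatement of that order (the function name and the "crt1.o" example are ours, not the compiler's):

use std::path::{Path, PathBuf};

// `lib_dir` stands for the sysroot's native lib path, `search_dirs` for the
// session's -L search paths.
fn find_link_object(lib_dir: &Path, search_dirs: &[PathBuf], name: &str) -> PathBuf {
    let candidate = lib_dir.join(name);
    if candidate.exists() {
        return candidate;
    }
    for dir in search_dirs {
        let candidate = dir.join(name);
        if candidate.exists() {
            return candidate;
        }
    }
    // Fall back to the bare name and let the linker search for it.
    PathBuf::from(name)
}

fn main() {
    let p = find_link_object(Path::new("/nonexistent"), &[], "crt1.o");
    assert_eq!(p, PathBuf::from("crt1.o"));
}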
- assert!(!are_upstream_rust_objects_already_included(sess)); - // Just need to tell the linker about where the library lives and // what its name is let parent = cratepath.parent(); @@ -1487,7 +1486,7 @@ fn are_upstream_rust_objects_already_included(sess: &Session) -> bool { Lto::Thin => { // If we defer LTO to the linker, we haven't run LTO ourselves, so // any upstream object files have not been copied yet. - !sess.opts.debugging_opts.cross_lang_lto.enabled() + !sess.opts.cg.linker_plugin_lto.enabled() } Lto::No | Lto::ThinLocal => false, diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index bddb45da10b00..84c652ff238af 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -1,37 +1,25 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; +use crate::back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; +use crate::back::write::{self, DiagnosticHandlers, with_llvm_pmb, save_temp_bitcode, + to_llvm_opt_settings}; +use crate::llvm::archive_ro::ArchiveRO; +use crate::llvm::{self, True, False}; +use crate::{ModuleLlvm, LlvmCodegenBackend}; use rustc_codegen_ssa::back::symbol_export; -use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, FatLTOInput}; use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; use rustc_codegen_ssa::traits::*; -use back::write::{self, DiagnosticHandlers, with_llvm_pmb, save_temp_bitcode, get_llvm_opt_level}; use errors::{FatalError, Handler}; -use llvm::archive_ro::ArchiveRO; -use llvm::{self, True, False}; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::LOCAL_CRATE; use rustc::middle::exported_symbols::SymbolExportLevel; use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; +use rustc::util::profiling::ProfileCategory; use rustc_data_structures::fx::FxHashMap; -use time_graph::Timeline; -use {ModuleLlvm, LlvmCodegenBackend}; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; -use libc; - use std::ffi::{CStr, CString}; -use std::fs; use std::ptr; use std::slice; use std::sync::Arc; @@ -49,7 +37,6 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } fn prepare_lto(cgcx: &CodegenContext, - timeline: &mut Timeline, diag_handler: &Handler) -> Result<(Vec, Vec<(SerializedModule, CString)>), FatalError> { @@ -80,7 +67,8 @@ fn prepare_lto(cgcx: &CodegenContext, .iter() .filter_map(symbol_filter) .collect::>(); - timeline.record("whitelist"); + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, + "generate_symbol_white_list_for_thinlto"); info!("{} symbols to preserve in this crate", symbol_white_list.len()); // If we're performing LTO for the entire crate graph, then for each of our @@ -109,6 +97,8 @@ fn prepare_lto(cgcx: &CodegenContext, } for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, + format!("load: {}", path.display())); let exported_symbols = cgcx.exported_symbols 
.as_ref().expect("needs exported symbols for LTO"); symbol_white_list.extend( @@ -133,7 +123,6 @@ fn prepare_lto(cgcx: &CodegenContext, let bc = SerializedModule::FromRlib(bc); upstream_modules.push((bc, CString::new(id).unwrap())); } - timeline.record(&format!("load: {}", path.display())); } } @@ -143,16 +132,23 @@ fn prepare_lto(cgcx: &CodegenContext, /// Performs fat LTO by merging all modules into a single one and returning it /// for further optimization. pub(crate) fn run_fat(cgcx: &CodegenContext, - modules: Vec>, - timeline: &mut Timeline) + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result, FatalError> { let diag_handler = cgcx.create_diag_handler(); - let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?; + let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, &diag_handler)?; let symbol_white_list = symbol_white_list.iter() .map(|c| c.as_ptr()) .collect::>(); - fat_lto(cgcx, &diag_handler, modules, upstream_modules, &symbol_white_list, timeline) + fat_lto( + cgcx, + &diag_handler, + modules, + cached_modules, + upstream_modules, + &symbol_white_list, + ) } /// Performs thin LTO by performing necessary global analysis and returning two @@ -160,16 +156,15 @@ pub(crate) fn run_fat(cgcx: &CodegenContext, /// can simply be copied over from the incr. comp. cache. pub(crate) fn run_thin(cgcx: &CodegenContext, modules: Vec<(String, ThinBuffer)>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, - timeline: &mut Timeline) + cached_modules: Vec<(SerializedModule, WorkProduct)>) -> Result<(Vec>, Vec), FatalError> { let diag_handler = cgcx.create_diag_handler(); - let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, timeline, &diag_handler)?; + let (symbol_white_list, upstream_modules) = prepare_lto(cgcx, &diag_handler)?; let symbol_white_list = symbol_white_list.iter() .map(|c| c.as_ptr()) .collect::>(); - if cgcx.opts.debugging_opts.cross_lang_lto.enabled() { + if cgcx.opts.cg.linker_plugin_lto.enabled() { unreachable!("We should never reach this case if the LTO step \ is deferred to the linker"); } @@ -178,41 +173,23 @@ pub(crate) fn run_thin(cgcx: &CodegenContext, modules, upstream_modules, cached_modules, - &symbol_white_list, - timeline) + &symbol_white_list) } pub(crate) fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, ThinBuffer) { let name = module.name.clone(); let buffer = ThinBuffer::new(module.module_llvm.llmod()); - - // We emit the module after having serialized it into a ThinBuffer - // because only then it will contain the ThinLTO module summary. - if let Some(ref incr_comp_session_dir) = cgcx.incr_comp_session_dir { - if cgcx.config(module.kind).emit_pre_thin_lto_bc { - let path = incr_comp_session_dir - .join(pre_lto_bitcode_filename(&name)); - - fs::write(&path, buffer.data()).unwrap_or_else(|e| { - panic!("Error writing pre-lto-bitcode file `{}`: {}", - path.display(), - e); - }); - } - } - (name, buffer) } fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec>, + mut modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule, CString)>, - symbol_white_list: &[*const libc::c_char], - timeline: &mut Timeline) + symbol_white_list: &[*const libc::c_char]) -> Result, FatalError> { info!("going for a fat lto"); @@ -226,8 +203,14 @@ fn fat_lto(cgcx: &CodegenContext, // file copy operations in the backend work correctly. 
The only other kind // of module here should be an allocator one, and if your crate is smaller // than the allocator module then the size doesn't really matter anyway. - let (_, costliest_module) = modules.iter() + let costliest_module = modules.iter() .enumerate() + .filter_map(|(i, module)| { + match module { + FatLTOInput::InMemory(m) => Some((i, m)), + FatLTOInput::Serialized { .. } => None, + } + }) .filter(|&(_, module)| module.kind == ModuleKind::Regular) .map(|(i, module)| { let cost = unsafe { @@ -235,9 +218,38 @@ fn fat_lto(cgcx: &CodegenContext, }; (cost, i) }) - .max() - .expect("must be codegen'ing at least one module"); - let module = modules.remove(costliest_module); + .max(); + + // If we found a costliest module, we're good to go. Otherwise all our + // inputs were serialized which could happen in the case, for example, that + // all our inputs were incrementally reread from the cache and we're just + // re-executing the LTO passes. If that's the case deserialize the first + // module and create a linker with it. + let module: ModuleCodegen = match costliest_module { + Some((_cost, i)) => { + match modules.remove(i) { + FatLTOInput::InMemory(m) => m, + FatLTOInput::Serialized { .. } => unreachable!(), + } + } + None => { + let pos = modules.iter().position(|m| { + match m { + FatLTOInput::InMemory(_) => false, + FatLTOInput::Serialized { .. } => true, + } + }).expect("must have at least one serialized module"); + let (name, buffer) = match modules.remove(pos) { + FatLTOInput::Serialized { name, buffer } => (name, buffer), + FatLTOInput::InMemory(_) => unreachable!(), + }; + ModuleCodegen { + module_llvm: ModuleLlvm::parse(cgcx, &name, &buffer, diag_handler)?, + name, + kind: ModuleKind::Regular, + } + } + }; let mut serialized_bitcode = Vec::new(); { let (llcx, llmod) = { @@ -257,10 +269,20 @@ fn fat_lto(cgcx: &CodegenContext, // way we know of to do that is to serialize them to a string and them parse // them later. Not great but hey, that's why it's "fat" LTO, right? 
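The selection above reads as: among the in-memory regular modules, pick the costliest as the link base, so the expensive module is the one that never gets re-serialized; if every input arrived already serialized (for example a fully cached incremental build), parse the first serialized module instead. A condensed sketch of that decision, with a plain (cost, in_memory) pair standing in for the real module data and LLVM cost query:

fn pick_link_base(modules: &[(u64, bool)]) -> usize {
    modules.iter()
        .enumerate()
        .filter(|&(_, &(_, in_memory))| in_memory)   // only in-memory modules qualify
        .max_by_key(|&(_, &(cost, _))| cost)         // costliest one wins
        .map(|(i, _)| i)
        .unwrap_or_else(|| {
            // All inputs were serialized: take the first serialized module.
            modules.iter()
                .position(|&(_, in_memory)| !in_memory)
                .expect("must have at least one serialized module")
        })
}

fn main() {
    // Two in-memory modules: the costlier one (index 1) becomes the link base.
    assert_eq!(pick_link_base(&[(10, true), (90, true), (50, false)]), 1);
    // Everything serialized: fall back to the first serialized module.
    assert_eq!(pick_link_base(&[(10, false), (90, false)]), 0);
}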
serialized_modules.extend(modules.into_iter().map(|module| { - let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - let llmod_id = CString::new(&module.name[..]).unwrap(); - - (SerializedModule::Local(buffer), llmod_id) + match module { + FatLTOInput::InMemory(module) => { + let buffer = ModuleBuffer::new(module.module_llvm.llmod()); + let llmod_id = CString::new(&module.name[..]).unwrap(); + (SerializedModule::Local(buffer), llmod_id) + } + FatLTOInput::Serialized { name, buffer } => { + let llmod_id = CString::new(name).unwrap(); + (SerializedModule::Local(buffer), llmod_id) + } + } + })); + serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| { + (buffer, CString::new(wp.cgu_name.clone()).unwrap()) })); // For all serialized bitcode files we parse them and link them in as we did @@ -277,7 +299,6 @@ fn fat_lto(cgcx: &CodegenContext, write::llvm_err(&diag_handler, &msg) }) })?; - timeline.record(&format!("link {:?}", name)); serialized_bitcode.push(bc_decoded); } drop(linker); @@ -299,7 +320,6 @@ fn fat_lto(cgcx: &CodegenContext, } save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); } - timeline.record("passes"); } Ok(LtoModuleCodegen::Fat { @@ -369,8 +389,7 @@ fn thin_lto(cgcx: &CodegenContext, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, - symbol_white_list: &[*const libc::c_char], - timeline: &mut Timeline) + symbol_white_list: &[*const libc::c_char]) -> Result<(Vec>, Vec), FatalError> { unsafe { @@ -396,7 +415,6 @@ fn thin_lto(cgcx: &CodegenContext, }); thin_buffers.push(buffer); module_names.push(cname); - timeline.record(&name); } // FIXME: All upstream crates are deserialized internally in the @@ -449,7 +467,6 @@ fn thin_lto(cgcx: &CodegenContext, })?; info!("thin LTO data created"); - timeline.record("data"); let import_map = if cgcx.incr_comp_session_dir.is_some() { ThinLTOImports::from_thin_lto_data(data) @@ -460,7 +477,6 @@ fn thin_lto(cgcx: &CodegenContext, ThinLTOImports::default() }; info!("thin LTO import map loaded"); - timeline.record("import-map-loaded"); let data = ThinData(data); @@ -542,7 +558,7 @@ pub(crate) fn run_pass_manager(cgcx: &CodegenContext, // Note that in general this shouldn't matter too much as you typically // only turn on ThinLTO when you're compiling with optimizations // otherwise. - let opt_level = config.opt_level.map(get_llvm_opt_level) + let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0) .unwrap_or(llvm::CodeGenOptLevel::None); let opt_level = match opt_level { llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, @@ -589,6 +605,16 @@ impl ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } + + pub fn parse<'a>( + &self, + name: &str, + cx: &'a llvm::Context, + handler: &Handler, + ) -> Result<&'a llvm::Module, FatalError> { + let name = CString::new(name).unwrap(); + parse_module(cx, &name, self.data(), handler) + } } impl ModuleBufferMethods for ModuleBuffer { @@ -655,7 +681,6 @@ impl Drop for ThinBuffer { pub unsafe fn optimize_thin_module( thin_module: &mut ThinModule, cgcx: &CodegenContext, - timeline: &mut Timeline ) -> Result, FatalError> { let diag_handler = cgcx.create_diag_handler(); let tm = (cgcx.tm_factory.0)().map_err(|e| { @@ -668,15 +693,12 @@ pub unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. 
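Throughout these LTO and codegen hunks the old timeline.record("...") checkpoints are replaced with let _timer = cgcx.profile_activity(..) guards: the measurement now spans a scope and ends when the guard is dropped. A minimal standalone illustration of that RAII pattern (this is only the shape of it, not the rustc self-profiler API):

use std::time::Instant;

struct ScopeTimer {
    label: &'static str,
    start: Instant,
}

impl ScopeTimer {
    fn new(label: &'static str) -> Self {
        ScopeTimer { label, start: Instant::now() }
    }
}

impl Drop for ScopeTimer {
    fn drop(&mut self) {
        // Reported when the guard goes out of scope, covering the whole block.
        eprintln!("{} took {:?}", self.label, self.start.elapsed());
    }
}

fn main() {
    let _timer = ScopeTimer::new("LLVM_module_passes");
    // ... work measured until `_timer` is dropped at the end of this scope ...
}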
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + let llmod_raw = parse_module( llcx, - thin_module.data().as_ptr(), - thin_module.data().len(), - thin_module.shared.module_names[thin_module.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module"; - write::llvm_err(&diag_handler, msg) - })? as *const _; + &thin_module.shared.module_names[thin_module.idx], + thin_module.data(), + &diag_handler, + )? as *const _; let module = ModuleCodegen { module_llvm: ModuleLlvm { llmod_raw, @@ -705,9 +727,10 @@ pub unsafe fn optimize_thin_module( // Like with "fat" LTO, get some better optimizations if landing pads // are disabled by removing all landing pads. if cgcx.no_landing_pads { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, + "LLVM_remove_landing_pads"); llvm::LLVMRustMarkAllFunctionsNounwind(llmod); save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); - timeline.record("nounwind"); } // Up next comes the per-module local analyses that we do for Thin LTO. @@ -723,25 +746,21 @@ pub unsafe fn optimize_thin_module( return Err(write::llvm_err(&diag_handler, msg)) } save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); - timeline.record("rename"); if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module"; return Err(write::llvm_err(&diag_handler, msg)) } save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); - timeline.record("resolve"); if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module"; return Err(write::llvm_err(&diag_handler, msg)) } save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); - timeline.record("internalize"); if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { let msg = "failed to prepare thin LTO module"; return Err(write::llvm_err(&diag_handler, msg)) } save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); - timeline.record("import"); // Ok now this is a bit unfortunate. This is also something you won't // find upstream in LLVM's ThinLTO passes! This is a hack for now to @@ -774,7 +793,6 @@ pub unsafe fn optimize_thin_module( // fixed in LLVM. llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); - timeline.record("patch"); // Alright now that we've done everything related to the ThinLTO // analysis it's time to run some optimizations! Here we use the same @@ -785,7 +803,6 @@ pub unsafe fn optimize_thin_module( let config = cgcx.config(module.kind); run_pass_manager(cgcx, &module, config, true); save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); - timeline.record("thin-done"); } Ok(module) } @@ -801,7 +818,7 @@ impl ThinLTOImports { self.imports.get(llvm_module_name).map(|v| &v[..]).unwrap_or(&[]) } - /// Load the ThinLTO import map from ThinLTOData. + /// Loads the ThinLTO import map from ThinLTOData. 
unsafe fn from_thin_lto_data(data: *const llvm::ThinLTOData) -> ThinLTOImports { unsafe extern "C" fn imported_module_callback(payload: *mut libc::c_void, importing_module_name: *const libc::c_char, @@ -833,3 +850,22 @@ fn module_name_to_str(c_str: &CStr) -> &str { c_str.to_str().unwrap_or_else(|e| bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)) } + +fn parse_module<'a>( + cx: &'a llvm::Context, + name: &CStr, + data: &[u8], + diag_handler: &Handler, +) -> Result<&'a llvm::Module, FatalError> { + unsafe { + llvm::LLVMRustParseBitcodeForLTO( + cx, + data.as_ptr(), + data.len(), + name.as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for LTO module"; + write::llvm_err(&diag_handler, msg) + }) + } +} diff --git a/src/librustc_codegen_llvm/back/rpath.rs b/src/librustc_codegen_llvm/back/rpath.rs index 73a7366d0a393..2b7abcb52bef8 100644 --- a/src/librustc_codegen_llvm/back/rpath.rs +++ b/src/librustc_codegen_llvm/back/rpath.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::fx::FxHashSet; use std::env; use std::path::{Path, PathBuf}; @@ -25,7 +15,7 @@ pub struct RPathConfig<'a> { pub get_install_prefix_lib_path: &'a mut dyn FnMut() -> PathBuf, } -pub fn get_rpath_flags(config: &mut RPathConfig) -> Vec { +pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec { // No rpath on windows if !config.has_rpath { return Vec::new(); @@ -62,7 +52,7 @@ fn rpaths_to_flags(rpaths: &[String]) -> Vec { ret } -fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec { +fn get_rpaths(config: &mut RPathConfig<'_>, libs: &[PathBuf]) -> Vec { debug!("output: {:?}", config.out_filename.display()); debug!("libs:"); for libpath in libs { @@ -96,12 +86,12 @@ fn get_rpaths(config: &mut RPathConfig, libs: &[PathBuf]) -> Vec { rpaths } -fn get_rpaths_relative_to_output(config: &mut RPathConfig, +fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>, libs: &[PathBuf]) -> Vec { libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect() } -fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String { +fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String { // Mac doesn't appear to support $ORIGIN let prefix = if config.is_like_osx { "@loader_path" @@ -111,9 +101,9 @@ fn get_rpath_relative_to_output(config: &mut RPathConfig, lib: &Path) -> String let cwd = env::current_dir().unwrap(); let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or_else(|_| cwd.join(lib)); - lib.pop(); + lib.pop(); // strip filename let mut output = cwd.join(&config.out_filename); - output.pop(); + output.pop(); // strip filename let output = fs::canonicalize(&output).unwrap_or(output); let relative = path_relative_from(&lib, &output).unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib)); @@ -137,7 +127,7 @@ fn path_relative_from(path: &Path, base: &Path) -> Option { } else { let mut ita = path.components(); let mut itb = base.components(); - let mut comps: Vec = vec![]; + let mut comps: Vec> = vec![]; loop { match (ita.next(), itb.next()) { (None, None) => break, @@ -164,7 +154,7 @@ fn path_relative_from(path: &Path, base: 
&Path) -> Option { } -fn get_install_prefix_rpath(config: &mut RPathConfig) -> String { +fn get_install_prefix_rpath(config: &mut RPathConfig<'_>) -> String { let path = (config.get_install_prefix_lib_path)(); let path = env::current_dir().unwrap().join(&path); // FIXME (#9639): This needs to handle non-utf8 paths diff --git a/src/librustc_codegen_llvm/back/wasm.rs b/src/librustc_codegen_llvm/back/wasm.rs index 1a5c65f3c4397..f90bb89fbe87d 100644 --- a/src/librustc_codegen_llvm/back/wasm.rs +++ b/src/librustc_codegen_llvm/back/wasm.rs @@ -1,128 +1,13 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fs; use std::path::Path; use std::str; -use rustc_data_structures::fx::FxHashMap; use serialize::leb128; // https://webassembly.github.io/spec/core/binary/modules.html#binary-importsec -const WASM_IMPORT_SECTION_ID: u8 = 2; const WASM_CUSTOM_SECTION_ID: u8 = 0; -const WASM_EXTERNAL_KIND_FUNCTION: u8 = 0; -const WASM_EXTERNAL_KIND_TABLE: u8 = 1; -const WASM_EXTERNAL_KIND_MEMORY: u8 = 2; -const WASM_EXTERNAL_KIND_GLOBAL: u8 = 3; - -/// Rewrite the module imports are listed from in a wasm module given the field -/// name to module name mapping in `import_map`. -/// -/// LLVM 6 which we're using right now doesn't have the ability to configure the -/// module a wasm symbol is import from. Rather all imported symbols come from -/// the bland `"env"` module unconditionally. Furthermore we'd *also* need -/// support in LLD for preserving these import modules, which it unfortunately -/// currently does not. -/// -/// This function is intended as a hack for now where we manually rewrite the -/// wasm output by LLVM to have the correct import modules listed. The -/// `#[link(wasm_import_module = "...")]` attribute in Rust translates to the -/// module that each symbol is imported from, so here we manually go through the -/// wasm file, decode it, rewrite imports, and then rewrite the wasm module. 
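path_relative_from, annotated above, is what turns an absolute library path into the relative part of an rpath entry: shared prefix components are skipped, one ".." is emitted per remaining component of the base, and the rest of the target is appended. A simplified standalone version, handling only the absolute unix-style paths the rpath code feeds it, together with the kind of result it produces:

use std::path::{Component, Path, PathBuf};

fn relative_from(path: &Path, base: &Path) -> PathBuf {
    let mut ita = path.components();
    let mut itb = base.components();
    let mut comps: Vec<Component<'_>> = Vec::new();
    loop {
        match (ita.next(), itb.next()) {
            (None, None) => break,
            (Some(a), None) => {
                // Base is a prefix of the target: keep the remaining components.
                comps.push(a);
                comps.extend(ita.by_ref());
                break;
            }
            (None, Some(_)) => comps.push(Component::ParentDir),
            (Some(a), Some(b)) if a == b => continue, // shared prefix
            (Some(a), Some(_)) => {
                // Diverged: one ".." per remaining base component, then the rest.
                comps.push(Component::ParentDir);
                for _ in itb.by_ref() {
                    comps.push(Component::ParentDir);
                }
                comps.push(a);
                comps.extend(ita.by_ref());
                break;
            }
        }
    }
    comps.iter().map(|c| c.as_os_str()).collect()
}

fn main() {
    // A library seen from the binary's directory, e.g. for an $ORIGIN-based rpath:
    assert_eq!(
        relative_from(Path::new("/usr/lib/librust.so"), Path::new("/usr/bin")),
        PathBuf::from("../lib/librust.so")
    );
}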
-/// -/// Support for this was added to LLVM in -/// https://github.com/llvm-mirror/llvm/commit/0f32e1365, although support still -/// needs to be added, tracked at https://bugs.llvm.org/show_bug.cgi?id=37168 -pub fn rewrite_imports(path: &Path, import_map: &FxHashMap) { - if import_map.is_empty() { - return - } - - let wasm = fs::read(path).expect("failed to read wasm output"); - let mut ret = WasmEncoder::new(); - ret.data.extend(&wasm[..8]); - - // skip the 8 byte wasm/version header - for (id, raw) in WasmSections(WasmDecoder::new(&wasm[8..])) { - ret.byte(id); - if id == WASM_IMPORT_SECTION_ID { - info!("rewriting import section"); - let data = rewrite_import_section( - &mut WasmDecoder::new(raw), - import_map, - ); - ret.bytes(&data); - } else { - info!("carry forward section {}, {} bytes long", id, raw.len()); - ret.bytes(raw); - } - } - - fs::write(path, &ret.data).expect("failed to write wasm output"); - - fn rewrite_import_section( - wasm: &mut WasmDecoder, - import_map: &FxHashMap, - ) - -> Vec - { - let mut dst = WasmEncoder::new(); - let n = wasm.u32(); - dst.u32(n); - info!("rewriting {} imports", n); - for _ in 0..n { - rewrite_import_entry(wasm, &mut dst, import_map); - } - return dst.data - } - - fn rewrite_import_entry(wasm: &mut WasmDecoder, - dst: &mut WasmEncoder, - import_map: &FxHashMap) { - // More info about the binary format here is available at: - // https://webassembly.github.io/spec/core/binary/modules.html#import-section - // - // Note that you can also find the whole point of existence of this - // function here, where we map the `module` name to a different one if - // we've got one listed. - let module = wasm.str(); - let field = wasm.str(); - let new_module = if module == "env" { - import_map.get(field).map(|s| &**s).unwrap_or(module) - } else { - module - }; - info!("import rewrite ({} => {}) / {}", module, new_module, field); - dst.str(new_module); - dst.str(field); - let kind = wasm.byte(); - dst.byte(kind); - match kind { - WASM_EXTERNAL_KIND_FUNCTION => dst.u32(wasm.u32()), - WASM_EXTERNAL_KIND_TABLE => { - dst.byte(wasm.byte()); // element_type - dst.limits(wasm.limits()); - } - WASM_EXTERNAL_KIND_MEMORY => dst.limits(wasm.limits()), - WASM_EXTERNAL_KIND_GLOBAL => { - dst.byte(wasm.byte()); // content_type - dst.bool(wasm.bool()); // mutable - } - b => panic!("unknown kind: {}", b), - } - } -} - -/// Add or augment the existing `producers` section to encode information about +/// Adds or augment the existing `producers` section to encode information about /// the Rust compiler used to produce the wasm file. 
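With the import-rewriting hack removed, the only wasm post-processing kept here is add_producer_section, which appends or extends a custom section (id 0). Section and name lengths in wasm are unsigned LEB128 varints, which is what the leb128 helpers produce; a standalone sketch of that encoding (this mirrors the wire format, not the exact rustc serialize::leb128 API):

fn write_u32_leb128(buf: &mut Vec<u8>, mut value: u32) {
    loop {
        let mut byte = (value & 0x7f) as u8;
        value >>= 7;
        if value != 0 {
            byte |= 0x80; // continuation bit: more bytes follow
        }
        buf.push(byte);
        if value == 0 {
            break;
        }
    }
}

fn main() {
    let mut buf = Vec::new();
    write_u32_leb128(&mut buf, 624485);
    assert_eq!(buf, vec![0xe5, 0x8e, 0x26]); // the classic LEB128 example value
}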
pub fn add_producer_section( path: &Path, @@ -276,15 +161,6 @@ impl<'a> WasmDecoder<'a> { let len = self.u32(); str::from_utf8(self.skip(len as usize)).unwrap() } - - fn bool(&mut self) -> bool { - self.byte() == 1 - } - - fn limits(&mut self) -> (u32, Option) { - let has_max = self.bool(); - (self.u32(), if has_max { Some(self.u32()) } else { None }) - } } struct WasmEncoder { @@ -312,16 +188,4 @@ impl WasmEncoder { fn str(&mut self, val: &str) { self.bytes(val.as_bytes()) } - - fn bool(&mut self, b: bool) { - self.byte(b as u8); - } - - fn limits(&mut self, limits: (u32, Option)) { - self.bool(limits.1.is_some()); - self.u32(limits.0); - if let Some(c) = limits.1 { - self.u32(c); - } - } } diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 78a3b6907a63d..f0ed201ad5c27 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -1,36 +1,27 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use attributes; -use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; -use back::lto::ThinBuffer; +use crate::attributes; +use crate::back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; +use crate::back::lto::ThinBuffer; +use crate::base; +use crate::consts; +use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; +use crate::llvm_util; +use crate::ModuleLlvm; +use crate::type_::Type; +use crate::context::{is_pie_binary, get_reloc_model}; +use crate::common; +use crate::LlvmCodegenBackend; +use rustc::hir::def_id::LOCAL_CRATE; use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler}; use rustc_codegen_ssa::traits::*; -use base; -use consts; use rustc::session::config::{self, OutputType, Passes, Lto}; use rustc::session::Session; -use time_graph::Timeline; -use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; -use llvm_util; -use ModuleLlvm; +use rustc::ty::TyCtxt; use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; use rustc::util::common::time_ext; +use rustc::util::profiling::ProfileCategory; use rustc_fs_util::{path_to_c_string, link_or_copy}; use rustc_data_structures::small_c_str::SmallCStr; -use errors::{self, Handler, FatalError}; -use type_::Type; -use context::{is_pie_binary, get_reloc_model}; -use common; -use LlvmCodegenBackend; -use rustc_demangle; +use errors::{Handler, FatalError}; use std::ffi::{CString, CStr}; use std::fs; @@ -82,51 +73,54 @@ pub fn write_output_file( unsafe { let output_c = path_to_c_string(output); let result = llvm::LLVMRustWriteOutputFile(target, pm, m, output_c.as_ptr(), file_type); - if result.into_result().is_err() { + result.into_result().map_err(|()| { let msg = format!("could not write output to {}", output.display()); - Err(llvm_err(handler, &msg)) - } else { - Ok(()) - } - } -} - -pub(crate) fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { - match optimize { - config::OptLevel::No => llvm::CodeGenOptLevel::None, - config::OptLevel::Less => llvm::CodeGenOptLevel::Less, - config::OptLevel::Default => llvm::CodeGenOptLevel::Default, - config::OptLevel::Aggressive => llvm::CodeGenOptLevel::Aggressive, - _ => llvm::CodeGenOptLevel::Default, + llvm_err(handler, &msg) + }) } } -pub(crate) fn 
get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { - match optimize { - config::OptLevel::Size => llvm::CodeGenOptSizeDefault, - config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, - _ => llvm::CodeGenOptSizeNone, - } +pub fn create_informational_target_machine( + sess: &Session, + find_features: bool, +) -> &'static mut llvm::TargetMachine { + target_machine_factory(sess, config::OptLevel::No, find_features)().unwrap_or_else(|err| { + llvm_err(sess.diagnostic(), &err).raise() + }) } pub fn create_target_machine( - sess: &Session, + tcx: TyCtxt<'_, '_, '_>, find_features: bool, ) -> &'static mut llvm::TargetMachine { - target_machine_factory(sess, find_features)().unwrap_or_else(|err| { - llvm_err(sess.diagnostic(), &err).raise() + target_machine_factory(&tcx.sess, tcx.backend_optimization_level(LOCAL_CRATE), find_features)() + .unwrap_or_else(|err| { + llvm_err(tcx.sess.diagnostic(), &err).raise() }) } +pub fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) +{ + use self::config::OptLevel::*; + match cfg { + No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone), + Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone), + Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone), + Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone), + Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault), + SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive), + } +} + // If find_features is true this won't access `sess.crate_types` by assuming // that `is_pie_binary` is false. When we discover LLVM target features // `sess.crate_types` is uninitialized so we cannot access it. -pub fn target_machine_factory(sess: &Session, find_features: bool) +pub fn target_machine_factory(sess: &Session, optlvl: config::OptLevel, find_features: bool) -> Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { let reloc_model = get_reloc_model(sess); - let opt_level = get_llvm_opt_level(sess.opts.optimize); + let (opt_level, _) = to_llvm_opt_settings(optlvl); let use_softfp = sess.opts.cg.soft_float; let ffunction_sections = sess.target.target.options.function_sections; @@ -310,8 +304,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, module: &ModuleCodegen, - config: &ModuleConfig, - timeline: &mut Timeline) + config: &ModuleConfig) -> Result<(), FatalError> { let llmod = module.module_llvm.llmod(); @@ -367,10 +360,10 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext, if !config.no_prepopulate_passes { llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); - let opt_level = config.opt_level.map(get_llvm_opt_level) + let opt_level = config.opt_level.map(|x| to_llvm_opt_settings(x).0) .unwrap_or(llvm::CodeGenOptLevel::None); let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || - (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); + (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled()); with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| { llvm::LLVMPassManagerBuilderPopulateFunctionPassManager(b, fpm); llvm::LLVMPassManagerBuilderPopulateModulePassManager(b, mpm); @@ -420,19 +413,24 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler.abort_if_errors(); // Finally, run the actual 
optimization passes - time_ext(config.time_passes, - None, - &format!("llvm function passes [{}]", module_name.unwrap()), - || { - llvm::LLVMRustRunFunctionPassManager(fpm, llmod) - }); - timeline.record("fpm"); - time_ext(config.time_passes, - None, - &format!("llvm module passes [{}]", module_name.unwrap()), - || { - llvm::LLVMRunPassManager(mpm, llmod) - }); + { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_function_passes"); + time_ext(config.time_passes, + None, + &format!("llvm function passes [{}]", module_name.unwrap()), + || { + llvm::LLVMRustRunFunctionPassManager(fpm, llmod) + }); + } + { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_module_passes"); + time_ext(config.time_passes, + None, + &format!("llvm module passes [{}]", module_name.unwrap()), + || { + llvm::LLVMRunPassManager(mpm, llmod) + }); + } // Deallocate managers that we're now done with llvm::LLVMDisposePassManager(fpm); @@ -444,11 +442,10 @@ pub(crate) unsafe fn optimize(cgcx: &CodegenContext, pub(crate) unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, module: ModuleCodegen, - config: &ModuleConfig, - timeline: &mut Timeline) + config: &ModuleConfig) -> Result { - timeline.record("codegen"); + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "codegen"); { let llmod = module.module_llvm.llmod(); let llcx = &*module.module_llvm.llcx; @@ -499,29 +496,32 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, if write_bc || config.emit_bc_compressed || config.embed_bitcode { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_make_bitcode"); let thin = ThinBuffer::new(llmod); let data = thin.data(); - timeline.record("make-bc"); if write_bc { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_bitcode"); if let Err(e) = fs::write(&bc_out, data) { - diag_handler.err(&format!("failed to write bytecode: {}", e)); + let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e); + diag_handler.err(&msg); } - timeline.record("write-bc"); } if config.embed_bitcode { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_embed_bitcode"); embed_bitcode(cgcx, llcx, llmod, Some(data)); - timeline.record("embed-bc"); } if config.emit_bc_compressed { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, + "LLVM_compress_bitcode"); let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION); let data = bytecode::encode(&module.name, data); if let Err(e) = fs::write(&dst, data) { - diag_handler.err(&format!("failed to write bytecode: {}", e)); + let msg = format!("failed to write bytecode to {}: {}", dst.display(), e); + diag_handler.err(&msg); } - timeline.record("compress-bc"); } } else if config.embed_bitcode_marker { embed_bitcode(cgcx, llcx, llmod, None); @@ -530,8 +530,9 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, time_ext(config.time_passes, None, &format!("codegen passes [{}]", module_name.unwrap()), || -> Result<(), FatalError> { if config.emit_ir { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_ir"); let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name); - let out = path_to_c_string(&out); + let out_c = path_to_c_string(&out); extern "C" fn demangle_callback(input_ptr: *const c_char, input_len: size_t, @@ -565,13 +566,18 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, } with_codegen(tm, llmod, config.no_builtins, |cpm| { - llvm::LLVMRustPrintModule(cpm, llmod, out.as_ptr(), demangle_callback); + let result = 
+ llvm::LLVMRustPrintModule(cpm, llmod, out_c.as_ptr(), demangle_callback); llvm::LLVMDisposePassManager(cpm); - }); - timeline.record("ir"); + result.into_result().map_err(|()| { + let msg = format!("failed to write LLVM IR to {}", out.display()); + llvm_err(diag_handler, &msg) + }) + })?; } if config.emit_asm || asm_to_obj { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_asm"); let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); // We can't use the same module for asm and binary output, because that triggers @@ -586,19 +592,18 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext, write_output_file(diag_handler, tm, cpm, llmod, &path, llvm::FileType::AssemblyFile) })?; - timeline.record("asm"); } if write_obj { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_obj"); with_codegen(tm, llmod, config.no_builtins, |cpm| { write_output_file(diag_handler, tm, cpm, llmod, &obj_out, llvm::FileType::ObjectFile) })?; - timeline.record("obj"); } else if asm_to_obj { + let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_asm_to_obj"); let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name); run_assembler(cgcx, diag_handler, &assembly, &obj_out); - timeline.record("asm_to_obj"); if !config.emit_asm && !cgcx.save_temps { drop(fs::remove_file(&assembly)); @@ -699,7 +704,8 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, // reasonable defaults and prepare it to actually populate the pass // manager. let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt_size = config.opt_size.map(get_llvm_opt_size).unwrap_or(llvm::CodeGenOptSizeNone); + let opt_size = config.opt_size.map(|x| to_llvm_opt_settings(x).1) + .unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 904e5d74f8ee9..7ea5e91230905 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Codegen the completed AST to the LLVM IR. //! //! Some functions here, such as codegen_block and codegen_expr, return a value -- @@ -17,28 +7,28 @@ //! //! Hopefully useful general knowledge about codegen: //! -//! * There's no way to find out the Ty type of a Value. Doing so -//! would be "trying to get the eggs out of an omelette" (credit: -//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, -//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, -//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. +//! * There's no way to find out the `Ty` type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`, +//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int, +//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`. 
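// Illustrative sketch, not part of the patch (assumes only std): the reworked
// doc comment above states that one `llvm::Type` corresponds to many `Ty`s,
// e.g. `tup(int, int, int)` and `rec(x=int, y=int, z=int)`. The layout
// equivalence behind that claim can be observed from plain Rust by comparing
// the size and alignment of a tuple with a field-for-field equivalent struct:
use std::mem::{align_of, size_of};

#[repr(C)]
struct Rec { x: i32, y: i32, z: i32 }

fn main() {
    // Both shapes flatten to the same { i32, i32, i32 } aggregate in LLVM,
    // so size and alignment agree (12 bytes, 4-byte alignment on typical targets).
    assert_eq!(size_of::<(i32, i32, i32)>(), size_of::<Rec>());
    assert_eq!(align_of::<(i32, i32, i32)>(), align_of::<Rec>());
}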
-use super::ModuleLlvm; +use super::{LlvmCodegenBackend, ModuleLlvm}; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; -use super::LlvmCodegenBackend; -use llvm; -use metadata; +use crate::llvm; +use crate::metadata; +use crate::builder::Builder; +use crate::common; +use crate::context::CodegenCx; +use crate::monomorphize::partitioning::CodegenUnitExt; +use rustc::dep_graph; use rustc::mir::mono::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata}; use rustc::ty::TyCtxt; use rustc::middle::exported_symbols; use rustc::session::config::{self, DebugInfo}; -use builder::Builder; -use common; -use context::CodegenCx; -use monomorphize::partitioning::CodegenUnitExt; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_data_structures::small_c_str::SmallCStr; @@ -50,12 +40,12 @@ use std::time::Instant; use syntax_pos::symbol::InternedString; use rustc::hir::CodegenFnAttrs; -use value::Value; +use crate::value::Value; pub fn write_metadata<'a, 'gcx>( tcx: TyCtxt<'a, 'gcx, 'gcx>, - llvm_module: &ModuleLlvm + llvm_module: &mut ModuleLlvm ) -> EncodedMetadata { use std::io::Write; use flate2::Compression; @@ -146,7 +136,7 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { } } -pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, +pub fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -155,7 +145,8 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, let ((stats, module), _) = tcx.dep_graph.with_task(dep_node, tcx, cgu_name, - module_codegen); + module_codegen, + dep_graph::hash_result); let time_to_codegen = start_time.elapsed(); // We assume that the cost to run LLVM on a CGU is proportional to @@ -171,26 +162,25 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> (Stats, ModuleCodegen) { - let backend = LlvmCodegenBackend(()); let cgu = tcx.codegen_unit(cgu_name); // Instantiate monomorphizations without filling out definitions yet... - let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); + let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str()); let stats = { let cx = CodegenCx::new(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine::(&cx, linkage, visibility); + mono_item.predefine::>(&cx, linkage, visibility); } // ... and now that we have everything pre-defined, fill out those definitions. for &(mono_item, _) in &mono_items { - mono_item.define::(&cx); + mono_item.define::>(&cx); } // If this codegen unit contains the main function, also create the // wrapper here - maybe_create_entry_wrapper::(&cx); + maybe_create_entry_wrapper::>(&cx); // Run replace-all-uses-with for statics that need it for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { diff --git a/src/librustc_codegen_llvm/build.rs b/src/librustc_codegen_llvm/build.rs index 97accbb4b8fe6..d1fc624c68927 100644 --- a/src/librustc_codegen_llvm/build.rs +++ b/src/librustc_codegen_llvm/build.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-env-changed=CFG_VERSION"); diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 01b1387d9cc2f..123fda1e215ff 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -1,22 +1,13 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{self, False, BasicBlock}; +use crate::llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope}; +use crate::llvm::{self, False, BasicBlock}; +use crate::common::Funclet; +use crate::context::CodegenCx; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; +use syntax::symbol::LocalInternedString; use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; -use rustc_codegen_ssa::{self, MemFlags}; -use common::Funclet; -use context::CodegenCx; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; +use rustc_codegen_ssa::MemFlags; use libc::{c_uint, c_char}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::{self, Align, Size, TyLayout}; @@ -24,14 +15,13 @@ use rustc::hir::def_id::DefId; use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; use rustc_codegen_ssa::traits::*; -use syntax; use rustc_codegen_ssa::base::to_immediate; use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; -use std::ffi::CStr; use std::ops::{Deref, Range}; use std::ptr; +use std::iter::TrustedLen; // All Builders must have an llfn associated with them #[must_use] @@ -131,50 +121,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Builder::new_block(self.cx, self.llfn(), name) } - fn llfn(&self) -> &'ll Value { - unsafe { - llvm::LLVMGetBasicBlockParent(self.llbb()) - } - } - fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } - fn count_insn(&self, category: &str) { - if self.sess().codegen_stats() { - self.stats.borrow_mut().n_llvm_insns += 1; - } - if self.sess().count_llvm_insns() { - *self.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; - } - } - - fn set_value_name(&mut self, value: &'ll Value, name: &str) { - let cname = SmallCStr::new(name); - unsafe { - llvm::LLVMSetValueName(value, cname.as_ptr()); - } - } - fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - fn position_at_start(&mut self, llbb: &'ll BasicBlock) { - unsafe { - llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); - } - } - fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { @@ -212,10 +170,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, - num_cases: usize, - ) -> &'ll Value { - unsafe { - llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, num_cases as c_uint) + cases: impl ExactSizeIterator + TrustedLen, + ) { + let switch = unsafe { + 
llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) + }; + for (on_val, dest) in cases { + let on_val = self.const_uint_big(self.val_ty(v), on_val); + unsafe { + llvm::LLVMAddCase(switch, on_val, dest) + } } } @@ -467,7 +431,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn checked_binop( &mut self, oop: OverflowOp, - ty: Ty, + ty: Ty<'_>, lhs: Self::Value, rhs: Self::Value, ) -> (Self::Value, Self::Value) { @@ -627,7 +591,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { - return OperandRef::new_zst(self.cx(), place.layout); + return OperandRef::new_zst(self, place.layout); } fn scalar_load_metadata<'a, 'll, 'tcx>( @@ -694,7 +658,37 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { OperandRef { val, layout: place.layout } } + fn write_operand_repeatedly( + mut self, + cg_elem: OperandRef<'tcx, &'ll Value>, + count: u64, + dest: PlaceRef<'tcx, &'ll Value>, + ) -> Self { + let zero = self.const_usize(0); + let count = self.const_usize(count); + let start = dest.project_index(&mut self, zero).llval; + let end = dest.project_index(&mut self, count).llval; + + let mut header_bx = self.build_sibling_block("repeat_loop_header"); + let mut body_bx = self.build_sibling_block("repeat_loop_body"); + let next_bx = self.build_sibling_block("repeat_loop_next"); + + self.br(header_bx.llbb()); + let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]); + + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); + header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); + + let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size); + cg_elem.val.store(&mut body_bx, + PlaceRef::new_sized(current, cg_elem.layout, align)); + let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]); + body_bx.br(header_bx.llbb()); + header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); + + next_bx + } fn range_metadata(&mut self, load: &'ll Value, range: Range) { if self.sess().target.target.arch == "amdgpu" { @@ -797,6 +791,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + /* Casts */ fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); @@ -907,64 +909,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } /* Miscellaneous instructions */ - fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { - self.count_insn("emptyphi"); - unsafe { - llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) - } - } - - fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { - assert_eq!(vals.len(), bbs.len()); - let phi = self.empty_phi(ty); - self.count_insn("addincoming"); - unsafe { - llvm::LLVMAddIncoming(phi, vals.as_ptr(), - bbs.as_ptr(), - vals.len() as c_uint); - phi - } - } - - fn inline_asm_call(&mut self, asm: &CStr, cons: &CStr, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: bool, - dia: syntax::ast::AsmDialect) -> Option<&'ll Value> { - self.count_insn("inlineasm"); - - let volatile = if volatile { llvm::True } - else { llvm::False }; - let alignstack = if alignstack { llvm::True } - else { llvm::False }; - - let argtys = 
inputs.iter().map(|v| { - debug!("Asm Input Type: {:?}", *v); - self.cx.val_ty(*v) - }).collect::>(); - - debug!("Asm Output Type: {:?}", output); - let fty = self.type_func(&argtys[..], output); - unsafe { - // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); - debug!("Constraint verification result: {:?}", constraints_ok); - if constraints_ok { - let v = llvm::LLVMRustInlineAsm( - fty, - asm.as_ptr(), - cons.as_ptr(), - volatile, - alignstack, - AsmDialect::from_generic(dia), - ); - Some(self.call(v, inputs, None)) - } else { - // LLVM has detected an issue with our constraints, bail out - None - } - } - } - fn memcpy(&mut self, dst: &'ll Value, dst_align: Align, src: &'ll Value, src_align: Align, size: &'ll Value, flags: MemFlags) { @@ -1022,15 +966,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } - fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("minnum"); - unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } - } - fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - self.count_insn("maxnum"); - unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } - } - fn select( &mut self, cond: &'ll Value, then_val: &'ll Value, @@ -1057,24 +992,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn insert_element( - &mut self, vec: &'ll Value, - elt: &'ll Value, - idx: &'ll Value, - ) -> &'ll Value { - self.count_insn("insertelement"); - unsafe { - llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) - } - } - - fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { - self.count_insn("shufflevector"); - unsafe { - llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) - } - } - fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); @@ -1085,81 +1002,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fadd_fast"); - unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. - let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmul_fast"); - unsafe { - // FIXME: add a non-fast math version once - // https://bugs.llvm.org/show_bug.cgi?id=36732 - // is fixed. 
- let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.add"); - unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } - } - fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.mul"); - unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } - } - fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.and"); - unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } - } - fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.or"); - unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } - } - fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.xor"); - unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } - } - fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin"); - unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmax"); - unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } - } - fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmin_fast"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { - self.count_insn("vector.reduce.fmax_fast"); - unsafe { - let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); - llvm::LLVMRustSetHasUnsafeAlgebra(instr); - instr - } - } - fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.min"); - unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } - } - fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { - self.count_insn("vector.reduce.max"); - unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } - } - fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -1187,12 +1029,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { - unsafe { - llvm::LLVMAddClause(landing_pad, clause); - } - } - fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { @@ -1246,14 +1082,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Funclet::new(ret.expect("LLVM does not have support for catchpad")) } - fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { - self.count_insn("catchret"); - let ret = unsafe { - llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) - }; - ret.expect("LLVM does not have support for catchret") - } - fn catch_switch( &mut self, parent: Option<&'ll Value>, @@ -1337,26 +1165,254 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn 
set_invariant_load(&mut self, load: &'ll Value) { unsafe { - llvm::LLVMAddCase(s, on_val, dest) + llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { - self.count_insn("addincoming"); + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); + } + + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + fn call( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + funclet: Option<&Funclet<'ll>>, + ) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + let bundle = funclet.map(|funclet| funclet.bundle()); + let bundle = bundle.as_ref().map(|b| &*b.raw); + unsafe { - llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + bundle, noname() + ) } } - fn set_invariant_load(&mut self, load: &'ll Value) { + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); unsafe { - llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, - llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) } } + + fn cx(&self) -> &CodegenCx<'ll, 'tcx> { + self.cx + } + + unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { + llvm::LLVMDeleteBasicBlock(bb); + } + + fn do_not_inline(&mut self, llret: &'ll Value) { + llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + } +} + +impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { +fn get_static(&mut self, def_id: DefId) -> &'ll Value { + // Forward to the `get_static` method of `CodegenCx` + self.cx().get_static(def_id) + } + + fn static_panic_msg( + &mut self, + msg: Option, + filename: LocalInternedString, + line: Self::Value, + col: Self::Value, + kind: &str, + ) -> Self::Value { + let align = self.tcx.data_layout.aggregate_align.abi + .max(self.tcx.data_layout.i32_align.abi) + .max(self.tcx.data_layout.pointer_align.abi); + + let filename = self.const_str_slice(filename); + + let with_msg_components; + let without_msg_components; + + let components = if let Some(msg) = msg { + let msg = self.const_str_slice(msg); + with_msg_components = [msg, filename, line, col]; + &with_msg_components as &[_] + } else { + without_msg_components = [filename, line, col]; + &without_msg_components as &[_] + }; + + let struct_ = self.const_struct(&components, false); + self.static_addr_of(struct_, align, Some(kind)) + } +} + +impl Builder<'a, 'll, 'tcx> { + pub fn llfn(&self) -> &'ll Value { + unsafe { + llvm::LLVMGetBasicBlockParent(self.llbb()) + } + } + + fn count_insn(&self, category: &str) { + if self.sess().codegen_stats() { + self.stats.borrow_mut().n_llvm_insns += 1; + } + if self.sess().count_llvm_insns() { + *self.stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; + } + } + + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { + unsafe { + llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); + } + } + + pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("minnum"); + unsafe { 
llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } + } + + pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + self.count_insn("maxnum"); + unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } + } + + pub fn insert_element( + &mut self, vec: &'ll Value, + elt: &'ll Value, + idx: &'ll Value, + ) -> &'ll Value { + self.count_insn("insertelement"); + unsafe { + llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, noname()) + } + } + + pub fn shuffle_vector( + &mut self, + v1: &'ll Value, + v2: &'ll Value, + mask: &'ll Value, + ) -> &'ll Value { + self.count_insn("shufflevector"); + unsafe { + llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) + } + } + + pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fadd_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmul_fast"); + unsafe { + // FIXME: add a non-fast math version once + // https://bugs.llvm.org/show_bug.cgi?id=36732 + // is fixed. + let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.add"); + unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } + } + pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.mul"); + unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } + } + pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.and"); + unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } + } + pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.or"); + unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } + } + pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.xor"); + unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } + } + pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin"); + unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax"); + unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } + } + pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmin_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { + self.count_insn("vector.reduce.fmax_fast"); + unsafe { + let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true); + llvm::LLVMRustSetHasUnsafeAlgebra(instr); + instr + } + } + pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.min"); + unsafe { 
llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } + } + pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + self.count_insn("vector.reduce.max"); + unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } + } + + pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { + unsafe { + llvm::LLVMAddClause(landing_pad, clause); + } + } + + pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { + self.count_insn("catchret"); + let ret = unsafe { + llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) + }; + ret.expect("LLVM does not have support for catchret") + } + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { @@ -1418,76 +1474,13 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } - fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { - self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); - } - - fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { - self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); - } - - fn call( - &mut self, - llfn: &'ll Value, - args: &[&'ll Value], - funclet: Option<&Funclet<'ll>>, - ) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = funclet.map(|funclet| funclet.bundle()); - let bundle = bundle.as_ref().map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildCall( - self.llbuilder, - llfn, - args.as_ptr() as *const &llvm::Value, - args.len() as c_uint, - bundle, noname() - ) - } - } - - fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); + pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + self.count_insn("vaarg"); unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - fn cx(&self) -> &CodegenCx<'ll, 'tcx> { - self.cx - } - - unsafe fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { - llvm::LLVMDeleteBasicBlock(bb); - } - - fn do_not_inline(&mut self, llret: &'ll Value) { - llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); - } -} - -impl StaticBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { - fn get_static(&self, def_id: DefId) -> &'ll Value { - self.cx().get_static(def_id) - } -} - -impl Builder<'a, 'll, 'tcx> { fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; @@ -1503,4 +1496,25 @@ impl Builder<'a, 'll, 'tcx> { let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); } + + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + self.count_insn("addincoming"); + assert_eq!(vals.len(), bbs.len()); + let phi = unsafe { + llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) + }; + unsafe { + llvm::LLVMAddIncoming(phi, vals.as_ptr(), + bbs.as_ptr(), + vals.len() as c_uint); + phi + } + } + + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + 
self.count_insn("addincoming"); + unsafe { + llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); + } + } } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 87185a20c5091..2d732adcb9138 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -1,24 +1,14 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Handles codegen of callees as well as other call-related -//! things. Callees are a superset of normal rust values and sometimes -//! have different representations. In particular, top-level fn items +//! things. Callees are a superset of normal rust values and sometimes +//! have different representations. In particular, top-level fn items //! and methods are represented as just a fn ptr and not a full //! closure. -use attributes; -use llvm; -use monomorphize::Instance; -use context::CodegenCx; -use value::Value; +use crate::attributes; +use crate::llvm; +use crate::monomorphize::Instance; +use crate::context::CodegenCx; +use crate::value::Value; use rustc_codegen_ssa::traits::*; use rustc::ty::TypeFoldable; @@ -123,7 +113,7 @@ pub fn get_fn( unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage); - let is_generic = instance.substs.types().next().is_some(); + let is_generic = instance.substs.non_erasable_generics().next().is_some(); if is_generic { // This is a monomorphization. Its expected visibility depends diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index ad14ca7caf60a..9554e54e4142a 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -1,28 +1,18 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_camel_case_types, non_snake_case)] //! Code that is useful in various codegen modules. -use llvm::{self, True, False, Bool, BasicBlock, OperandBundleDef}; -use abi; -use consts; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; +use crate::llvm::{self, True, False, Bool, BasicBlock, OperandBundleDef}; +use crate::abi; +use crate::consts; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; use rustc_codegen_ssa::traits::*; +use crate::consts::const_alloc_to_llvm; use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; use rustc::mir::interpret::{Scalar, AllocKind, Allocation}; -use consts::const_alloc_to_llvm; use rustc_codegen_ssa::mir::place::PlaceRef; use libc::{c_uint, c_char}; @@ -30,7 +20,7 @@ use libc::{c_uint, c_char}; use syntax::symbol::LocalInternedString; use syntax::ast::Mutability; -pub use context::CodegenCx; +pub use crate::context::CodegenCx; /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". 
@@ -103,6 +93,100 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> { type DIScope = &'ll llvm::debuginfo::DIScope; } +impl CodegenCx<'ll, 'tcx> { + pub fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + self.const_struct(&[ptr, meta], false) + } + + pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } + } + + pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } + } + + pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + bytes_in_context(self.llcx, bytes) + } + + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> &'ll Value { + unsafe { + if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { + return llval; + } + + let sc = llvm::LLVMConstStringInContext(self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + self.const_cstr_cache.borrow_mut().insert(s, g); + g + } + } + + pub fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(self.const_cstr(s, false), + self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); + self.const_fat_ptr(cs, self.const_usize(len as u64)) + } + + pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); + + r + } + } + + pub fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if self.is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } + } + } + + fn is_const_real(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } + } +} + impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { @@ -165,50 +249,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_uint(self.type_i8(), i as u64) } - fn const_cstr( - &self, - s: LocalInternedString, - null_terminated: bool, - ) -> &'ll Value { - unsafe { - if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) { - return llval; - } - - let sc = llvm::LLVMConstStringInContext(self.llcx, - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); - - self.const_cstr_cache.borrow_mut().insert(s, g); - g - } - } - - fn const_str_slice(&self, s: LocalInternedString) -> 
&'ll Value { - let len = s.len(); - let cs = consts::ptrcast(self.const_cstr(s, false), - self.type_ptr_to(self.layout_of(self.tcx.mk_str()).llvm_type(self))); - self.const_fat_ptr(cs, self.const_usize(len as u64)) - } - - fn const_fat_ptr( - &self, - ptr: &'ll Value, - meta: &'ll Value - ) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - self.const_struct(&[ptr, meta], false) - } - fn const_struct( &self, elts: &[&'ll Value], @@ -217,48 +257,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { struct_in_context(self.llcx, elts, packed) } - fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); - } - } - - fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { - bytes_in_context(self.llcx, bytes) - } - - fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); - - r - } - } - - fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if self.is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None - } - } - } - fn const_to_uint(&self, v: &'ll Value) -> u64 { unsafe { llvm::LLVMConstIntGetZExtValue(v) @@ -271,12 +269,6 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn is_const_real(&self, v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() - } - } - fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { unsafe { if self.is_const_integral(v) { diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 086fb1f5a93cc..8c83e9ef538e5 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -1,30 +1,20 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- +use crate::llvm::{self, SetUnnamedAddr, True}; +use crate::debuginfo; +use crate::monomorphize::MonoItem; +use crate::common::CodegenCx; +use crate::monomorphize::Instance; +use crate::base; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; use libc::c_uint; -use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, Pointer, ErrorHandled, GlobalId}; use rustc::hir::Node; -use debuginfo; -use monomorphize::MonoItem; -use common::CodegenCx; -use monomorphize::Instance; use syntax_pos::Span; use rustc_target::abi::HasDataLayout; use syntax_pos::symbol::LocalInternedString; -use base; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; use rustc::ty::{self, Ty}; use rustc_codegen_ssa::traits::*; @@ -81,7 +71,7 @@ pub fn codegen_static_initializer( let static_ = cx.tcx.const_eval(param_env.and(cid))?; let alloc = match static_.val { - ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, + ConstValue::ByRef(ptr, alloc) if ptr.offset.bytes() == 0 => alloc, _ => bug!("static const eval returned {:#?}", static_), }; Ok((const_alloc_to_llvm(cx, alloc), alloc)) @@ -223,10 +213,10 @@ impl CodegenCx<'ll, 'tcx> { debug!("get_static: sym={} instance={:?}", sym, instance); - let g = if let Some(id) = self.tcx.hir().as_local_node_id(def_id) { + let g = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) { let llty = self.layout_of(ty).llvm_type(self); - let (g, attrs) = match self.tcx.hir().get(id) { + let (g, attrs) = match self.tcx.hir().get_by_hir_id(id) { Node::Item(&hir::Item { ref attrs, span, node: hir::ItemKind::Static(..), .. }) => { @@ -285,12 +275,12 @@ impl CodegenCx<'ll, 'tcx> { self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && // ThinLTO can't handle this workaround in all cases, so we don't // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); + // dynamic linking when linker plugin based LTO is enabled. + !self.tcx.sess.opts.cg.linker_plugin_lto.enabled(); // If this assertion triggers, there's something wrong with commandline // argument validation. - debug_assert!(!(self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + debug_assert!(!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() && self.tcx.sess.target.target.options.is_like_msvc && self.tcx.sess.opts.cg.prefer_dynamic)); diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index b75cd8f68b368..f6956bd5736eb 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -1,26 +1,15 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use attributes; -use llvm; +use crate::attributes; +use crate::llvm; +use crate::debuginfo; +use crate::monomorphize::Instance; +use crate::value::Value; use rustc::dep_graph::DepGraphSafe; use rustc::hir; -use debuginfo; -use monomorphize::Instance; -use value::Value; -use monomorphize::partitioning::CodegenUnit; -use type_::Type; -use type_of::PointeeInfo; +use crate::monomorphize::partitioning::CodegenUnit; +use crate::type_::Type; +use crate::type_of::PointeeInfo; use rustc_codegen_ssa::traits::*; -use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -33,7 +22,7 @@ use rustc::util::nodemap::FxHashMap; use rustc_target::spec::{HasTargetSpec, Target}; use rustc_codegen_ssa::callee::resolve_and_get_fn; use rustc_codegen_ssa::base::wants_msvc_seh; -use callee::get_fn; +use crate::callee::get_fn; use std::ffi::CStr; use std::cell::{Cell, RefCell}; @@ -41,7 +30,7 @@ use std::iter; use std::str; use std::sync::Arc; use syntax::symbol::LocalInternedString; -use abi::Abi; +use crate::abi::Abi; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. @@ -60,7 +49,8 @@ pub struct CodegenCx<'ll, 'tcx: 'll> { /// Cache instances of monomorphic and polymorphic items pub instances: RefCell, &'ll Value>>, /// Cache generated vtables - pub vtables: RefCell, ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>, + pub vtables: RefCell, Option>), &'ll Value>>, /// Cache of constant strings, pub const_cstr_cache: RefCell>, @@ -84,7 +74,7 @@ pub struct CodegenCx<'ll, 'tcx: 'll> { pub statics_to_rauw: RefCell>, /// Statics that will be placed in the llvm.used variable - /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details + /// See for details pub used_statics: RefCell>, pub lltypes: RefCell, Option), &'ll Type>>, @@ -112,7 +102,7 @@ pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode { None => &sess.target.target.options.relocation_model[..], }; - match ::back::write::RELOC_MODEL_ARGS.iter().find( + match crate::back::write::RELOC_MODEL_ARGS.iter().find( |&&arg| arg.0 == reloc_model_arg) { Some(x) => x.1, _ => { @@ -130,7 +120,7 @@ fn get_tls_model(sess: &Session) -> llvm::ThreadLocalMode { None => &sess.target.target.options.tls_model[..], }; - match ::back::write::TLS_MODEL_ARGS.iter().find( + match crate::back::write::TLS_MODEL_ARGS.iter().find( |&&arg| arg.0 == tls_model_arg) { Some(x) => x.1, _ => { @@ -153,16 +143,17 @@ pub fn is_pie_binary(sess: &Session) -> bool { } pub unsafe fn create_module( - sess: &Session, + tcx: TyCtxt<'_, '_, '_>, llcx: &'ll llvm::Context, mod_name: &str, ) -> &'ll llvm::Module { + let sess = tcx.sess; let mod_name = SmallCStr::new(mod_name); let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx); // Ensure the data-layout values hardcoded remain the defaults. 
if sess.target.target.options.is_builtin { - let tm = ::back::write::create_target_machine(sess, false); + let tm = crate::back::write::create_informational_target_machine(&tcx.sess, false); llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm); llvm::LLVMRustDisposeTargetMachine(tm); @@ -220,7 +211,7 @@ pub unsafe fn create_module( impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>, codegen_unit: Arc>, - llvm_module: &'ll ::ModuleLlvm) + llvm_module: &'ll crate::ModuleLlvm) -> Self { // An interesting part of Windows which MSVC forces our hand on (and // apparently MinGW didn't) is the usage of `dllimport` and `dllexport` @@ -321,7 +312,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn vtables(&self) -> &RefCell, - ty::PolyExistentialTraitRef<'tcx>), &'ll Value>> + Option>), &'ll Value>> { &self.vtables } @@ -334,10 +325,6 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { get_fn(self, instance) } - fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value { - llvm::get_param(llfn, index) - } - fn eh_personality(&self) -> &'ll Value { // The exception handling personality function. // @@ -385,7 +372,6 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { // Returns a Value of the "eh_unwind_resume" lang item if one is defined, // otherwise declares it as an external function. fn eh_unwind_resume(&self) -> &'ll Value { - use attributes; let unwresume = &self.eh_unwind_resume; if let Some(llfn) = unwresume.get() { return llfn; @@ -473,6 +459,20 @@ impl CodegenCx<'b, 'tcx> { self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key)) } + fn insert_intrinsic( + &self, name: &'static str, args: Option<&[&'b llvm::Type]>, ret: &'b llvm::Type + ) -> &'b llvm::Value { + let fn_ty = if let Some(args) = args { + self.type_func(args, ret) + } else { + self.type_variadic_func(&[], ret) + }; + let f = self.declare_cfn(name, fn_ty); + llvm::SetUnnamedAddr(f, false); + self.intrinsics.borrow_mut().insert(name, f.clone()); + f + } + fn declare_intrinsic( &self, key: &str @@ -480,26 +480,17 @@ impl CodegenCx<'b, 'tcx> { macro_rules! ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { - let f = self.declare_cfn($name, self.type_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); + return Some(self.insert_intrinsic($name, Some(&[]), $ret)); } ); ($name:expr, fn(...) -> $ret:expr) => ( if key == $name { - let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); + return Some(self.insert_intrinsic($name, None, $ret)); } ); ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( if key == $name { - let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); - self.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); + return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret)); } ); } @@ -518,14 +509,24 @@ impl CodegenCx<'b, 'tcx> { let t_f32 = self.type_f32(); let t_f64 = self.type_f64(); - let t_v2f32 = self.type_vector(t_f32, 2); - let t_v4f32 = self.type_vector(t_f32, 4); - let t_v8f32 = self.type_vector(t_f32, 8); - let t_v16f32 = self.type_vector(t_f32, 16); - - let t_v2f64 = self.type_vector(t_f64, 2); - let t_v4f64 = self.type_vector(t_f64, 4); - let t_v8f64 = self.type_vector(t_f64, 8); + macro_rules! 
vector_types { + ($id_out:ident: $elem_ty:ident, $len:expr) => { + let $id_out = self.type_vector($elem_ty, $len); + }; + ($($id_out:ident: $elem_ty:ident, $len:expr;)*) => { + $(vector_types!($id_out: $elem_ty, $len);)* + } + } + vector_types! { + t_v2f32: t_f32, 2; + t_v4f32: t_f32, 4; + t_v8f32: t_f32, 8; + t_v16f32: t_f32, 16; + + t_v2f64: t_f64, 2; + t_v4f64: t_f64, 4; + t_v8f64: t_f64, 8; + } ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); @@ -765,6 +766,30 @@ impl CodegenCx<'b, 'tcx> { ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8); + ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16); + ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32); + ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64); + ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128); + + ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8); + ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16); + ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32); + ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64); + ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128); + + ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8); + ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16); + ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32); + ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64); + ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128); + + ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8); + ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16); + ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32); + ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64); + ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128); + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); @@ -791,7 +816,7 @@ impl CodegenCx<'b, 'tcx> { } impl<'b, 'tcx> CodegenCx<'b, 'tcx> { - /// Generate a new symbol name with the given prefix. This symbol name must + /// Generates a new symbol name with the given prefix. This symbol name must /// only be used for definitions with `internal` or `private` linkage. pub fn generate_local_symbol_name(&self, prefix: &str) -> String { let idx = self.local_gen_sym_counter.get(); diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index c18e126e52003..c8ddf733ecf1f 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -1,20 +1,10 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, FunctionDebugContextData, MirDebugScope}; use super::metadata::file_metadata; use super::utils::{DIB, span_start}; -use llvm; -use llvm::debuginfo::{DIScope, DISubprogram}; -use common::CodegenCx; +use crate::llvm; +use crate::llvm::debuginfo::{DIScope, DISubprogram}; +use crate::common::CodegenCx; use rustc::mir::{Mir, SourceScope}; use libc::c_uint; @@ -26,11 +16,11 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use syntax_pos::BytePos; -/// Produce DIScope DIEs for each MIR Scope which has variables defined in it. +/// Produces DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes( cx: &CodegenCx<'ll, '_>, - mir: &Mir, + mir: &Mir<'_>, debug_context: &FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { let null_scope = MirDebugScope { @@ -65,7 +55,7 @@ pub fn create_mir_scopes( } fn make_mir_scope(cx: &CodegenCx<'ll, '_>, - mir: &Mir, + mir: &Mir<'_>, has_variables: &BitSet, debug_context: &FunctionDebugContextData<&'ll DISubprogram>, scope: SourceScope, diff --git a/src/librustc_codegen_llvm/debuginfo/doc.rs b/src/librustc_codegen_llvm/debuginfo/doc.rs index 5e2476e0918ff..daccfc9b242f9 100644 --- a/src/librustc_codegen_llvm/debuginfo/doc.rs +++ b/src/librustc_codegen_llvm/debuginfo/doc.rs @@ -1,23 +1,13 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # Debug Info Module //! //! This module serves the purpose of generating debug symbols. We use LLVM's -//! [source level debugging](http://!llvm.org/docs/SourceLevelDebugging.html) +//! [source level debugging](https://llvm.org/docs/SourceLevelDebugging.html) //! features for generating the debug information. The general principle is //! this: //! //! Given the right metadata in the LLVM IR, the LLVM code generator is able to //! create DWARF debug symbols for the given code. The -//! [metadata](http://!llvm.org/docs/LangRef.html#metadata-type) is structured +//! [metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured //! much like DWARF *debugging information entries* (DIE), representing type //! information such as datatype layout, function signatures, block layout, //! variable location and scope information, etc. It is the purpose of this @@ -25,7 +15,7 @@ //! //! As the exact format of metadata trees may change between different LLVM //! versions, we now use LLVM -//! [DIBuilder](http://!llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) +//! [DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html) //! to create metadata where possible. This will hopefully ease the adaption of //! this module to future LLVM versions. //! @@ -170,7 +160,7 @@ //! //! This algorithm also provides a stable ID for types that are defined in one //! crate but instantiated from metadata within another crate. We just have to -//! take care to always map crate and node IDs back to the original crate +//! take care to always map crate and `NodeId`s back to the original crate //! context. //! //! As a side-effect these unique type IDs also help to solve a problem arising @@ -180,7 +170,7 @@ //! 
with different concrete substitutions for `'a`, and thus there will be N //! `Ty` instances for the type `Struct<'a>` even though it is not generic //! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as -//! cheap identifier for type metadata---we have done this in the past, but it +//! cheap identifier for type metadata -- we have done this in the past, but it //! led to unnecessary metadata duplication in the best case and LLVM //! assertions in the worst. However, the unique type ID as described above //! *can* be used as identifier. Since it is comparatively expensive to diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index ff5ec20254ea1..91496ffbe557a 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -1,21 +1,11 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // .debug_gdb_scripts binary section. -use llvm; +use crate::llvm; -use common::CodegenCx; -use builder::Builder; +use crate::common::CodegenCx; +use crate::builder::Builder; +use crate::value::Value; use rustc::session::config::DebugInfo; -use value::Value; use rustc_codegen_ssa::traits::*; use syntax::attr; @@ -23,7 +13,7 @@ use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) { +pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force @@ -74,7 +64,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) }) } -pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool { +pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { let omit_gdb_pretty_printer_section = attr::contains_name(&cx.tcx.hir().krate_attrs(), "omit_gdb_pretty_printer_section"); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index d263b4e123780..6560ed0a8e686 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use self::RecursiveTypeDescription::*; use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; @@ -17,23 +7,25 @@ use super::utils::{debug_context, DIB, span_start, use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; +use crate::abi; +use crate::value::Value; use rustc_codegen_ssa::traits::*; -use abi; -use value::Value; -use llvm; -use llvm::debuginfo::{DIArray, DIType, DIFile, DIScope, DIDescriptor, - DICompositeType, DILexicalBlock, DIFlags}; -use llvm_util; +use crate::llvm; +use crate::llvm::debuginfo::{DIArray, DIType, DIFile, DIScope, DIDescriptor, + DICompositeType, DILexicalBlock, DIFlags, DebugEmissionKind}; +use crate::llvm_util; +use crate::common::CodegenCx; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def::CtorKind; use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; use rustc::ich::NodeIdHashingMode; +use rustc::mir::Field; +use rustc::mir::interpret::truncate; use rustc_data_structures::fingerprint::Fingerprint; use rustc::ty::Instance; -use common::CodegenCx; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; use rustc::ty::layout::{self, Align, Integer, IntegerExt, LayoutOf, PrimitiveExt, Size, TyLayout}; @@ -57,7 +49,7 @@ use syntax_pos::{self, Span, FileName}; impl PartialEq for llvm::Metadata { fn eq(&self, other: &Self) -> bool { - self as *const _ == other as *const _ + ptr::eq(self, other) } } @@ -70,7 +62,7 @@ impl Hash for llvm::Metadata { } impl fmt::Debug for llvm::Metadata { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { (self as *const Self).fmt(f) } } @@ -126,6 +118,32 @@ impl TypeMap<'ll, 'tcx> { } } + // Removes a Ty to metadata mapping + // This is useful when computing the metadata for a potentially + // recursive type (e.g. a function ptr of the form: + // + // fn foo() -> impl Copy { foo } + // + // This kind of type cannot be properly represented + // via LLVM debuginfo. As a workaround, + // we register a temporary Ty to metadata mapping + // for the function before we compute its actual metadata. + // If the metadata computation ends up recursing back to the + // original function, it will use the temporary mapping + // for the inner self-reference, preventing us from + // recursing forever. + // + // This function is used to remove the temporary metadata + // mapping after we've computed the actual metadata + fn remove_type( + &mut self, + type_: Ty<'tcx>, + ) { + if self.type_to_metadata.remove(type_).is_none() { + bug!("Type metadata Ty '{}' is not in the TypeMap!", type_); + } + } + // Adds a UniqueTypeId to metadata mapping to the TypeMap. The method will // fail if the mapping already exists. fn register_unique_id_with_metadata( @@ -198,6 +216,17 @@ impl TypeMap<'ll, 'tcx> { let interner_key = self.unique_id_interner.intern(&enum_variant_type_id); UniqueTypeId(interner_key) } + + // Get the unique type id string for an enum variant part. + // Variant parts are not types and shouldn't really have their own id, + // but it makes set_members_of_composite_type() simpler. 
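The `remove_type` workaround documented above exists because of self-referential `impl Trait` function types. The example cited in that comment is an ordinary program: the return type of `foo` is an opaque type whose only inhabitant is `foo` itself, which is exactly the shape the temporary metadata mapping guards against.

```rust
// The self-referential function type mentioned in the comment above:
// `foo` returns an opaque `impl Copy` whose only value is `foo` itself.
fn foo() -> impl Copy { foo }

fn main() {
    let f = foo();
    let _g = f; // fine: the opaque type is Copy, so `f` is copied, not moved
    let _h = f; // `f` is still usable after the copy
}
```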
+ fn get_unique_type_id_str_of_enum_variant_part<'a>(&mut self, + enum_type_id: UniqueTypeId) -> &str { + let variant_part_type_id = format!("{}_variant_part", + self.get_unique_type_id_as_string(enum_type_id)); + let interner_key = self.unique_id_interner.intern(&variant_part_type_id); + self.unique_id_interner.get(interner_key) + } } // A description of some recursive type. It can either be already finished (as @@ -439,7 +468,8 @@ fn trait_pointer_metadata( // But it does not describe the trait's methods. let containing_scope = match trait_type.sty { - ty::Dynamic(ref data, ..) => Some(get_namespace_for_item(cx, data.principal().def_id())), + ty::Dynamic(ref data, ..) => + data.principal_def_id().map(|did| get_namespace_for_item(cx, did)), _ => { bug!("debuginfo: Unexpected trait-object type in \ trait_pointer_metadata(): {:?}", @@ -605,10 +635,7 @@ pub fn type_metadata( } } ty::FnDef(..) | ty::FnPtr(_) => { - let fn_metadata = subroutine_type_metadata(cx, - unique_type_id, - t.fn_sig(cx.tcx), - usage_site_span).metadata; + if let Some(metadata) = debug_context(cx).type_map .borrow() .find_metadata_for_unique_id(unique_type_id) @@ -616,6 +643,41 @@ pub fn type_metadata( return metadata; } + // It's possible to create a self-referential + // type in Rust by using 'impl trait': + // + // fn foo() -> impl Copy { foo } + // + // See TypeMap::remove_type for more detals + // about the workaround + + let temp_type = { + unsafe { + // The choice of type here is pretty arbitrary - + // anything reading the debuginfo for a recursive + // type is going to see *somthing* weird - the only + // question is what exactly it will see + let (size, align) = cx.size_and_align_of(t); + llvm::LLVMRustDIBuilderCreateBasicType( + DIB(cx), + SmallCStr::new("").as_ptr(), + size.bits(), + align.bits() as u32, + DW_ATE_unsigned) + } + }; + + let type_map = &debug_context(cx).type_map; + type_map.borrow_mut().register_type_with_metadata(t, temp_type); + + let fn_metadata = subroutine_type_metadata(cx, + unique_type_id, + t.fn_sig(cx.tcx), + usage_site_span).metadata; + + type_map.borrow_mut().remove_type(t); + + // This is actually a function pointer, so wrap it in pointer DI MetadataCreationResult::new(pointer_type_metadata(cx, t, fn_metadata), false) @@ -826,7 +888,7 @@ fn pointer_type_metadata( } } -pub fn compile_unit_metadata(tcx: TyCtxt, +pub fn compile_unit_metadata(tcx: TyCtxt<'_, '_, '_>, codegen_unit_name: &str, debug_context: &CrateDebugContext<'ll, '_>) -> &'ll DIDescriptor { @@ -855,6 +917,7 @@ pub fn compile_unit_metadata(tcx: TyCtxt, let producer = CString::new(producer).unwrap(); let flags = "\0"; let split_name = "\0"; + let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo); unsafe { let file_metadata = llvm::LLVMRustDIBuilderCreateFile( @@ -868,7 +931,8 @@ pub fn compile_unit_metadata(tcx: TyCtxt, tcx.sess.opts.optimize != config::OptLevel::No, flags.as_ptr() as *const _, 0, - split_name.as_ptr() as *const _); + split_name.as_ptr() as *const _, + kind); if tcx.sess.opts.debugging_opts.profile { let cu_desc_metadata = llvm::LLVMRustMetadataAsValue(debug_context.llcontext, @@ -1163,17 +1227,22 @@ fn prepare_union_metadata( // Enums //=----------------------------------------------------------------------------- -// DWARF variant support is only available starting in LLVM 7. +// DWARF variant support is only available starting in LLVM 8. 
// Although the earlier enum debug info output did not work properly // in all situations, it is better for the time being to continue to // sometimes emit the old style rather than emit something completely -// useless when rust is compiled against LLVM 6 or older. This -// function decides which representation will be emitted. -fn use_enum_fallback(cx: &CodegenCx) -> bool { +// useless when rust is compiled against LLVM 6 or older. LLVM 7 +// contains an early version of the DWARF variant support, and will +// crash when handling the new debug info format. This function +// decides which representation will be emitted. +fn use_enum_fallback(cx: &CodegenCx<'_, '_>) -> bool { // On MSVC we have to use the fallback mode, because LLVM doesn't // lower variant parts to PDB. return cx.sess().target.target.options.is_like_msvc - || llvm_util::get_major_version() < 7; + // LLVM version 7 did not release with an important bug fix; + // but the required patch is in the LLVM 8. Rust LLVM reports + // 8 as well. + || llvm_util::get_major_version() < 8; } // Describes the members of an enum value: An enum is described as a union of @@ -1225,7 +1294,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { name: if fallback { String::new() } else { - adt.variants[index].name.as_str().to_string() + adt.variants[index].ident.as_str().to_string() }, type_metadata: variant_type_metadata, offset: Size::ZERO, @@ -1236,10 +1305,17 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { } ] } - layout::Variants::Tagged { ref variants, .. } => { + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + discr_index, + ref variants, + .. + } => { let discriminant_info = if fallback { - RegularDiscriminant(self.discriminant_type_metadata - .expect("")) + RegularDiscriminant { + discr_field: Field::from(discr_index), + discr_type_metadata: self.discriminant_type_metadata.unwrap() + } } else { // This doesn't matter in this case. NoDiscriminant @@ -1265,7 +1341,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { name: if fallback { String::new() } else { - adt.variants[i].name.as_str().to_string() + adt.variants[i].ident.as_str().to_string() }, type_metadata: variant_type_metadata, offset: Size::ZERO, @@ -1278,12 +1354,15 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { } }).collect() } - layout::Variants::NicheFilling { - ref niche_variants, - niche_start, + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { + ref niche_variants, + niche_start, + dataful_variant, + }, + ref discr, ref variants, - dataful_variant, - ref niche, + discr_index, } => { if fallback { let variant = self.layout.for_variant(cx, dataful_variant); @@ -1329,9 +1408,9 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { } compute_field_path(cx, &mut name, self.layout, - self.layout.fields.offset(0), - self.layout.field(cx, 0).size); - name.push_str(&adt.variants[*niche_variants.start()].name.as_str()); + self.layout.fields.offset(discr_index), + self.layout.field(cx, discr_index).size); + name.push_str(&adt.variants[*niche_variants.start()].ident.as_str()); // Create the (singleton) list of descriptions of union members. 
vec![ @@ -1370,12 +1449,16 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { let value = (i.as_u32() as u128) .wrapping_sub(niche_variants.start().as_u32() as u128) .wrapping_add(niche_start); - let value = value & ((1u128 << niche.value.size(cx).bits()) - 1); + let value = truncate(value, discr.value.size(cx)); + // NOTE(eddyb) do *NOT* remove this assert, until + // we pass the full 128-bit value to LLVM, otherwise + // truncation will be silent and remain undetected. + assert_eq!(value as u64 as u128, value); Some(value as u64) }; MemberDescription { - name: adt.variants[i].name.as_str().to_string(), + name: adt.variants[i].ident.as_str().to_string(), type_metadata: variant_type_metadata, offset: Size::ZERO, size: self.layout.size, @@ -1408,6 +1491,8 @@ impl VariantMemberDescriptionFactory<'ll, 'tcx> { name: name.to_string(), type_metadata: if use_enum_fallback(cx) { match self.discriminant_type_metadata { + // Discriminant is always the first field of our variant + // when using the enum fallback. Some(metadata) if i == 0 => metadata, _ => type_metadata(cx, ty, self.span) } @@ -1426,7 +1511,7 @@ impl VariantMemberDescriptionFactory<'ll, 'tcx> { #[derive(Copy, Clone)] enum EnumDiscriminantInfo<'ll> { - RegularDiscriminant(&'ll DIType), + RegularDiscriminant{ discr_field: Field, discr_type_metadata: &'ll DIType }, OptimizedDiscriminant, NoDiscriminant } @@ -1443,7 +1528,7 @@ fn describe_enum_variant( containing_scope: &'ll DIScope, span: Span, ) -> (&'ll DICompositeType, MemberDescriptionFactory<'ll, 'tcx>) { - let variant_name = variant.name.as_str(); + let variant_name = variant.ident.as_str(); let unique_type_id = debug_context(cx).type_map .borrow_mut() .get_unique_type_id_of_enum_variant( @@ -1457,15 +1542,26 @@ fn describe_enum_variant( unique_type_id, Some(containing_scope)); + let arg_name = |i: usize| { + if variant.ctor_kind == CtorKind::Fn { + format!("__{}", i) + } else { + variant.fields[i].ident.to_string() + } + }; + // Build an array of (field name, field type) pairs to be captured in the factory closure. let (offsets, args) = if use_enum_fallback(cx) { // If this is not a univariant enum, there is also the discriminant field. let (discr_offset, discr_arg) = match discriminant_info { - RegularDiscriminant(_) => { + RegularDiscriminant { discr_field, .. 
} => { // We have the layout of an enum variant, we need the layout of the outer enum let enum_layout = cx.layout_of(layout.ty); - (Some(enum_layout.fields.offset(0)), - Some(("RUST$ENUM$DISR".to_owned(), enum_layout.field(cx, 0).ty))) + let offset = enum_layout.fields.offset(discr_field.as_usize()); + let args = ( + "RUST$ENUM$DISR".to_owned(), + enum_layout.field(cx, discr_field.as_usize()).ty); + (Some(offset), Some(args)) } _ => (None, None), }; @@ -1474,12 +1570,7 @@ fn describe_enum_variant( layout.fields.offset(i) })).collect(), discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| { - let name = if variant.ctor_kind == CtorKind::Fn { - format!("__{}", i) - } else { - variant.fields[i].ident.to_string() - }; - (name, layout.field(cx, i).ty) + (arg_name(i), layout.field(cx, i).ty) })).collect() ) } else { @@ -1488,12 +1579,7 @@ fn describe_enum_variant( layout.fields.offset(i) }).collect(), (0..layout.fields.count()).map(|i| { - let name = if variant.ctor_kind == CtorKind::Fn { - format!("__{}", i) - } else { - variant.fields[i].ident.to_string() - }; - (name, layout.field(cx, i).ty) + (arg_name(i), layout.field(cx, i).ty) }).collect() ) }; @@ -1503,8 +1589,8 @@ fn describe_enum_variant( offsets, args, discriminant_type_metadata: match discriminant_info { - RegularDiscriminant(discriminant_type_metadata) => { - Some(discriminant_type_metadata) + RegularDiscriminant { discr_type_metadata, .. } => { + Some(discr_type_metadata) } _ => None }, @@ -1537,7 +1623,7 @@ fn prepare_enum_metadata( let enumerators_metadata: Vec<_> = def.discriminants(cx.tcx) .zip(&def.variants) .map(|((_, discr), v)| { - let name = SmallCStr::new(&v.name.as_str()); + let name = SmallCStr::new(&v.ident.as_str()); unsafe { Some(llvm::LLVMRustDIBuilderCreateEnumerator( DIB(cx), @@ -1587,8 +1673,11 @@ fn prepare_enum_metadata( let layout = cx.layout_of(enum_type); match (&layout.abi, &layout.variants) { - (&layout::Abi::Scalar(_), &layout::Variants::Tagged {ref tag, .. }) => - return FinalMetadata(discriminant_type_metadata(tag.value)), + (&layout::Abi::Scalar(_), &layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + ref discr, + .. + }) => return FinalMetadata(discriminant_type_metadata(discr.value)), _ => {} } @@ -1600,9 +1689,16 @@ fn prepare_enum_metadata( if use_enum_fallback(cx) { let discriminant_type_metadata = match layout.variants { layout::Variants::Single { .. } | - layout::Variants::NicheFilling { .. } => None, - layout::Variants::Tagged { ref tag, .. } => { - Some(discriminant_type_metadata(tag.value)) + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { .. }, + .. + } => None, + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + ref discr, + .. + } => { + Some(discriminant_type_metadata(discr.value)) } }; @@ -1637,16 +1733,21 @@ fn prepare_enum_metadata( ); } - let discriminator_metadata = match &layout.variants { + let discriminator_metadata = match layout.variants { // A single-variant enum has no discriminant. - &layout::Variants::Single { .. } => None, - - &layout::Variants::NicheFilling { ref niche, .. } => { + layout::Variants::Single { .. } => None, + + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { .. }, + ref discr, + discr_index, + .. + } => { // Find the integer type of the correct size. 
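In the niche-filling arm above, each non-dataful variant's tag is derived from the variant index by wrapping arithmetic and then truncated to the discriminant's width, with an assert that the result still fits in 64 bits. A plain-integer sketch of that computation, outside of rustc's layout types (parameter names are illustrative):

```rust
// Illustrative stand-in for the niche-tag computation: plain integers instead
// of rustc's VariantIdx/Size types.
fn niche_tag_value(
    variant_index: u32,
    niche_variants_start: u32,
    niche_start: u128,
    tag_size_bits: u32,
) -> u64 {
    let value = (variant_index as u128)
        .wrapping_sub(niche_variants_start as u128)
        .wrapping_add(niche_start);
    // Truncate to the tag's width, like `truncate(value, discr.value.size(cx))`.
    let value = if tag_size_bits >= 128 {
        value
    } else {
        value & ((1u128 << tag_size_bits) - 1)
    };
    // Mirror of the NOTE(eddyb) assert: the truncated value must still fit in
    // 64 bits, because only 64 bits are passed on to LLVM.
    assert_eq!(value as u64 as u128, value);
    value as u64
}

fn main() {
    // Niche variants starting at index 1, niche bit pattern starting at 2,
    // stored in an 8-bit tag: variant 3 gets tag value 4.
    assert_eq!(niche_tag_value(3, 1, 2, 8), 4);
    println!("ok");
}
```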
- let size = niche.value.size(cx); - let align = niche.value.align(cx); + let size = discr.value.size(cx); + let align = discr.value.align(cx); - let discr_type = match niche.value { + let discr_type = match discr.value { layout::Int(t, _) => t, layout::Float(layout::FloatTy::F32) => Integer::I32, layout::Float(layout::FloatTy::F64) => Integer::I64, @@ -1663,14 +1764,19 @@ fn prepare_enum_metadata( UNKNOWN_LINE_NUMBER, size.bits(), align.abi.bits() as u32, - layout.fields.offset(0).bits(), + layout.fields.offset(discr_index).bits(), DIFlags::FlagArtificial, discr_metadata)) } }, - &layout::Variants::Tagged { ref tag, .. } => { - let discr_type = tag.value.to_ty(cx.tcx); + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + ref discr, + discr_index, + .. + } => { + let discr_type = discr.value.to_ty(cx.tcx); let (size, align) = cx.size_and_align_of(discr_type); let discr_metadata = basic_type_metadata(cx, discr_type); @@ -1683,13 +1789,18 @@ fn prepare_enum_metadata( UNKNOWN_LINE_NUMBER, size.bits(), align.bits() as u32, - layout.fields.offset(0).bits(), + layout.fields.offset(discr_index).bits(), DIFlags::FlagArtificial, discr_metadata)) } }, }; + let variant_part_unique_type_id_str = SmallCStr::new( + debug_context(cx).type_map + .borrow_mut() + .get_unique_type_id_str_of_enum_variant_part(unique_type_id) + ); let empty_array = create_DIArray(DIB(cx), &[]); let variant_part = unsafe { llvm::LLVMRustDIBuilderCreateVariantPart( @@ -1703,7 +1814,7 @@ fn prepare_enum_metadata( DIFlags::FlagZero, discriminator_metadata, empty_array, - unique_type_id_str.as_ptr()) + variant_part_unique_type_id_str.as_ptr()) }; // The variant part must be wrapped in a struct according to DWARF. @@ -1740,7 +1851,7 @@ fn prepare_enum_metadata( }), ); - fn get_enum_discriminant_name(cx: &CodegenCx, + fn get_enum_discriminant_name(cx: &CodegenCx<'_, '_>, def_id: DefId) -> InternedString { cx.tcx.item_name(def_id) @@ -1867,7 +1978,7 @@ fn compute_type_parameters(cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>) -> Option<&' } return Some(create_DIArray(DIB(cx), &[])); - fn get_parameter_names(cx: &CodegenCx, + fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec { let mut names = generics.parent.map_or(vec![], |def_id| { diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 5b65b1fdda64e..57e4ac07d5e28 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // See doc.rs for documentation. 
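The DIBuilder calls above (for example `LLVMRustDIBuilderCreateVariantPart`) hand Rust strings to LLVM's C API as NUL-terminated pointers via `SmallCStr`/`CString`. A minimal standard-library sketch of that handoff, with no LLVM involved:

```rust
use std::ffi::CString;
use std::os::raw::c_char;

// Convert a Rust string into a NUL-terminated buffer and pass a *const c_char
// to a callback, the same shape as handing `.as_ptr()` to an FFI binding.
fn with_c_str<R>(s: &str, f: impl FnOnce(*const c_char) -> R) -> R {
    let c = CString::new(s).expect("no interior NUL bytes allowed");
    f(c.as_ptr())
}

fn main() {
    let len = with_c_str("RUST$ENUM$DISR", |ptr| {
        // Walk the buffer the way a C consumer would, stopping at the NUL.
        let mut n = 0usize;
        unsafe {
            while *ptr.add(n) != 0 {
                n += 1;
            }
        }
        n
    });
    assert_eq!(len, "RUST$ENUM$DISR".len());
    println!("{} bytes before the NUL terminator", len);
}
```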
mod doc; @@ -20,29 +10,29 @@ use self::type_names::compute_debuginfo_type_name; use self::metadata::{type_metadata, file_metadata, TypeMap}; use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; -use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, - DILexicalBlock}; +use crate::llvm; +use crate::llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags, + DISPFlags, DILexicalBlock}; use rustc::hir::CodegenFnAttrFlags; -use rustc::hir::def_id::{DefId, CrateNum}; -use rustc::ty::subst::{Substs, UnpackedKind}; - -use abi::Abi; -use common::CodegenCx; -use builder::Builder; -use monomorphize::Instance; +use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; +use rustc::ty::subst::{SubstsRef, UnpackedKind}; + +use crate::abi::Abi; +use crate::common::CodegenCx; +use crate::builder::Builder; +use crate::monomorphize::Instance; +use crate::value::Value; use rustc::ty::{self, ParamEnv, Ty, InstanceDef}; use rustc::mir; use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; use rustc_data_structures::indexed_vec::IndexVec; -use value::Value; use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind, FunctionDebugContextData}; use libc::c_uint; -use std::cell::{Cell, RefCell}; +use std::cell::RefCell; use std::ffi::CString; use syntax_pos::{self, Span, Pos}; @@ -112,8 +102,8 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { } } -/// Create any deferred debug metadata nodes -pub fn finalize(cx: &CodegenCx) { +/// Creates any deferred debug metadata nodes +pub fn finalize(cx: &CodegenCx<'_, '_>) { if cx.dbg_cx.is_none() { return; } @@ -168,7 +158,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { variable_kind: VariableKind, span: Span, ) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + assert!(!dbg_context.get_ref(span).source_locations_enabled); let cx = self.cx(); let file = span_start(cx, span).file; @@ -226,7 +216,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn set_source_location( &mut self, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -235,6 +225,13 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } + + fn set_value_name(&mut self, value: &'ll Value, name: &str) { + let cname = SmallCStr::new(name); + unsafe { + llvm::LLVMSetValueName(value, cname.as_ptr()); + } + } } impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -243,7 +240,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, llfn: &'ll Value, - mir: &mir::Mir, + mir: &mir::Mir<'_>, ) -> FunctionDebugContext<&'ll DISubprogram> { if self.sess().opts.debuginfo == DebugInfo::None { return FunctionDebugContext::DebugInfoDisabled; @@ -293,24 +290,29 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { let linkage_name = mangled_name_of_instance(self, instance); let scope_line = span_start(self, span).line; - let is_local_to_unit = is_node_local_to_unit(self, def_id); let function_name = CString::new(name).unwrap(); let linkage_name = SmallCStr::new(&linkage_name.as_str()); let mut flags = 
DIFlags::FlagPrototyped; - let local_id = self.tcx().hir().as_local_node_id(def_id); - if let Some((id, _, _)) = *self.sess().entry_fn.borrow() { - if local_id == Some(id) { - flags |= DIFlags::FlagMainSubprogram; - } - } - if self.layout_of(sig.output()).abi.is_uninhabited() { flags |= DIFlags::FlagNoReturn; } + let mut spflags = DISPFlags::SPFlagDefinition; + if is_node_local_to_unit(self, def_id) { + spflags |= DISPFlags::SPFlagLocalToUnit; + } + if self.sess().opts.optimize != config::OptLevel::No { + spflags |= DISPFlags::SPFlagOptimized; + } + if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) { + if id == def_id { + spflags |= DISPFlags::SPFlagMainSubprogram; + } + } + let fn_metadata = unsafe { llvm::LLVMRustDIBuilderCreateFunction( DIB(self), @@ -320,11 +322,9 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { file_metadata, loc.line as c_uint, function_type_metadata, - is_local_to_unit, - true, scope_line as c_uint, flags, - self.sess().opts.optimize != config::OptLevel::No, + spflags, llfn, template_parameters, None) @@ -333,7 +333,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { // Initialize fn debug context (including scope map and namespace map) let fn_debug_context = FunctionDebugContextData { fn_metadata, - source_locations_enabled: Cell::new(false), + source_locations_enabled: false, defining_crate: def_id.krate, }; @@ -405,7 +405,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn get_template_parameters<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, generics: &ty::Generics, - substs: &Substs<'tcx>, + substs: SubstsRef<'tcx>, file_metadata: &'ll DIFile, name_to_append_suffix_to: &mut String, ) -> &'ll DIArray { @@ -461,7 +461,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { return create_DIArray(DIB(cx), &template_params[..]); } - fn get_parameter_names(cx: &CodegenCx, + fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec { let mut names = generics.parent.map_or(vec![], |def_id| { @@ -524,8 +524,8 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn create_mir_scopes( &self, - mir: &mir::Mir, - debug_context: &FunctionDebugContext<&'ll DISubprogram>, + mir: &mir::Mir<'_>, + debug_context: &mut FunctionDebugContext<&'ll DISubprogram>, ) -> IndexVec> { create_scope_map::create_mir_scopes(self, mir, debug_context) } diff --git a/src/librustc_codegen_llvm/debuginfo/namespace.rs b/src/librustc_codegen_llvm/debuginfo/namespace.rs index 06f8a4b131b60..f7c377adf3529 100644 --- a/src/librustc_codegen_llvm/debuginfo/namespace.rs +++ b/src/librustc_codegen_llvm/debuginfo/namespace.rs @@ -1,25 +1,15 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Namespace Handling. 
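`create_function_debug_context` above now folds what used to be separate booleans (local-to-unit, optimization state, "is the entry function") into a single `DISPFlags` value built up with `|=`. A standalone sketch of that flag-accumulation pattern using plain bit constants (the values here are made up; the real ones live in rustc's LLVM bindings):

```rust
// Illustrative bit values only; they do not match LLVM's actual DISPFlags.
const SP_FLAG_DEFINITION: u32 = 1 << 0;
const SP_FLAG_LOCAL_TO_UNIT: u32 = 1 << 1;
const SP_FLAG_OPTIMIZED: u32 = 1 << 2;
const SP_FLAG_MAIN_SUBPROGRAM: u32 = 1 << 3;

fn subprogram_flags(local_to_unit: bool, optimized: bool, is_entry_fn: bool) -> u32 {
    let mut spflags = SP_FLAG_DEFINITION;
    if local_to_unit {
        spflags |= SP_FLAG_LOCAL_TO_UNIT;
    }
    if optimized {
        spflags |= SP_FLAG_OPTIMIZED;
    }
    if is_entry_fn {
        spflags |= SP_FLAG_MAIN_SUBPROGRAM;
    }
    spflags
}

fn main() {
    // An unoptimized entry function that is local to its compilation unit.
    assert_eq!(
        subprogram_flags(true, false, true),
        SP_FLAG_DEFINITION | SP_FLAG_LOCAL_TO_UNIT | SP_FLAG_MAIN_SUBPROGRAM
    );
}
```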
use super::metadata::{unknown_file_metadata, UNKNOWN_LINE_NUMBER}; use super::utils::{DIB, debug_context}; -use monomorphize::Instance; +use crate::monomorphize::Instance; use rustc::ty; -use llvm; -use llvm::debuginfo::DIScope; +use crate::llvm; +use crate::llvm::debuginfo::DIScope; +use crate::common::CodegenCx; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; -use common::CodegenCx; use rustc_data_structures::small_c_str::SmallCStr; diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 95196287ab6ee..dec93a65dbaf4 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -1,22 +1,12 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; use super::metadata::UNKNOWN_COLUMN_NUMBER; use rustc_codegen_ssa::debuginfo::FunctionDebugContext; -use llvm; -use llvm::debuginfo::DIScope; -use builder::Builder; +use crate::llvm; +use crate::llvm::debuginfo::DIScope; +use crate::builder::Builder; use rustc_codegen_ssa::traits::*; use libc::c_uint; @@ -40,7 +30,7 @@ pub fn set_source_location( FunctionDebugContext::RegularContext(ref data) => data }; - let dbg_loc = if function_debug_context.source_locations_enabled.get() { + let dbg_loc = if function_debug_context.source_locations_enabled { debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index 2e827cc6d0601..eff7cd1bc8a48 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -1,20 +1,11 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Type Names for Debug Info. 
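A recurring change in the hunks above is dropping interior mutability: `source_locations_enabled` becomes a plain `bool` and the debug context is passed as `&mut` instead of toggling a `Cell` through a shared reference. A small standalone illustration of the two styles (type and field names here are made up):

```rust
use std::cell::Cell;

struct SharedCtx {
    enabled: Cell<bool>, // interior mutability: can be set through &SharedCtx
}

struct MutCtx {
    enabled: bool, // plain field: mutation requires &mut MutCtx
}

fn enable_shared(ctx: &SharedCtx) {
    ctx.enabled.set(true);
}

fn enable_mut(ctx: &mut MutCtx) {
    ctx.enabled = true;
}

fn main() {
    let shared = SharedCtx { enabled: Cell::new(false) };
    enable_shared(&shared);

    let mut owned = MutCtx { enabled: false };
    enable_mut(&mut owned);

    assert!(shared.enabled.get());
    assert!(owned.enabled);
}
```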
-use common::CodegenCx; +use crate::common::CodegenCx; use rustc::hir::def_id::DefId; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::{self, Ty}; use rustc_codegen_ssa::traits::*; +use rustc_data_structures::fx::FxHashSet; use rustc::hir; @@ -27,7 +18,8 @@ pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, qualified: bool) -> String { let mut result = String::with_capacity(64); - push_debuginfo_type_name(cx, t, qualified, &mut result); + let mut visited = FxHashSet::default(); + push_debuginfo_type_name(cx, t, qualified, &mut result, &mut visited); result } @@ -36,7 +28,9 @@ pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, t: Ty<'tcx>, qualified: bool, - output: &mut String) { + output: &mut String, + visited: &mut FxHashSet>) { + // When targeting MSVC, emit C++ style type names for compatibility with // .natvis visualizers (and perhaps other existing native debuggers?) let cpp_like_names = cx.sess().target.target.options.is_like_msvc; @@ -52,12 +46,12 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ty::Foreign(def_id) => push_item_name(cx, def_id, qualified, output), ty::Adt(def, substs) => { push_item_name(cx, def.did, qualified, output); - push_type_params(cx, substs, output); + push_type_params(cx, substs, output, visited); }, ty::Tuple(component_types) => { output.push('('); for &component_type in component_types { - push_debuginfo_type_name(cx, component_type, true, output); + push_debuginfo_type_name(cx, component_type, true, output, visited); output.push_str(", "); } if !component_types.is_empty() { @@ -75,7 +69,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, hir::MutMutable => output.push_str("mut "), } - push_debuginfo_type_name(cx, inner_type, true, output); + push_debuginfo_type_name(cx, inner_type, true, output, visited); if cpp_like_names { output.push('*'); @@ -89,7 +83,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push_str("mut "); } - push_debuginfo_type_name(cx, inner_type, true, output); + push_debuginfo_type_name(cx, inner_type, true, output, visited); if cpp_like_names { output.push('*'); @@ -97,7 +91,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, }, ty::Array(inner_type, len) => { output.push('['); - push_debuginfo_type_name(cx, inner_type, true, output); + push_debuginfo_type_name(cx, inner_type, true, output, visited); output.push_str(&format!("; {}", len.unwrap_usize(cx.tcx))); output.push(']'); }, @@ -108,7 +102,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push('['); } - push_debuginfo_type_name(cx, inner_type, true, output); + push_debuginfo_type_name(cx, inner_type, true, output, visited); if cpp_like_names { output.push('>'); @@ -117,21 +111,44 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } }, ty::Dynamic(ref trait_data, ..) 
=> { - let principal = cx.tcx.normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), - &trait_data.principal(), - ); - push_item_name(cx, principal.def_id, false, output); - push_type_params(cx, principal.substs, output); + if let Some(principal) = trait_data.principal() { + let principal = cx.tcx.normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &principal, + ); + push_item_name(cx, principal.def_id, false, output); + push_type_params(cx, principal.substs, output, visited); + } else { + output.push_str("dyn '_"); + } }, ty::FnDef(..) | ty::FnPtr(_) => { + // We've encountered a weird 'recursive type' + // Currently, the only way to generate such a type + // is by using 'impl trait': + // + // fn foo() -> impl Copy { foo } + // + // There's not really a sensible name we can generate, + // since we don't include 'impl trait' types (e.g. ty::Opaque) + // in the output + // + // Since we need to generate *something*, we just + // use a dummy string that should make it clear + // that something unusual is going on + if !visited.insert(t) { + output.push_str(""); + return; + } + + let sig = t.fn_sig(cx.tcx); if sig.unsafety() == hir::Unsafety::Unsafe { output.push_str("unsafe "); } let abi = sig.abi(); - if abi != ::abi::Abi::Rust { + if abi != crate::abi::Abi::Rust { output.push_str("extern \""); output.push_str(abi.name()); output.push_str("\" "); @@ -142,14 +159,14 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); if !sig.inputs().is_empty() { for ¶meter_type in sig.inputs() { - push_debuginfo_type_name(cx, parameter_type, true, output); + push_debuginfo_type_name(cx, parameter_type, true, output, visited); output.push_str(", "); } output.pop(); output.pop(); } - if sig.variadic { + if sig.c_variadic { if !sig.inputs().is_empty() { output.push_str(", ..."); } else { @@ -161,8 +178,20 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, if !sig.output().is_unit() { output.push_str(" -> "); - push_debuginfo_type_name(cx, sig.output(), true, output); + push_debuginfo_type_name(cx, sig.output(), true, output, visited); } + + + // We only keep the type in 'visited' + // for the duration of the body of this method. + // It's fine for a particular function type + // to show up multiple times in one overall type + // (e.g. MyType u8, fn() -> u8> + // + // We only care about avoiding recursing + // directly back to the type we're currently + // processing + visited.remove(t); }, ty::Closure(..) => { output.push_str("closure"); @@ -184,7 +213,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } - fn push_item_name(cx: &CodegenCx, + fn push_item_name(cx: &CodegenCx<'_, '_>, def_id: DefId, qualified: bool, output: &mut String) { @@ -199,14 +228,15 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } - // Pushes the type parameters in the given `Substs` to the output string. + // Pushes the type parameters in the given `InternalSubsts` to the output string. // This ignores region parameters, since they can't reliably be // reconstructed for items from non-local crates. For local crates, this // would be possible but with inlining and LTO we have to use the least // common denominator - otherwise we would run into conflicts. 
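`push_debuginfo_type_name` above threads a `visited` set through every recursive call so that a self-referential function type is printed as a placeholder instead of recursing forever, and removes the type again once its subtree is finished so the same type may still appear in sibling positions. A standalone sketch of that insert/recurse/remove pattern over a toy type graph (the graph representation is invented for the example):

```rust
use std::collections::HashSet;

// Toy "type graph": each node has a name and edges to the types it mentions.
struct Node {
    name: &'static str,
    edges: Vec<usize>, // indices into the same slice; may point back at this node
}

// Mirrors the insert -> recurse -> remove pattern: a node already on the
// current path is printed as a placeholder instead of recursing forever.
fn push_name(nodes: &[Node], idx: usize, out: &mut String, visited: &mut HashSet<usize>) {
    if !visited.insert(idx) {
        out.push_str("<recursive>");
        return;
    }
    out.push_str(nodes[idx].name);
    if !nodes[idx].edges.is_empty() {
        out.push('<');
        for (i, &e) in nodes[idx].edges.iter().enumerate() {
            if i > 0 {
                out.push_str(", ");
            }
            push_name(nodes, e, out, visited);
        }
        out.push('>');
    }
    // Only guard against cycles on the current path; the same node may appear
    // again in a sibling position of the overall name.
    visited.remove(&idx);
}

fn main() {
    // Node 0 refers to node 1, which refers back to node 0: a cycle.
    let nodes = [
        Node { name: "Foo", edges: vec![1] },
        Node { name: "Bar", edges: vec![0] },
    ];
    let mut out = String::new();
    push_name(&nodes, 0, &mut out, &mut HashSet::new());
    assert_eq!(out, "Foo<Bar<<recursive>>>");
    println!("{}", out);
}
```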
fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - substs: &Substs<'tcx>, - output: &mut String) { + substs: SubstsRef<'tcx>, + output: &mut String, + visited: &mut FxHashSet>) { if substs.types().next().is_none() { return; } @@ -214,7 +244,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, output.push('<'); for type_parameter in substs.types() { - push_debuginfo_type_name(cx, type_parameter, true, output); + push_debuginfo_type_name(cx, type_parameter, true, output, visited); output.push_str(", "); } diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index 4b6ef30b1385c..c64e0d9806b29 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Utility Functions. use super::{CrateDebugContext}; @@ -16,14 +6,14 @@ use super::namespace::item_namespace; use rustc::hir::def_id::DefId; use rustc::ty::DefIdTree; -use llvm; -use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; -use common::{CodegenCx}; +use crate::llvm; +use crate::llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; +use crate::common::{CodegenCx}; use rustc_codegen_ssa::traits::*; -use syntax_pos::{self, Span}; +use syntax_pos::Span; -pub fn is_node_local_to_unit(cx: &CodegenCx, def_id: DefId) -> bool +pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the // current compilation unit (i.e., if it is *static* in the C-sense). The @@ -46,8 +36,8 @@ pub fn create_DIArray( }; } -/// Return syntax_pos::Loc corresponding to the beginning of the span -pub fn span_start(cx: &CodegenCx, span: Span) -> syntax_pos::Loc { +/// Returns syntax_pos::Loc corresponding to the beginning of the span +pub fn span_start(cx: &CodegenCx<'_, '_>, span: Span) -> syntax_pos::Loc { cx.sess().source_map().lookup_char_pos(span.lo()) } diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 2964f2e58470f..3febcb019ce29 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -1,12 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. //! Declare various LLVM values. //! //! Prefer using functions and methods from this module rather than calling LLVM @@ -20,18 +11,18 @@ //! * Use define_* family of methods when you might be defining the Value. //! * When in doubt, define. 
-use llvm; -use llvm::AttributePlace::Function; +use crate::llvm; +use crate::llvm::AttributePlace::Function; +use crate::abi::{FnType, FnTypeExt}; +use crate::attributes; +use crate::context::CodegenCx; +use crate::type_::Type; +use crate::value::Value; use rustc::ty::{self, PolyFnSig}; use rustc::ty::layout::LayoutOf; use rustc::session::config::Sanitizer; use rustc_data_structures::small_c_str::SmallCStr; -use abi::{FnType, FnTypeExt}; -use attributes; -use context::CodegenCx; -use type_::Type; use rustc_codegen_ssa::traits::*; -use value::Value; /// Declare a function. /// @@ -74,19 +65,8 @@ fn declare_raw_fn( } } - match cx.tcx.sess.opts.cg.opt_level.as_ref().map(String::as_ref) { - Some("s") => { - llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); - }, - Some("z") => { - llvm::Attribute::MinSize.apply_llfn(Function, llfn); - llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn); - }, - _ => {}, - } - + attributes::default_optimisation_attrs(cx.tcx.sess, llfn); attributes::non_lazy_bind(cx.sess(), llfn); - llfn } diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs index 94776f17c7989..872fa424e4cfb 100644 --- a/src/librustc_codegen_llvm/diagnostics.rs +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] register_long_diagnostics! { diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index e229f8d95cd5b..ceb08f943678b 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -1,37 +1,26 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- #![allow(non_upper_case_globals)] -use attributes; -use intrinsics::{self, Intrinsic}; -use llvm; -use llvm_util; -use abi::{Abi, FnType, LlvmType, PassMode}; +use crate::attributes; +use crate::llvm; +use crate::llvm_util; +use crate::abi::{Abi, FnType, LlvmType, PassMode}; +use crate::context::CodegenCx; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::builder::Builder; +use crate::value::Value; +use crate::va_arg::emit_va_arg; use rustc_codegen_ssa::MemFlags; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::glue; use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types}; -use context::CodegenCx; -use type_::Type; -use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive}; -use rustc_codegen_ssa::common::TypeKind; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc::hir; use syntax::ast::{self, FloatTy}; use syntax::symbol::Symbol; -use builder::Builder; -use value::Value; -use va_arg::emit_va_arg; use rustc_codegen_ssa::traits::*; @@ -39,7 +28,7 @@ use rustc::session::Session; use syntax_pos::Span; use std::cmp::Ordering; -use std::iter; +use std::{iter, i128, u128}; fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { let llvm_name = match name { @@ -147,22 +136,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let tp_ty = substs.type_at(0); self.const_usize(self.size_of(tp_ty).bytes()) } - func @ "va_start" | func @ "va_end" => { - let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) { - (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(), - (Some(_), _) => self.load(args[0].immediate(), - tcx.data_layout.pointer_align.abi), - (None, _) => bug!("va_list language item must be defined") - }; - let intrinsic = self.cx().get_intrinsic(&format!("llvm.{}", func)); - self.call(intrinsic, &[va_list], None) + "va_start" => { + self.va_start(args[0].immediate()) + } + "va_end" => { + self.va_end(args[0].immediate()) } "va_copy" => { let va_list = match (tcx.lang_items().va_list(), &result.layout.ty.sty) { (Some(did), ty::Adt(def, _)) if def.did == did => args[0].immediate(), (Some(_), _) => self.load(args[0].immediate(), tcx.data_layout.pointer_align.abi), - (None, _) => bug!("va_list language item must be defined") + (None, _) => bug!("`va_list` language item must be defined") }; let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); self.call(intrinsic, &[llresult, va_list], None); @@ -203,8 +188,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "size_of_val" => { let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = - glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llsize } else { self.const_usize(self.size_of(tp_ty).bytes()) @@ -217,8 +201,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "min_align_of_val" => { let tp_ty = substs.type_at(0); if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = - glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llalign } else { self.const_usize(self.align_of(tp_ty).bytes()) @@ -353,7 +336,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "bitreverse" | "add_with_overflow" | "sub_with_overflow" | 
"mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" | - "rotate_left" | "rotate_right" => { + "rotate_left" | "rotate_right" | "saturating_add" | "saturating_sub" => { let ty = arg_tys[0]; match int_type_width_signed(ty, self) { Some((width, signed)) => @@ -479,6 +462,44 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { self.or(shift1, shift2) } }, + "saturating_add" | "saturating_sub" => { + let is_add = name == "saturating_add"; + let lhs = args[0].immediate(); + let rhs = args[1].immediate(); + if llvm_util::get_major_version() >= 8 { + let llvm_name = &format!("llvm.{}{}.sat.i{}", + if signed { 's' } else { 'u' }, + if is_add { "add" } else { "sub" }, + width); + let llfn = self.get_intrinsic(llvm_name); + self.call(llfn, &[lhs, rhs], None) + } else { + let llvm_name = &format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + if is_add { "add" } else { "sub" }, + width); + let llfn = self.get_intrinsic(llvm_name); + let pair = self.call(llfn, &[lhs, rhs], None); + let val = self.extract_value(pair, 0); + let overflow = self.extract_value(pair, 1); + let llty = self.type_ix(width); + + let limit = if signed { + let limit_lo = self.const_uint_big( + llty, (i128::MIN >> (128 - width)) as u128); + let limit_hi = self.const_uint_big( + llty, (i128::MAX >> (128 - width)) as u128); + let neg = self.icmp( + IntPredicate::IntSLT, val, self.const_uint(llty, 0)); + self.select(neg, limit_hi, limit_lo) + } else if is_add { + self.const_uint_big(llty, u128::MAX >> (128 - width)) + } else { + self.const_uint(llty, 0) + }; + self.select(overflow, limit, val) + } + }, _ => bug!(), }, None => { @@ -492,8 +513,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { }, "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { - let sty = &arg_tys[0].sty; - match float_type_width(sty) { + match float_type_width(arg_tys[0]) { Some(_width) => match name { "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()), @@ -507,7 +527,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { span_invalid_monomorphization_error( tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic float type, found `{}`", name, sty)); + expected basic float type, found `{}`", name, arg_tys[0])); return; } } @@ -668,142 +688,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { return; } - _ => { - let intr = match Intrinsic::find(&name) { - Some(intr) => intr, - None => bug!("unknown intrinsic '{}'", name), - }; - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type<'ll>( - cx: &CodegenCx<'ll, '_>, - t: &intrinsics::Type - ) -> Vec<&'ll Type> { - use intrinsics::Type::*; - match *t { - Void => vec![cx.type_void()], - Integer(_signed, _width, llvm_width) => { - vec![cx.type_ix( llvm_width as u64)] - } - Float(x) => { - match x { - 32 => vec![cx.type_f32()], - 64 => vec![cx.type_f64()], - _ => bug!() - } - } - Pointer(ref t, ref llvm_elem, _const) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![cx.type_ptr_to(elem)] - } - Vector(ref t, ref llvm_elem, length) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![cx.type_vector(elem, length as u64)] - } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(cx, t))) - .collect::>(); - 
vec![cx.type_struct( &elems, false)] - } - Aggregate(true, ref contents) => { - contents.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect() - } - } - } - - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. - fn modify_as_needed<'ll, 'tcx>( - bx: &mut Builder<'_, 'll, 'tcx>, - t: &intrinsics::Type, - arg: &OperandRef<'tcx, &'ll Value>, - ) -> Vec<&'ll Value> { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e., no - // destructors, and the contents are SIMD - // etc. - assert!(!bx.type_needs_drop(arg.layout.ty)); - let (ptr, align) = match arg.val { - OperandValue::Ref(ptr, None, align) => (ptr, align), - _ => bug!() - }; - let arg = PlaceRef::new_sized(ptr, arg.layout, align); - (0..contents.len()).map(|i| { - let field = arg.project_field(bx, i); - bx.load_operand(field).immediate() - }).collect() - } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx, llvm_elem)); - vec![bx.pointercast(arg.immediate(), bx.type_ptr_to(llvm_elem))] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx, llvm_elem)); - vec![ - bx.bitcast(arg.immediate(), - bx.type_vector(llvm_elem, length as u64)) - ] - } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. - vec![bx.trunc(arg.immediate(), bx.type_ix(llvm_width as u64))] - } - _ => vec![arg.immediate()], - } - } - - - let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(self, t)) - .collect::>(); - - let outputs = one(ty_to_type(self, &intr.output)); - - let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(self, t, arg) - }).collect(); - assert_eq!(inputs.len(), llargs.len()); - - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = self.declare_cfn( - name, - self.type_func(&inputs, outputs), - ); - self.call(f, &llargs, None) - } - }; - - match *intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); - - for i in 0..elems.len() { - let dest = result.project_field(self, i); - let val = self.extract_value(val, i as u64); - self.store(val, dest.llval, dest.align); - } - return; - } - _ => val, - } - } + _ => bug!("unknown intrinsic '{}'", name), }; if !fn_ty.ret.is_ignore() { @@ -832,6 +717,41 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let expect = self.get_intrinsic(&"llvm.expect.i1"); self.call(expect, &[cond, self.const_bool(expected)], None) } + + fn va_start(&mut self, list: &'ll Value) -> &'ll Value { + let target = &self.cx.tcx.sess.target.target; + let arch = &target.arch; + // A pointer to the architecture specific structure is passed to this + // function. For pointer variants (i686, RISC-V, Windows, etc), we + // should do do nothing, as the address to the pointer is needed. For + // architectures with a architecture specific structure (`Aarch64`, + // `X86_64`, etc), this function should load the structure from the + // address provided. 
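The comment above distinguishes targets where `va_list` is a simple pointer from targets where it is an architecture-specific structure that must first be loaded. A plain-data mirror of the dispatch used just below, written as a standalone function over target facts (the helper name is invented for illustration):

```rust
// Returns true when the `va_list` argument is an architecture-specific struct
// that has to be loaded from the pointer we were given, and false when the
// pointer itself is the value to use (Windows-like targets, aarch64 on iOS,
// and the default case).
fn va_list_needs_load(arch: &str, is_like_windows: bool, target_os: &str) -> bool {
    match arch {
        _ if is_like_windows => false,
        "aarch64" if target_os == "ios" => false,
        "aarch64" | "x86_64" | "powerpc" => true,
        _ => false,
    }
}

fn main() {
    assert!(va_list_needs_load("x86_64", false, "linux"));
    assert!(!va_list_needs_load("x86_64", true, "windows"));
    assert!(!va_list_needs_load("aarch64", false, "ios"));
    assert!(!va_list_needs_load("riscv32", false, "linux"));
}
```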
+ let va_list = match &**arch { + _ if target.options.is_like_windows => list, + "aarch64" if target.target_os == "ios" => list, + "aarch64" | "x86_64" | "powerpc" => + self.load(list, self.tcx().data_layout.pointer_align.abi), + _ => list, + }; + let intrinsic = self.cx().get_intrinsic("llvm.va_start"); + self.call(intrinsic, &[va_list], None) + } + + fn va_end(&mut self, list: &'ll Value) -> &'ll Value { + let target = &self.cx.tcx.sess.target.target; + let arch = &target.arch; + // See the comment in `va_start` for the purpose of the following. + let va_list = match &**arch { + _ if target.options.is_like_windows => list, + "aarch64" if target.target_os == "ios" => list, + "aarch64" | "x86_64" | "powerpc" => + self.load(list, self.tcx().data_layout.pointer_align.abi), + _ => list, + }; + let intrinsic = self.cx().get_intrinsic("llvm.va_end"); + self.call(intrinsic, &[va_list], None) + } } fn copy_intrinsic( @@ -1313,10 +1233,56 @@ fn generic_simd_intrinsic( return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } + if name == "simd_bitmask" { + // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a + // vector mask and returns an unsigned integer containing the most + // significant bit (MSB) of each lane. + use rustc_target::abi::HasDataLayout; + + // If the vector has less than 8 lanes, an u8 is returned with zeroed + // trailing bits. + let expected_int_bits = in_len.max(8); + match ret_ty.sty { + ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (), + _ => return_error!( + "bitmask `{}`, expected `u{}`", + ret_ty, expected_int_bits + ), + } + + // Integer vector : + let (i_xn, in_elem_bitwidth) = match in_elem.sty { + ty::Int(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _) + ), + ty::Uint(i) => ( + args[0].immediate(), + i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _) + ), + _ => return_error!( + "vector argument `{}`'s element type `{}`, expected integer element type", + in_ty, in_elem + ), + }; + + // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position. 
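The `simd_bitmask` lowering above packs the most significant bit of every lane into an unsigned integer, padded out to at least 8 bits. A plain-Rust model of the same computation on a slice of `i32` lanes (the real intrinsic operates on SIMD vectors and performs the shift and truncation in LLVM IR):

```rust
// Model of `simd_bitmask` for i32 lanes: lane i contributes its sign bit (the
// MSB, i.e. the value shifted right by bit-width - 1) to bit i of the result.
// Assumes at most 32 lanes so the shift into a u32 cannot overflow.
fn bitmask_of_lanes(lanes: &[i32]) -> u32 {
    let mut mask = 0u32;
    for (i, &lane) in lanes.iter().enumerate() {
        let msb = ((lane as u32) >> 31) & 1;
        mask |= msb << i;
    }
    mask
}

fn main() {
    // Typical SIMD comparison result: all-ones (-1) lanes are "true".
    let lanes = [-1, 0, -1, 0];
    assert_eq!(bitmask_of_lanes(&lanes), 0b0101);
    println!("{:#06b}", bitmask_of_lanes(&lanes));
}
```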
+ let shift_indices = vec![ + bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _); in_len + ]; + let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice())); + // Truncate vector to an + let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _)); + // Bitcast to iN: + let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _)); + // Zero-extend iN to the bitmask type: + return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _))); + } + fn simd_simple_float_intrinsic( name: &str, - in_elem: &::rustc::ty::TyS, - in_ty: &::rustc::ty::TyS, + in_elem: &::rustc::ty::TyS<'_>, + in_ty: &::rustc::ty::TyS<'_>, in_len: usize, bx: &mut Builder<'a, 'll, 'tcx>, span: Span, @@ -1426,7 +1392,7 @@ fn generic_simd_intrinsic( // FIXME: use: // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 - fn llvm_vector_str(elem_ty: ty::Ty, vec_len: usize, no_pointers: usize) -> String { + fn llvm_vector_str(elem_ty: ty::Ty<'_>, vec_len: usize, no_pointers: usize) -> String { let p0s: String = "p0".repeat(no_pointers); match elem_ty.sty { ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()), @@ -1436,7 +1402,7 @@ fn generic_simd_intrinsic( } } - fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize, + fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty<'_>, vec_len: usize, mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { @@ -1482,7 +1448,7 @@ fn generic_simd_intrinsic( in_ty, ret_ty); // This counts how many pointers - fn ptr_count(t: ty::Ty) -> usize { + fn ptr_count(t: ty::Ty<'_>) -> usize { match t.sty { ty::RawPtr(p) => 1 + ptr_count(p.ty), _ => 0, @@ -1490,7 +1456,7 @@ fn generic_simd_intrinsic( } // Non-ptr type - fn non_ptr(t: ty::Ty) -> ty::Ty { + fn non_ptr(t: ty::Ty<'_>) -> ty::Ty<'_> { match t.sty { ty::RawPtr(p) => non_ptr(p.ty), _ => t, @@ -1506,8 +1472,8 @@ fn generic_simd_intrinsic( require!(false, "expected element type `{}` of second argument `{}` \ to be a pointer to the element type `{}` of the first \ argument `{}`, found `{}` != `*_ {}`", - arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty, - arg_tys[1].simd_type(tcx).sty, in_elem); + arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty, + arg_tys[1].simd_type(tcx), in_elem); unreachable!(); } }; @@ -1521,7 +1487,7 @@ fn generic_simd_intrinsic( _ => { require!(false, "expected element type `{}` of third argument `{}` \ to be a signed integer type", - arg_tys[2].simd_type(tcx).sty, arg_tys[2]); + arg_tys[2].simd_type(tcx), arg_tys[2]); } } @@ -1581,7 +1547,7 @@ fn generic_simd_intrinsic( arg_tys[2].simd_size(tcx)); // This counts how many pointers - fn ptr_count(t: ty::Ty) -> usize { + fn ptr_count(t: ty::Ty<'_>) -> usize { match t.sty { ty::RawPtr(p) => 1 + ptr_count(p.ty), _ => 0, @@ -1589,7 +1555,7 @@ fn generic_simd_intrinsic( } // Non-ptr type - fn non_ptr(t: ty::Ty) -> ty::Ty { + fn non_ptr(t: ty::Ty<'_>) -> ty::Ty<'_> { match t.sty { ty::RawPtr(p) => non_ptr(p.ty), _ => t, @@ -1606,8 +1572,8 @@ fn generic_simd_intrinsic( require!(false, "expected element type `{}` of second argument `{}` \ to be a pointer to the element type `{}` of the first \ argument `{}`, found `{}` != `*mut {}`", - arg_tys[1].simd_type(tcx).sty, arg_tys[1], in_elem, in_ty, - arg_tys[1].simd_type(tcx).sty, in_elem); + arg_tys[1].simd_type(tcx), arg_tys[1], in_elem, in_ty, + 
arg_tys[1].simd_type(tcx), in_elem); unreachable!(); } }; @@ -1621,7 +1587,7 @@ fn generic_simd_intrinsic( _ => { require!(false, "expected element type `{}` of third argument `{}` \ to be a signed integer type", - arg_tys[2].simd_type(tcx).sty, arg_tys[2]); + arg_tys[2].simd_type(tcx), arg_tys[2]); } } @@ -1912,7 +1878,52 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, simd_xor: Uint, Int => xor; simd_fmax: Float => maxnum; simd_fmin: Float => minnum; + } + + if name == "simd_saturating_add" || name == "simd_saturating_sub" { + let lhs = args[0].immediate(); + let rhs = args[1].immediate(); + let is_add = name == "simd_saturating_add"; + let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _; + let (signed, elem_width, elem_ty) = match in_elem.sty { + ty::Int(i) => + ( + true, + i.bit_width().unwrap_or(ptr_bits), + bx.cx.type_int_from_ty(i) + ), + ty::Uint(i) => + ( + false, + i.bit_width().unwrap_or(ptr_bits), + bx.cx.type_uint_from_ty(i) + ), + _ => { + return_error!( + "expected element type `{}` of vector type `{}` \ + to be a signed or unsigned integer type", + arg_tys[0].simd_type(tcx), arg_tys[0] + ); + } + }; + let llvm_intrinsic = &format!( + "llvm.{}{}.sat.v{}i{}", + if signed { 's' } else { 'u' }, + if is_add { "add" } else { "sub" }, + in_len, elem_width + ); + let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); + + let f = bx.declare_cfn( + &llvm_intrinsic, + bx.type_func(&[vec_ty, vec_ty], vec_ty) + ); + llvm::SetUnnamedAddr(f, false); + let v = bx.call(f, &[lhs, rhs], None); + return Ok(v); + } + span_bug!(span, "unknown SIMD intrinsic"); } @@ -1920,7 +1931,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. -fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> { +fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> { match ty.sty { ty::Int(t) => Some((match t { ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64, @@ -1942,10 +1953,10 @@ fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> { } } -// Returns the width of a float TypeVariant +// Returns the width of a float Ty // Returns None if the type is not a float -fn float_type_width<'tcx>(sty: &ty::TyKind<'tcx>) -> Option { - match *sty { +fn float_type_width(ty: Ty<'_>) -> Option { + match ty.sty { ty::Float(t) => Some(t.bit_width() as u64), _ => None, } diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index ff06d3759bd94..0aae6b46e3def 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -1,22 +1,10 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The Rust compiler. //! //! # Note //! //! This API is completely unstable and subject to change. 
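Both the scalar `saturating_add`/`saturating_sub` arm earlier in intrinsic.rs and the `simd_saturating_*` lowering above rely on the same idea: either emit LLVM's saturating intrinsic directly, or fall back to an add-with-overflow followed by selecting the appropriate limit. A plain `i32` sketch of that fallback's select logic, checked against the standard library (this is a model of the idea, not the emitted IR):

```rust
// i32-only model of the fallback: compute the wrapping result plus an overflow
// flag, then select the saturation limit whenever overflow occurred.
fn saturating_add_fallback(lhs: i32, rhs: i32) -> i32 {
    let (val, overflow) = lhs.overflowing_add(rhs);
    // Signed case: a negative wrapped result means the true sum overflowed
    // upward, so the limit is MAX; otherwise it is MIN (mirroring the
    // `icmp slt` + `select` pair described above).
    let limit = if val < 0 { i32::MAX } else { i32::MIN };
    if overflow { limit } else { val }
}

fn main() {
    assert_eq!(saturating_add_fallback(i32::MAX, 1), i32::MAX);
    assert_eq!(saturating_add_fallback(i32::MIN, -1), i32::MIN);
    assert_eq!(saturating_add_fallback(40, 2), 42);
    // Agrees with the standard library's saturating behaviour.
    assert_eq!(saturating_add_fallback(7, i32::MAX), 7i32.saturating_add(i32::MAX));
}
```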
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(box_patterns)] #![feature(box_syntax)] @@ -27,33 +15,27 @@ #![allow(unused_attributes)] #![feature(libc)] #![feature(nll)] -#![feature(quote)] -#![feature(range_contains)] #![feature(rustc_diagnostic_macros)] -#![feature(slice_sort_by_cached_key)] #![feature(optin_builtin_traits)] #![feature(concat_idents)] #![feature(link_args)] #![feature(static_nobundle)] +#![feature(trusted_len)] +#![deny(rust_2018_idioms)] +#![allow(explicit_outlives_requirements)] -use back::write::create_target_machine; +use back::write::{create_target_machine, create_informational_target_machine}; use syntax_pos::symbol::Symbol; extern crate flate2; #[macro_use] extern crate bitflags; extern crate libc; #[macro_use] extern crate rustc; -extern crate jobserver; -extern crate num_cpus; extern crate rustc_mir; extern crate rustc_allocator; -extern crate rustc_apfloat; extern crate rustc_target; #[macro_use] extern crate rustc_data_structures; -extern crate rustc_demangle; extern crate rustc_incremental; -extern crate rustc_llvm; -extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_codegen_utils; extern crate rustc_codegen_ssa; extern crate rustc_fs_util; @@ -63,17 +45,14 @@ extern crate rustc_fs_util; extern crate syntax_pos; extern crate rustc_errors as errors; extern crate serialize; -extern crate cc; // Used to locate MSVC extern crate tempfile; -extern crate memmap; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig}; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, FatLTOInput}; use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; use rustc_codegen_ssa::CompiledModule; use errors::{FatalError, Handler}; use rustc::dep_graph::WorkProduct; -use rustc::util::time_graph::Timeline; use syntax_pos::symbol::InternedString; use rustc::mir::mono::Stats; pub use llvm_util::target_features; @@ -83,11 +62,11 @@ use std::sync::{mpsc, Arc}; use rustc::dep_graph::DepGraph; use rustc::middle::allocator::AllocatorKind; use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; -use rustc::session::{Session, CompileIncomplete}; -use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; +use rustc::session::Session; +use rustc::session::config::{OutputFilenames, OutputType, PrintRequest, OptLevel}; use rustc::ty::{self, TyCtxt}; -use rustc::util::time_graph; use rustc::util::profiling::ProfileCategory; +use rustc::util::common::ErrorReported; use rustc_mir::monomorphize; use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_utils::codegen_backend::CodegenBackend; @@ -133,17 +112,23 @@ mod va_arg; pub struct LlvmCodegenBackend(()); impl ExtraBackendMethods for LlvmCodegenBackend { - fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { - ModuleLlvm::new(sess, mod_name) + fn new_metadata(&self, tcx: TyCtxt<'_, '_, '_>, mod_name: &str) -> ModuleLlvm { + ModuleLlvm::new_metadata(tcx, mod_name) } + fn write_metadata<'b, 'gcx>( &self, tcx: TyCtxt<'b, 'gcx, 'gcx>, - metadata: &ModuleLlvm + metadata: &mut ModuleLlvm ) -> EncodedMetadata { base::write_metadata(tcx, metadata) } - fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + fn codegen_allocator<'b, 'gcx>( + &self, + tcx: 
TyCtxt<'b, 'gcx, 'gcx>, + mods: &mut ModuleLlvm, + kind: AllocatorKind + ) { unsafe { allocator::codegen(tcx, mods, kind) } } fn compile_codegen_unit<'a, 'tcx: 'a>( @@ -156,10 +141,11 @@ impl ExtraBackendMethods for LlvmCodegenBackend { fn target_machine_factory( &self, sess: &Session, + optlvl: OptLevel, find_features: bool ) -> Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { - back::write::target_machine_factory(sess, find_features) + back::write::target_machine_factory(sess, optlvl, find_features) } fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { llvm_util::target_cpu(sess) @@ -178,49 +164,49 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn run_fat_lto( cgcx: &CodegenContext, - modules: Vec>, - timeline: &mut Timeline + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, ) -> Result, FatalError> { - back::lto::run_fat(cgcx, modules, timeline) + back::lto::run_fat(cgcx, modules, cached_modules) } fn run_thin_lto( cgcx: &CodegenContext, modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, - timeline: &mut Timeline ) -> Result<(Vec>, Vec), FatalError> { - back::lto::run_thin(cgcx, modules, cached_modules, timeline) + back::lto::run_thin(cgcx, modules, cached_modules) } unsafe fn optimize( cgcx: &CodegenContext, diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig, - timeline: &mut Timeline ) -> Result<(), FatalError> { - back::write::optimize(cgcx, diag_handler, module, config, timeline) + back::write::optimize(cgcx, diag_handler, module, config) } unsafe fn optimize_thin( cgcx: &CodegenContext, thin: &mut ThinModule, - timeline: &mut Timeline ) -> Result, FatalError> { - back::lto::optimize_thin_module(thin, cgcx, timeline) + back::lto::optimize_thin_module(thin, cgcx) } unsafe fn codegen( cgcx: &CodegenContext, diag_handler: &Handler, module: ModuleCodegen, config: &ModuleConfig, - timeline: &mut Timeline ) -> Result { - back::write::codegen(cgcx, diag_handler, module, config, timeline) + back::write::codegen(cgcx, diag_handler, module, config) } fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, Self::ThinBuffer) { - back::lto::prepare_thin(cgcx, module) + back::lto::prepare_thin(module) + } + fn serialize_module( + module: ModuleCodegen + ) -> (String, Self::ModuleBuffer) { + (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod())) } fn run_lto_pass_manager( cgcx: &CodegenContext, @@ -293,14 +279,14 @@ impl CodegenBackend for LlvmCodegenBackend { box metadata::LlvmMetadataLoader } - fn provide(&self, providers: &mut ty::query::Providers) { + fn provide(&self, providers: &mut ty::query::Providers<'_>) { rustc_codegen_utils::symbol_names::provide(providers); rustc_codegen_ssa::back::symbol_export::provide(providers); rustc_codegen_ssa::base::provide_both(providers); attributes::provide(providers); } - fn provide_extern(&self, providers: &mut ty::query::Providers) { + fn provide_extern(&self, providers: &mut ty::query::Providers<'_>) { rustc_codegen_ssa::back::symbol_export::provide_extern(providers); rustc_codegen_ssa::base::provide_both(providers); attributes::provide_extern(providers); @@ -320,7 +306,7 @@ impl CodegenBackend for LlvmCodegenBackend { sess: &Session, dep_graph: &DepGraph, outputs: &OutputFilenames, - ) -> Result<(), CompileIncomplete>{ + ) -> Result<(), ErrorReported>{ use rustc::util::common::time; let (codegen_results, work_products) = ongoing_codegen.downcast:: @@ -344,12 +330,12 @@ impl CodegenBackend for 
LlvmCodegenBackend { // Run the linker on any artifacts that resulted from the LLVM run. // This should produce either a finished executable or library. - sess.profiler(|p| p.start_activity(ProfileCategory::Linking)); + sess.profiler(|p| p.start_activity(ProfileCategory::Linking, "link_crate")); time(sess, "linking", || { back::link::link_binary(sess, &codegen_results, outputs, &codegen_results.crate_name.as_str()); }); - sess.profiler(|p| p.end_activity(ProfileCategory::Linking)); + sess.profiler(|p| p.end_activity(ProfileCategory::Linking, "link_crate")); // Now that we won't touch anything in the incremental compilation directory // any more, we can finalize it (which involves renaming it) @@ -375,19 +361,55 @@ unsafe impl Send for ModuleLlvm { } unsafe impl Sync for ModuleLlvm { } impl ModuleLlvm { - fn new(sess: &Session, mod_name: &str) -> Self { + fn new(tcx: TyCtxt<'_, '_, '_>, mod_name: &str) -> Self { unsafe { - let llcx = llvm::LLVMRustContextCreate(sess.fewer_names()); - let llmod_raw = context::create_module(sess, llcx, mod_name) as *const _; + let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names()); + let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _; + ModuleLlvm { + llmod_raw, + llcx, + tm: create_target_machine(tcx, false), + } + } + } + fn new_metadata(tcx: TyCtxt<'_, '_, '_>, mod_name: &str) -> Self { + unsafe { + let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names()); + let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _; ModuleLlvm { llmod_raw, llcx, - tm: create_target_machine(sess, false), + tm: create_informational_target_machine(&tcx.sess, false), } } } + fn parse( + cgcx: &CodegenContext, + name: &str, + buffer: &back::lto::ModuleBuffer, + handler: &Handler, + ) -> Result { + unsafe { + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = buffer.parse(name, llcx, handler)?; + let tm = match (cgcx.tm_factory.0)() { + Ok(m) => m, + Err(e) => { + handler.struct_err(&e).emit(); + return Err(FatalError) + } + }; + + Ok(ModuleLlvm { + llmod_raw, + llcx, + tm, + }) + } + } + fn llmod(&self) -> &llvm::Module { unsafe { &*self.llmod_raw diff --git a/src/librustc_codegen_llvm/llvm/archive_ro.rs b/src/librustc_codegen_llvm/llvm/archive_ro.rs index d5c73fecf814a..0a8bb3250c5d5 100644 --- a/src/librustc_codegen_llvm/llvm/archive_ro.rs +++ b/src/librustc_codegen_llvm/llvm/archive_ro.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A wrapper around LLVM's archive (.a) code use std::path::Path; @@ -46,7 +36,7 @@ impl ArchiveRO { }; } - pub fn iter(&self) -> Iter { + pub fn iter(&self) -> Iter<'_> { unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw), diff --git a/src/librustc_codegen_llvm/llvm/diagnostic.rs b/src/librustc_codegen_llvm/llvm/diagnostic.rs index b080c51c83a5b..04e65ac423300 100644 --- a/src/librustc_codegen_llvm/llvm/diagnostic.rs +++ b/src/librustc_codegen_llvm/llvm/diagnostic.rs @@ -1,20 +1,10 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! LLVM diagnostic reports. pub use self::OptimizationDiagnosticKind::*; pub use self::Diagnostic::*; use libc::c_uint; -use value::Value; +use crate::value::Value; use super::{DiagnosticInfo, Twine}; @@ -98,7 +88,7 @@ impl OptimizationDiagnostic<'ll> { pub struct InlineAsmDiagnostic<'ll> { pub cookie: c_uint, pub message: &'ll Twine, - pub instruction: &'ll Value, + pub instruction: Option<&'ll Value>, } impl InlineAsmDiagnostic<'ll> { @@ -117,7 +107,7 @@ impl InlineAsmDiagnostic<'ll> { InlineAsmDiagnostic { cookie, message: message.unwrap(), - instruction: instruction.unwrap(), + instruction, } } } diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 4732db88ec1cb..76a8c855f8899 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -1,26 +1,14 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::debuginfo::{ DIBuilder, DIDescriptor, DIFile, DILexicalBlock, DISubprogram, DIType, DIBasicType, DIDerivedType, DICompositeType, DIScope, DIVariable, DIGlobalVariableExpression, DIArray, DISubrange, DITemplateTypeParameter, DIEnumerator, - DINameSpace, DIFlags, + DINameSpace, DIFlags, DISPFlags, DebugEmissionKind, }; use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; -use syntax; -use rustc_codegen_ssa; use super::RustString; @@ -55,6 +43,8 @@ pub enum CallConv { X86_64_Win64 = 79, X86_VectorCall = 80, X86_Intr = 83, + AvrNonBlockingInterrupt = 84, + AvrInterrupt = 85, AmdGpuKernel = 91, } @@ -125,6 +115,8 @@ pub enum Attribute { SanitizeAddress = 21, SanitizeMemory = 22, NonLazyBind = 23, + OptimizeNone = 24, + ReturnsTwice = 25, } /// LLVMIntPredicate @@ -598,7 +590,41 @@ pub mod debuginfo { const FlagIntroducedVirtual = (1 << 18); const FlagBitField = (1 << 19); const FlagNoReturn = (1 << 20); - const FlagMainSubprogram = (1 << 21); + } + } + + // These values **must** match with LLVMRustDISPFlags!! + bitflags! 
{ + #[repr(C)] + #[derive(Default)] + pub struct DISPFlags: ::libc::uint32_t { + const SPFlagZero = 0; + const SPFlagVirtual = 1; + const SPFlagPureVirtual = 2; + const SPFlagLocalToUnit = (1 << 2); + const SPFlagDefinition = (1 << 3); + const SPFlagOptimized = (1 << 4); + const SPFlagMainSubprogram = (1 << 5); + } + } + + /// LLVMRustDebugEmissionKind + #[derive(Copy, Clone)] + #[repr(C)] + pub enum DebugEmissionKind { + NoDebug, + FullDebug, + LineTablesOnly, + } + + impl DebugEmissionKind { + pub fn from_generic(kind: rustc::session::config::DebugInfo) -> Self { + use rustc::session::config::DebugInfo; + match kind { + DebugInfo::None => DebugEmissionKind::NoDebug, + DebugInfo::Limited => DebugEmissionKind::LineTablesOnly, + DebugInfo::Full => DebugEmissionKind::FullDebug, + } } } } @@ -1259,7 +1285,7 @@ extern "C" { SingleThreaded: Bool) -> &'a Value; - pub fn LLVMRustBuildAtomicFence(B: &Builder, + pub fn LLVMRustBuildAtomicFence(B: &Builder<'_>, Order: AtomicOrdering, Scope: SynchronizationScope); @@ -1287,17 +1313,17 @@ extern "C" { pub fn LLVMPassManagerBuilderUseInlinerWithThreshold(PMB: &PassManagerBuilder, threshold: c_uint); pub fn LLVMPassManagerBuilderPopulateModulePassManager(PMB: &PassManagerBuilder, - PM: &PassManager); + PM: &PassManager<'_>); pub fn LLVMPassManagerBuilderPopulateFunctionPassManager(PMB: &PassManagerBuilder, - PM: &PassManager); + PM: &PassManager<'_>); pub fn LLVMPassManagerBuilderPopulateLTOPassManager(PMB: &PassManagerBuilder, - PM: &PassManager, + PM: &PassManager<'_>, Internalize: Bool, RunInliner: Bool); pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager( PMB: &PassManagerBuilder, - PM: &PassManager); + PM: &PassManager<'_>); // Stuff that's in rustllvm/ because it's not upstream yet. @@ -1312,15 +1338,15 @@ extern "C" { pub fn LLVMGetSections(ObjFile: &'a ObjectFile) -> &'a mut SectionIterator<'a>; /// Destroys a section iterator. pub fn LLVMDisposeSectionIterator(SI: &'a mut SectionIterator<'a>); - /// Returns true if the section iterator is at the end of the section + /// Returns `true` if the section iterator is at the end of the section /// list: pub fn LLVMIsSectionIteratorAtEnd(ObjFile: &'a ObjectFile, SI: &SectionIterator<'a>) -> Bool; /// Moves the section iterator to point to the next section. - pub fn LLVMMoveToNextSection(SI: &SectionIterator); + pub fn LLVMMoveToNextSection(SI: &SectionIterator<'_>); /// Returns the current section size. - pub fn LLVMGetSectionSize(SI: &SectionIterator) -> c_ulonglong; + pub fn LLVMGetSectionSize(SI: &SectionIterator<'_>) -> c_ulonglong; /// Returns the current section contents as a string buffer. - pub fn LLVMGetSectionContents(SI: &SectionIterator) -> *const c_char; + pub fn LLVMGetSectionContents(SI: &SectionIterator<'_>) -> *const c_char; /// Reads the given file and returns it as a memory buffer. Use /// LLVMDisposeMemoryBuffer() to get rid of it. 
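A self-contained sketch of the `DebugEmissionKind::from_generic` mapping introduced above. The enums below are local stand-ins for `rustc::session::config::DebugInfo` and the LLVM-side emission kind, defined only so the example compiles on its own.

```rust
// Local stand-ins so the sketch compiles on its own; the real types live in
// `rustc::session::config` and in the LLVM FFI module above.
#[derive(Copy, Clone, Debug, PartialEq)]
enum DebugInfo { None, Limited, Full }

#[derive(Copy, Clone, Debug, PartialEq)]
enum DebugEmissionKind { NoDebug, FullDebug, LineTablesOnly }

fn emission_kind(kind: DebugInfo) -> DebugEmissionKind {
    match kind {
        // -C debuginfo=0: emit no debug info at all.
        DebugInfo::None => DebugEmissionKind::NoDebug,
        // -C debuginfo=1: line tables only, enough for backtraces.
        DebugInfo::Limited => DebugEmissionKind::LineTablesOnly,
        // -C debuginfo=2: full variable and type information.
        DebugInfo::Full => DebugEmissionKind::FullDebug,
    }
}

fn main() {
    assert_eq!(emission_kind(DebugInfo::None), DebugEmissionKind::NoDebug);
    assert_eq!(emission_kind(DebugInfo::Limited), DebugEmissionKind::LineTablesOnly);
    assert_eq!(emission_kind(DebugInfo::Full), DebugEmissionKind::FullDebug);
}
```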
@@ -1368,7 +1394,7 @@ extern "C" { pub fn LLVMRustDIBuilderDispose(Builder: &'a mut DIBuilder<'a>); - pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder); + pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>); pub fn LLVMRustDIBuilderCreateCompileUnit(Builder: &DIBuilder<'a>, Lang: c_uint, @@ -1377,7 +1403,8 @@ extern "C" { isOptimized: bool, Flags: *const c_char, RuntimeVer: c_uint, - SplitName: *const c_char) + SplitName: *const c_char, + kind: DebugEmissionKind) -> &'a DIDescriptor; pub fn LLVMRustDIBuilderCreateFile(Builder: &DIBuilder<'a>, @@ -1397,11 +1424,9 @@ extern "C" { File: &'a DIFile, LineNo: c_uint, Ty: &'a DIType, - isLocalToUnit: bool, - isDefinition: bool, ScopeLine: c_uint, Flags: DIFlags, - isOptimized: bool, + SPFlags: DISPFlags, Fn: &'a Value, TParam: &'a DIArray, Decl: Option<&'a DIDescriptor>) @@ -1539,7 +1564,7 @@ extern "C" { AlignInBits: u32, Elements: &'a DIArray, ClassType: &'a DIType, - IsFixed: bool) + IsScoped: bool) -> &'a DIType; pub fn LLVMRustDIBuilderCreateUnionType(Builder: &DIBuilder<'a>, @@ -1601,21 +1626,18 @@ extern "C" { -> &'a Value; pub fn LLVMRustDIBuilderCreateOpDeref() -> i64; pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> i64; -} -#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. -extern "C" { + #[allow(improper_ctypes)] pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString); + #[allow(improper_ctypes)] pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString); -} -extern "C" { pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&Value>; pub fn LLVMIsAConstantFP(value_ref: &Value) -> Option<&Value>; pub fn LLVMRustPassKind(Pass: &Pass) -> PassKind; pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>; - pub fn LLVMRustAddPass(PM: &PassManager, Pass: &'static mut Pass); + pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass); pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool; @@ -1667,7 +1689,8 @@ extern "C" { Demangle: extern fn(*const c_char, size_t, *mut c_char, - size_t) -> size_t); + size_t) -> size_t, + ) -> LLVMRustResult; pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char); pub fn LLVMRustPrintPasses(); pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char); @@ -1680,28 +1703,22 @@ extern "C" { pub fn LLVMRustArchiveIteratorNext( AIR: &ArchiveIterator<'a>, ) -> Option<&'a mut ArchiveChild<'a>>; - pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild, size: &mut size_t) -> *const c_char; - pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild, size: &mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char; + pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char; pub fn LLVMRustArchiveChildFree(ACR: &'a mut ArchiveChild<'a>); pub fn LLVMRustArchiveIteratorFree(AIR: &'a mut ArchiveIterator<'a>); pub fn LLVMRustDestroyArchive(AR: &'static mut Archive); - pub fn LLVMRustGetSectionName(SI: &SectionIterator, data: &mut *const c_char) -> size_t; -} + pub fn LLVMRustGetSectionName(SI: &SectionIterator<'_>, data: &mut *const c_char) -> size_t; -#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. 
-extern "C" { + #[allow(improper_ctypes)] pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString); -} -extern "C" { pub fn LLVMContextSetDiagnosticHandler(C: &Context, Handler: DiagnosticHandler, DiagnosticContext: *mut c_void); -} -#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. -extern "C" { + #[allow(improper_ctypes)] pub fn LLVMRustUnpackOptimizationDiagnostic(DI: &'a DiagnosticInfo, pass_name_out: &RustString, function_out: &mut Option<&'a Value>, @@ -1709,37 +1726,26 @@ extern "C" { loc_column_out: &mut c_uint, loc_filename_out: &RustString, message_out: &RustString); -} -extern "C" { pub fn LLVMRustUnpackInlineAsmDiagnostic(DI: &'a DiagnosticInfo, cookie_out: &mut c_uint, message_out: &mut Option<&'a Twine>, instruction_out: &mut Option<&'a Value>); -} -#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. -extern "C" { + #[allow(improper_ctypes)] pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString); -} - -extern "C" { pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind; pub fn LLVMRustSetInlineAsmDiagnosticHandler(C: &Context, H: InlineAsmDiagHandler, CX: *mut c_void); -} -#[allow(improper_ctypes)] // FIXME(#52456) needed for RustString. -extern "C" { + #[allow(improper_ctypes)] pub fn LLVMRustWriteSMDiagnosticToString(d: &SMDiagnostic, s: &RustString); -} -extern "C" { pub fn LLVMRustWriteArchive(Dst: *const c_char, NumMembers: size_t, - Members: *const &RustArchiveMember, + Members: *const &RustArchiveMember<'_>, WriteSymbtab: bool, Kind: ArchiveKind) -> LLVMRustResult; @@ -1800,7 +1806,7 @@ extern "C" { CallbackPayload: *mut c_void, ); pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData); - pub fn LLVMRustParseBitcodeForThinLTO( + pub fn LLVMRustParseBitcodeForLTO( Context: &Context, Data: *const u8, len: usize, @@ -1812,7 +1818,7 @@ extern "C" { pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void); pub fn LLVMRustLinkerNew(M: &'a Module) -> &'a mut Linker<'a>; - pub fn LLVMRustLinkerAdd(linker: &Linker, + pub fn LLVMRustLinkerAdd(linker: &Linker<'_>, bytecode: *const c_char, bytecode_len: usize) -> bool; pub fn LLVMRustLinkerFree(linker: &'a mut Linker<'a>); diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index 3764c122dea29..543cc912930fd 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -26,7 +16,7 @@ use std::string::FromUtf8Error; use std::slice; use std::ffi::CStr; use std::cell::RefCell; -use libc::{self, c_uint, c_char, size_t}; +use libc::{c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; pub mod archive_ro; diff --git a/src/librustc_codegen_llvm/llvm_util.rs b/src/librustc_codegen_llvm/llvm_util.rs index 82b1d7e8b40e4..5fea9c8747e0f 100644 --- a/src/librustc_codegen_llvm/llvm_util.rs +++ b/src/librustc_codegen_llvm/llvm_util.rs @@ -1,18 +1,9 @@ -// Copyright 2017 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use crate::back::write::create_informational_target_machine; +use crate::llvm; use syntax_pos::symbol::Symbol; -use back::write::create_target_machine; -use llvm; use rustc::session::Session; use rustc::session::config::PrintRequest; +use rustc_target::spec::MergeFunctions; use libc::c_int; use std::ffi::CString; use syntax::feature_gate::UnstableFeatures; @@ -71,9 +62,20 @@ unsafe fn configure_llvm(sess: &Session) { add("-disable-preinline"); } if llvm::LLVMRustIsRustLLVM() { - add("-mergefunc-use-aliases"); + match sess.opts.debugging_opts.merge_functions + .unwrap_or(sess.target.target.options.merge_functions) { + MergeFunctions::Disabled | + MergeFunctions::Trampolines => {} + MergeFunctions::Aliases => { + add("-mergefunc-use-aliases"); + } + } } + // HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes + // during inlining. Unfortunately these may block other optimizations. + add("-preserve-alignment-assumptions-during-inlining=false"); + for arg in &sess.opts.cg.llvm_args { add(&(*arg)); } @@ -98,9 +100,11 @@ const ARM_WHITELIST: &[(&str, Option<&str>)] = &[ ("dsp", Some("arm_target_feature")), ("neon", Some("arm_target_feature")), ("v5te", Some("arm_target_feature")), + ("v6", Some("arm_target_feature")), ("v6k", Some("arm_target_feature")), ("v6t2", Some("arm_target_feature")), ("v7", Some("arm_target_feature")), + ("v8", Some("arm_target_feature")), ("vfp2", Some("arm_target_feature")), ("vfp3", Some("arm_target_feature")), ("vfp4", Some("arm_target_feature")), @@ -145,6 +149,7 @@ const X86_WHITELIST: &[(&str, Option<&str>)] = &[ ("fxsr", None), ("lzcnt", None), ("mmx", Some("mmx_target_feature")), + ("movbe", Some("movbe_target_feature")), ("pclmulqdq", None), ("popcnt", None), ("rdrand", None), @@ -221,7 +226,7 @@ pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str { } pub fn target_features(sess: &Session) -> Vec { - let target_machine = create_target_machine(sess, true); + let target_machine = create_informational_target_machine(sess, true); target_feature_whitelist(sess) .iter() .filter_map(|&(feature, gate)| { @@ -274,7 +279,7 @@ pub fn print_passes() { pub(crate) fn print(req: PrintRequest, sess: &Session) { require_inited(); - let tm = create_target_machine(sess, true); + let tm = create_informational_target_machine(sess, true); unsafe { match req { PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm), diff --git a/src/librustc_codegen_llvm/metadata.rs b/src/librustc_codegen_llvm/metadata.rs index 5605f64c2e72c..a2df687d58f5a 100644 --- a/src/librustc_codegen_llvm/metadata.rs +++ b/src/librustc_codegen_llvm/metadata.rs @@ -1,18 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
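A stand-alone sketch of the new `MergeFunctions` handling in `configure_llvm` above: the `-mergefunc-use-aliases` flag is only emitted when alias-based merging is allowed. `MergeFunctions` below is a local stand-in for `rustc_target::spec::MergeFunctions`, and the function collects flags into a `Vec` instead of calling the FFI `add` helper.

```rust
// `MergeFunctions` is a local stand-in for `rustc_target::spec::MergeFunctions`.
#[derive(Copy, Clone)]
enum MergeFunctions { Disabled, Trampolines, Aliases }

fn merge_functions_args(setting: MergeFunctions) -> Vec<&'static str> {
    let mut args = Vec::new();
    match setting {
        // Merging disabled, or restricted to trampolines: nothing extra to pass.
        MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
        // Aliases allowed: opt in to LLVM's alias-based function merging.
        MergeFunctions::Aliases => args.push("-mergefunc-use-aliases"),
    }
    args
}

fn main() {
    assert!(merge_functions_args(MergeFunctions::Disabled).is_empty());
    assert!(merge_functions_args(MergeFunctions::Trampolines).is_empty());
    assert_eq!(merge_functions_args(MergeFunctions::Aliases), ["-mergefunc-use-aliases"]);
}
```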
- +use crate::llvm; +use crate::llvm::{False, ObjectFile, mk_section_iter}; +use crate::llvm::archive_ro::ArchiveRO; use rustc::middle::cstore::MetadataLoader; use rustc_target::spec::Target; -use llvm; -use llvm::{False, ObjectFile, mk_section_iter}; -use llvm::archive_ro::ArchiveRO; use rustc_data_structures::owning_ref::OwningRef; use std::path::Path; diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index 9c69d7d8cf4ff..7f0cdb9f58008 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -1,19 +1,9 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use attributes; -use base; -use context::CodegenCx; -use llvm; -use monomorphize::Instance; -use type_of::LayoutLlvmExt; +use crate::attributes; +use crate::base; +use crate::context::CodegenCx; +use crate::llvm; +use crate::monomorphize::Instance; +use crate::type_of::LayoutLlvmExt; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; @@ -46,10 +36,10 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn predefine_fn(&self, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { assert!(!instance.substs.needs_infer() && !instance.substs.has_param_types()); diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 313ab1f974fde..a5ed64a66a39d 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -1,46 +1,36 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
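The hunks above follow the crate's move to the 2018 edition: `extern crate` lines disappear and in-crate imports are written through `crate::`. A minimal 2018-edition snippet showing the style being adopted; the `metadata` module here is a placeholder, not the real one.

```rust
#![deny(rust_2018_idioms)]

// Placeholder module standing in for an in-crate module such as `metadata`.
mod metadata {
    pub fn loader_name() -> &'static str { "LlvmMetadataLoader" }
}

// 2015 style was `use metadata::loader_name;` plus `extern crate` items for
// external dependencies; under the 2018 edition the in-crate path is written
// explicitly relative to the crate root.
use crate::metadata::loader_name;

fn main() {
    println!("{}", loader_name());
}
```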
- #![allow(non_upper_case_globals)] -pub use llvm::Type; +pub use crate::llvm::Type; -use llvm; -use llvm::{Bool, False, True}; -use context::CodegenCx; +use crate::llvm; +use crate::llvm::{Bool, False, True}; +use crate::context::CodegenCx; +use crate::value::Value; use rustc_codegen_ssa::traits::*; -use value::Value; -use rustc::util::nodemap::FxHashMap; +use crate::common; +use crate::type_of::LayoutLlvmExt; +use crate::abi::{LlvmType, FnTypeExt}; +use syntax::ast; use rustc::ty::Ty; -use rustc::ty::layout::TyLayout; +use rustc::ty::layout::{self, Align, Size, TyLayout}; use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; -use common; use rustc_codegen_ssa::common::TypeKind; -use type_of::LayoutLlvmExt; -use abi::{LlvmType, FnTypeExt}; use std::fmt; -use std::cell::RefCell; +use std::ptr; use libc::c_uint; impl PartialEq for Type { fn eq(&self, other: &Self) -> bool { - self as *const _ == other as *const _ + ptr::eq(self, other) } } impl fmt::Debug for Type { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&llvm::build_string(|s| unsafe { llvm::LLVMRustWriteTypeToString(self, s); }).expect("non-UTF8 type description from LLVM")) @@ -61,21 +51,116 @@ impl CodegenCx<'ll, 'tcx> { els.len() as c_uint, packed as Bool) } } -} -impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { - fn type_void(&self) -> &'ll Type { + crate fn type_void(&self) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(self.llcx) } } - fn type_metadata(&self) -> &'ll Type { + crate fn type_metadata(&self) -> &'ll Type { unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) } } + ///x Creates an integer type with the given number of bits, e.g., i24 + crate fn type_ix(&self, num_bits: u64) -> &'ll Type { + unsafe { + llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) + } + } + + crate fn type_x86_mmx(&self) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(self.llcx) + } + } + + crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { + unsafe { + llvm::LLVMVectorType(ty, len as c_uint) + } + } + + crate fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { + unsafe { + let n_args = llvm::LLVMCountParamTypes(ty) as usize; + let mut args = Vec::with_capacity(n_args); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); + args.set_len(n_args); + args + } + } + + crate fn type_bool(&self) -> &'ll Type { + self.type_i8() + } + + crate fn type_int_from_ty(&self, t: ast::IntTy) -> &'ll Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + + crate fn type_uint_from_ty(&self, t: ast::UintTy) -> &'ll Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + + crate fn type_float_from_ty(&self, t: ast::FloatTy) -> &'ll Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + crate fn type_pointee_for_align(&self, align: Align) -> &'ll Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. 
+ let ity = layout::Integer::approximate_align(self, align); + self.type_from_integer(ity) + } + + /// Return a LLVM type that has at most the required alignment, + /// and exactly the required size, as a best-effort padding array. + crate fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { + let unit = layout::Integer::approximate_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + crate fn type_variadic_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { + unsafe { + llvm::LLVMFunctionType(ret, args.as_ptr(), + args.len() as c_uint, True) + } + } + + crate fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { + unsafe { + llvm::LLVMRustArrayType(ty, len) + } + } +} + +impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn type_i1(&self) -> &'ll Type { unsafe { llvm::LLVMInt1TypeInContext(self.llcx) @@ -91,7 +176,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn type_i16(&self) -> &'ll Type { unsafe { - llvm::LLVMInt16TypeInContext(self.llcx) } } @@ -114,12 +198,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_ix(&self, num_bits: u64) -> &'ll Type { - unsafe { - llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) - } - } - fn type_isize(&self) -> &'ll Type { self.isize_ty } @@ -136,12 +214,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_x86_mmx(&self) -> &'ll Type { - unsafe { - llvm::LLVMX86MMXTypeInContext(self.llcx) - } - } - fn type_func( &self, args: &[&'ll Type], @@ -153,17 +225,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn type_variadic_func( - &self, - args: &[&'ll Type], - ret: &'ll Type - ) -> &'ll Type { - unsafe { - llvm::LLVMFunctionType(ret, args.as_ptr(), - args.len() as c_uint, True) - } - } - fn type_struct( &self, els: &[&'ll Type], @@ -176,19 +237,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - - fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { - llvm::LLVMRustArrayType(ty, len) - } - } - - fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { - llvm::LLVMVectorType(ty, len as c_uint) - } - } - fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() @@ -213,16 +261,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { - unsafe { - let n_args = llvm::LLVMCountParamTypes(ty) as usize; - let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); - args.set_len(n_args); - args - } - } - fn float_width(&self, ty: &'ll Type) -> usize { match self.type_kind(ty) { TypeKind::Float => 32, @@ -242,10 +280,6 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } - - fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { - &self.scalar_lltypes - } } impl Type { @@ -304,9 +338,6 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { ty.llvm_type(self) } - fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { - ty.llvm_type(self) - } fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { ty.ptr_to_llvm_type(self) } diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 52b560c662540..d42fa8291618c 
100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use abi::{FnType, FnTypeExt}; -use common::*; +use crate::abi::{FnType, FnTypeExt}; +use crate::common::*; +use crate::type_::Type; use rustc::hir; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use rustc_codegen_ssa::traits::*; -use type_::Type; use std::fmt::Write; @@ -65,12 +55,12 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, ty::Str => { let mut name = String::with_capacity(32); let printer = DefPathBasedNames::new(cx.tcx, true, true); - printer.push_type_name(layout.ty, &mut name); + printer.push_type_name(layout.ty, &mut name, false); if let (&ty::Adt(def, _), &layout::Variants::Single { index }) = (&layout.ty.sty, &layout.variants) { if def.is_enum() && !def.variants.is_empty() { - write!(&mut name, "::{}", def.variants[index].name).unwrap(); + write!(&mut name, "::{}", def.variants[index].ident).unwrap(); } } Some(name) @@ -236,7 +226,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - /// Get the LLVM type corresponding to a Rust type, i.e., `rustc::ty::Ty`. + /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc::ty::Ty`. /// The pointee type of the pointer in `PlaceRef` is always this type. /// For sized types, it is also the right LLVM type for an `alloca` /// containing a value of that type, and most immediates (except `bool`). @@ -462,25 +452,27 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { _ => { let mut data_variant = match self.variants { - layout::Variants::NicheFilling { dataful_variant, .. } => { - // Only the niche itself is always initialized, - // so only check for a pointer at its offset. - // - // If the niche is a pointer, it's either valid - // (according to its type), or null (which the - // niche field's scalar validity range encodes). - // This allows using `dereferenceable_or_null` - // for e.g., `Option<&T>`, and this will continue - // to work as long as we don't start using more - // niches than just null (e.g., the first page - // of the address space, or unaligned pointers). - if self.fields.offset(0) == offset { - Some(self.for_variant(cx, dataful_variant)) - } else { - None - } - } - _ => Some(*self) + // Within the discriminant field, only the niche itself is + // always initialized, so we only check for a pointer at its + // offset. + // + // If the niche is a pointer, it's either valid (according + // to its type), or null (which the niche field's scalar + // validity range encodes). This allows using + // `dereferenceable_or_null` for e.g., `Option<&T>`, and + // this will continue to work as long as we don't start + // using more niches than just null (e.g., the first page of + // the address space, or unaligned pointers). + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { + dataful_variant, + .. + }, + discr_index, + .. 
+ } if self.fields.offset(discr_index) == offset => + Some(self.for_variant(cx, dataful_variant)), + _ => Some(*self), }; if let Some(variant) = data_variant { diff --git a/src/librustc_codegen_llvm/va_arg.rs b/src/librustc_codegen_llvm/va_arg.rs index 1e5bb03ddb12b..7fc17d17f99df 100644 --- a/src/librustc_codegen_llvm/va_arg.rs +++ b/src/librustc_codegen_llvm/va_arg.rs @@ -1,21 +1,11 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use builder::Builder; +use crate::builder::Builder; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}; use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size}; use rustc::ty::Ty; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; #[allow(dead_code)] fn round_pointer_up_to_alignment( @@ -118,13 +108,13 @@ pub(super) fn emit_va_arg( emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true) } - // Windows Aarch64 - ("aarch4", true) => { + // Windows AArch64 + ("aarch64", true) => { emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false) } - // iOS Aarch64 - ("aarch4", _) if target.target_os == "ios" => { + // iOS AArch64 + ("aarch64", _) if target.target_os == "ios" => { emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true) } diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index 4bf5b09baa629..eadbe754e8e41 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -1,23 +1,14 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
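The niche comment above leans on the guaranteed null-pointer niche: `Option<&T>` and `Option<Box<T>>` need no separate discriminant because `None` is encoded as the null pointer, which is what makes `dereferenceable_or_null` sound for the dataful variant. A quick stand-alone check of that layout guarantee:

```rust
use std::mem::size_of;

fn main() {
    // `None::<&u8>` is encoded as the null pointer, a bit pattern a valid
    // `&u8` can never have, so no extra discriminant byte is needed.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // The same holds for other non-nullable pointers such as `Box`.
    assert_eq!(size_of::<Option<Box<u64>>>(), size_of::<Box<u64>>());
}
```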
+pub use crate::llvm::Value; -pub use llvm::Value; - -use llvm; +use crate::llvm; use std::fmt; use std::hash::{Hash, Hasher}; +use std::ptr; impl PartialEq for Value { fn eq(&self, other: &Self) -> bool { - self as *const _ == other as *const _ + ptr::eq(self, other) } } @@ -31,7 +22,7 @@ impl Hash for Value { impl fmt::Debug for Value { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&llvm::build_string(|s| unsafe { llvm::LLVMRustWriteValueToString(self, s); }).expect("non-UTF8 value description from LLVM")) diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml index 50994497c2843..4702e34aa19e7 100644 --- a/src/librustc_codegen_ssa/Cargo.toml +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_codegen_ssa" version = "0.0.0" +edition = "2018" [lib] name = "rustc_codegen_ssa" @@ -18,6 +19,7 @@ memmap = "0.6" log = "0.4.5" libc = "0.2.44" jobserver = "0.1.11" +parking_lot = "0.7" serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } diff --git a/src/librustc_codegen_ssa/back/archive.rs b/src/librustc_codegen_ssa/back/archive.rs index b5e1deb0d5df3..0a16d1b03e2a1 100644 --- a/src/librustc_codegen_ssa/back/archive.rs +++ b/src/librustc_codegen_ssa/back/archive.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::session::Session; use std::path::PathBuf; diff --git a/src/librustc_codegen_ssa/back/command.rs b/src/librustc_codegen_ssa/back/command.rs index 9ebbdd7c3c936..78570cce57dd9 100644 --- a/src/librustc_codegen_ssa/back/command.rs +++ b/src/librustc_codegen_ssa/back/command.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A thin wrapper around `Command` in the standard library which allows us to //! read the arguments that are built up. @@ -169,7 +159,7 @@ impl Command { } impl fmt::Debug for Command { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.command().fmt(f) } } diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs index 59102e09d4cbc..a0e2dcd646df8 100644 --- a/src/librustc_codegen_ssa/back/link.rs +++ b/src/librustc_codegen_ssa/back/link.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// For all the linkers we support, and information they might /// need out of the shared crate context before we get rid of it. 
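The `PartialEq` impls for `Value` and `Type` above now go through `std::ptr::eq`, which compares the addresses of two references (identity) rather than the pointed-to values. A short illustration of the difference:

```rust
use std::ptr;

fn main() {
    let a = 1u32;
    let b = 1u32;

    // Same value, different objects: value equality holds, identity does not.
    assert!(a == b);
    assert!(!ptr::eq(&a, &b));

    // Two references to the same object compare equal by identity.
    let r = &a;
    assert!(ptr::eq(r, &a));
}
```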
@@ -19,7 +9,7 @@ use rustc_target::spec::LinkerFlavor; use rustc::hir::def_id::CrateNum; use super::command::Command; -use CrateInfo; +use crate::CrateInfo; use cc::windows_registry; use std::fs; @@ -159,19 +149,23 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { LinkerFlavor::Ld => "ld", LinkerFlavor::Msvc => "link.exe", LinkerFlavor::Lld(_) => "lld", + LinkerFlavor::PtxLinker => "rust-ptx-linker", }), flavor)), (Some(linker), None) => { - let stem = if linker.extension().and_then(|ext| ext.to_str()) == Some("exe") { - linker.file_stem().and_then(|stem| stem.to_str()) - } else { - linker.to_str() - }.unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker"); - }).to_owned(); + let stem = linker + .file_stem() + .and_then(|stem| stem.to_str()) + .unwrap_or_else(|| { + sess.fatal("couldn't extract file stem from specified linker") + }); let flavor = if stem == "emcc" { LinkerFlavor::Em - } else if stem == "gcc" || stem.ends_with("-gcc") { + } else if stem == "gcc" + || stem.ends_with("-gcc") + || stem == "clang" + || stem.ends_with("-clang") + { LinkerFlavor::Gcc } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { LinkerFlavor::Ld diff --git a/src/librustc_codegen_ssa/back/linker.rs b/src/librustc_codegen_ssa/back/linker.rs index 4960c8922b9f9..c99fc17dd89a1 100644 --- a/src/librustc_codegen_ssa/back/linker.rs +++ b/src/librustc_codegen_ssa/back/linker.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::symbol_export; use super::command::Command; use super::archive; @@ -23,7 +13,7 @@ use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; use rustc::middle::dependency_format::Linkage; use rustc::session::Session; use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, - CrossLangLto}; + LinkerPluginLto, Lto}; use rustc::ty::TyCtxt; use rustc_target::spec::{LinkerFlavor, LldFlavor}; use serialize::{json, Encoder}; @@ -35,7 +25,7 @@ pub struct LinkerInfo { } impl LinkerInfo { - pub fn new(tcx: TyCtxt) -> LinkerInfo { + pub fn new(tcx: TyCtxt<'_, '_, '_>) -> LinkerInfo { LinkerInfo { exports: tcx.sess.crate_types.borrow().iter().map(|&c| { (c, exported_symbols(tcx, c)) @@ -91,17 +81,17 @@ impl LinkerInfo { } LinkerFlavor::Lld(LldFlavor::Wasm) => { - Box::new(WasmLd { - cmd, - sess, - info: self - }) as Box + Box::new(WasmLd::new(cmd, sess, self)) as Box + } + + LinkerFlavor::PtxLinker => { + Box::new(PtxLinker { cmd, sess }) as Box } } } } -/// Linker abstraction used by back::link to build up the command to invoke a +/// Linker abstraction used by `back::link` to build up the command to invoke a /// linker. /// /// This trait is the total list of requirements needed by `back::link` and @@ -137,7 +127,7 @@ pub trait Linker { fn subsystem(&mut self, subsystem: &str); fn group_start(&mut self); fn group_end(&mut self); - fn cross_lang_lto(&mut self); + fn linker_plugin_lto(&mut self); // Should have been finalize(self), but we don't support self-by-value on trait objects (yet?). 
fn finalize(&mut self) -> Command; } @@ -155,7 +145,7 @@ pub struct GccLinker<'a> { impl<'a> GccLinker<'a> { /// Argument that must be passed *directly* to the linker /// - /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used + /// These arguments need to be prepended with `-Wl`, when a GCC-style linker is used. fn linker_arg(&mut self, arg: S) -> &mut Self where S: AsRef { @@ -170,7 +160,16 @@ impl<'a> GccLinker<'a> { } fn takes_hints(&self) -> bool { - !self.sess.target.target.options.is_like_osx + // Really this function only returns true if the underlying linker + // configured for a compiler is binutils `ld.bfd` and `ld.gold`. We + // don't really have a foolproof way to detect that, so rule out some + // platforms where currently this is guaranteed to *not* be the case: + // + // * On OSX they have their own linker, not binutils' + // * For WebAssembly the only functional linker is LLD, which doesn't + // support hint flags + !self.sess.target.target.options.is_like_osx && + self.sess.target.target.arch != "wasm32" } // Some platforms take hints about whether a library is static or dynamic. @@ -193,7 +192,7 @@ impl<'a> GccLinker<'a> { } } - fn push_cross_lang_lto_args(&mut self, plugin_path: Option<&OsStr>) { + fn push_linker_plugin_lto_args(&mut self, plugin_path: Option<&OsStr>) { if let Some(plugin_path) = plugin_path { let mut arg = OsString::from("-plugin="); arg.push(plugin_path); @@ -385,6 +384,13 @@ impl<'a> Linker for GccLinker<'a> { return } + // Symbol visibility takes care of this for the WebAssembly. + // Additionally the only known linker, LLD, doesn't support the script + // arguments just yet + if self.sess.target.target.arch == "wasm32" { + return; + } + let mut arg = OsString::new(); let path = tmpdir.join("list"); @@ -392,20 +398,19 @@ impl<'a> Linker for GccLinker<'a> { if self.sess.target.target.options.is_like_osx { // Write a plain, newline-separated list of symbols - let res = (|| -> io::Result<()> { + let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); for sym in self.info.exports[&crate_type].iter() { debug!(" _{}", sym); writeln!(f, "_{}", sym)?; } - Ok(()) - })(); + }; if let Err(e) = res { self.sess.fatal(&format!("failed to write lib.def file: {}", e)); } } else { // Write an LD version script - let res = (|| -> io::Result<()> { + let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); writeln!(f, "{{\n global:")?; for sym in self.info.exports[&crate_type].iter() { @@ -413,8 +418,7 @@ impl<'a> Linker for GccLinker<'a> { writeln!(f, " {};", sym)?; } writeln!(f, "\n local:\n *;\n}};")?; - Ok(()) - })(); + }; if let Err(e) = res { self.sess.fatal(&format!("failed to write version script: {}", e)); } @@ -453,27 +457,27 @@ impl<'a> Linker for GccLinker<'a> { } fn group_start(&mut self) { - if !self.sess.target.target.options.is_like_osx { + if self.takes_hints() { self.linker_arg("--start-group"); } } fn group_end(&mut self) { - if !self.sess.target.target.options.is_like_osx { + if self.takes_hints() { self.linker_arg("--end-group"); } } - fn cross_lang_lto(&mut self) { - match self.sess.opts.debugging_opts.cross_lang_lto { - CrossLangLto::Disabled => { + fn linker_plugin_lto(&mut self) { + match self.sess.opts.cg.linker_plugin_lto { + LinkerPluginLto::Disabled => { // Nothing to do } - CrossLangLto::LinkerPluginAuto => { - self.push_cross_lang_lto_args(None); + LinkerPluginLto::LinkerPluginAuto => { + self.push_linker_plugin_lto_args(None); } - 
CrossLangLto::LinkerPlugin(ref path) => { - self.push_cross_lang_lto_args(Some(path.as_os_str())); + LinkerPluginLto::LinkerPlugin(ref path) => { + self.push_linker_plugin_lto_args(Some(path.as_os_str())); } } } @@ -608,18 +612,6 @@ impl<'a> Linker for MsvcLinker<'a> { // This will cause the Microsoft linker to embed .natvis info into the PDB file let natvis_dir_path = self.sess.sysroot.join("lib\\rustlib\\etc"); if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { - // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes - // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore - // them, eventually mooting this workaround, per this landed patch: - // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 - if let Some(ref linker_path) = self.sess.opts.cg.linker { - if let Some(linker_name) = Path::new(&linker_path).file_stem() { - if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { - self.sess.warn("not embedding natvis: lld-link may not support the flag"); - return; - } - } - } for entry in natvis_dir { match entry { Ok(entry) => { @@ -654,7 +646,7 @@ impl<'a> Linker for MsvcLinker<'a> { tmpdir: &Path, crate_type: CrateType) { let path = tmpdir.join("lib.def"); - let res = (|| -> io::Result<()> { + let res: io::Result<()> = try { let mut f = BufWriter::new(File::create(&path)?); // Start off with the standard module name header and then go @@ -665,8 +657,7 @@ impl<'a> Linker for MsvcLinker<'a> { debug!(" _{}", symbol); writeln!(f, " {}", symbol)?; } - Ok(()) - })(); + }; if let Err(e) = res { self.sess.fatal(&format!("failed to write lib.def file: {}", e)); } @@ -707,7 +698,7 @@ impl<'a> Linker for MsvcLinker<'a> { fn group_start(&mut self) {} fn group_end(&mut self) {} - fn cross_lang_lto(&mut self) { + fn linker_plugin_lto(&mut self) { // Do nothing } } @@ -875,7 +866,7 @@ impl<'a> Linker for EmLinker<'a> { fn group_start(&mut self) {} fn group_end(&mut self) {} - fn cross_lang_lto(&mut self) { + fn linker_plugin_lto(&mut self) { // Do nothing } } @@ -886,6 +877,12 @@ pub struct WasmLd<'a> { info: &'a LinkerInfo, } +impl<'a> WasmLd<'a> { + fn new(cmd: Command, sess: &'a Session, info: &'a LinkerInfo) -> WasmLd<'a> { + WasmLd { cmd, sess, info } + } +} + impl<'a> Linker for WasmLd<'a> { fn link_dylib(&mut self, lib: &str) { self.cmd.arg("-l").arg(lib); @@ -977,6 +974,7 @@ impl<'a> Linker for WasmLd<'a> { } fn build_dylib(&mut self, _out_filename: &Path) { + self.cmd.arg("--no-entry"); } fn export_symbols(&mut self, _tmpdir: &Path, crate_type: CrateType) { @@ -992,61 +990,6 @@ impl<'a> Linker for WasmLd<'a> { } fn finalize(&mut self) -> Command { - // There have been reports in the wild (rustwasm/wasm-bindgen#119) of - // using threads causing weird hangs and bugs. Disable it entirely as - // this isn't yet the bottleneck of compilation at all anyway. - self.cmd.arg("--no-threads"); - - // By default LLD only gives us one page of stack (64k) which is a - // little small. Default to a larger stack closer to other PC platforms - // (1MB) and users can always inject their own link-args to override this. - self.cmd.arg("-z").arg("stack-size=1048576"); - - // By default LLD's memory layout is: - // - // 1. First, a blank page - // 2. Next, all static data - // 3. Finally, the main stack (which grows down) - // - // This has the unfortunate consequence that on stack overflows you - // corrupt static data and can cause some exceedingly weird bugs. 
To - // help detect this a little sooner we instead request that the stack is - // placed before static data. - // - // This means that we'll generate slightly larger binaries as references - // to static data will take more bytes in the ULEB128 encoding, but - // stack overflow will be guaranteed to trap as it underflows instead of - // corrupting static data. - self.cmd.arg("--stack-first"); - - // FIXME we probably shouldn't pass this but instead pass an explicit - // whitelist of symbols we'll allow to be undefined. Unfortunately - // though we can't handle symbols like `log10` that LLVM injects at a - // super late date without actually parsing object files. For now let's - // stick to this and hopefully fix it before stabilization happens. - self.cmd.arg("--allow-undefined"); - - // For now we just never have an entry symbol - self.cmd.arg("--no-entry"); - - // Make the default table accessible - self.cmd.arg("--export-table"); - - // Rust code should never have warnings, and warnings are often - // indicative of bugs, let's prevent them. - self.cmd.arg("--fatal-warnings"); - - // The symbol visibility story is a bit in flux right now with LLD. - // It's... not entirely clear to me what's going on, but this looks to - // make everything work when `export_symbols` isn't otherwise called for - // things like executables. - self.cmd.arg("--export-dynamic"); - - // LLD only implements C++-like demangling, which doesn't match our own - // mangling scheme. Tell LLD to not demangle anything and leave it up to - // us to demangle these symbols later. - self.cmd.arg("--no-demangle"); - ::std::mem::replace(&mut self.cmd, Command::new("")) } @@ -1054,12 +997,12 @@ impl<'a> Linker for WasmLd<'a> { fn group_start(&mut self) {} fn group_end(&mut self) {} - fn cross_lang_lto(&mut self) { + fn linker_plugin_lto(&mut self) { // Do nothing for now } } -fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { +fn exported_symbols(tcx: TyCtxt<'_, '_, '_>, crate_type: CrateType) -> Vec { if let Some(ref exports) = tcx.sess.target.target.options.override_export_symbols { return exports.clone() } @@ -1091,3 +1034,129 @@ fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { symbols } + +/// Much simplified and explicit CLI for the NVPTX linker. The linker operates +/// with bitcode and uses LLVM backend to generate a PTX assembly. +pub struct PtxLinker<'a> { + cmd: Command, + sess: &'a Session, +} + +impl<'a> Linker for PtxLinker<'a> { + fn link_rlib(&mut self, path: &Path) { + self.cmd.arg("--rlib").arg(path); + } + + fn link_whole_rlib(&mut self, path: &Path) { + self.cmd.arg("--rlib").arg(path); + } + + fn include_path(&mut self, path: &Path) { + self.cmd.arg("-L").arg(path); + } + + fn debuginfo(&mut self) { + self.cmd.arg("--debug"); + } + + fn add_object(&mut self, path: &Path) { + self.cmd.arg("--bitcode").arg(path); + } + + fn args(&mut self, args: &[String]) { + self.cmd.args(args); + } + + fn optimize(&mut self) { + match self.sess.lto() { + Lto::Thin | Lto::Fat | Lto::ThinLocal => { + self.cmd.arg("-Olto"); + }, + + Lto::No => { }, + }; + } + + fn output_filename(&mut self, path: &Path) { + self.cmd.arg("-o").arg(path); + } + + fn finalize(&mut self) -> Command { + // Provide the linker with fallback to internal `target-cpu`. 
+ self.cmd.arg("--fallback-arch").arg(match self.sess.opts.cg.target_cpu { + Some(ref s) => s, + None => &self.sess.target.target.options.cpu + }); + + ::std::mem::replace(&mut self.cmd, Command::new("")) + } + + fn link_dylib(&mut self, _lib: &str) { + panic!("external dylibs not supported") + } + + fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) { + panic!("external dylibs not supported") + } + + fn link_staticlib(&mut self, _lib: &str) { + panic!("staticlibs not supported") + } + + fn link_whole_staticlib(&mut self, _lib: &str, _search_path: &[PathBuf]) { + panic!("staticlibs not supported") + } + + fn framework_path(&mut self, _path: &Path) { + panic!("frameworks not supported") + } + + fn link_framework(&mut self, _framework: &str) { + panic!("frameworks not supported") + } + + fn position_independent_executable(&mut self) { + } + + fn full_relro(&mut self) { + } + + fn partial_relro(&mut self) { + } + + fn no_relro(&mut self) { + } + + fn build_static_executable(&mut self) { + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + } + + fn pgo_gen(&mut self) { + } + + fn no_default_libraries(&mut self) { + } + + fn build_dylib(&mut self, _out_filename: &Path) { + } + + fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType) { + } + + fn subsystem(&mut self, _subsystem: &str) { + } + + fn no_position_independent_executable(&mut self) { + } + + fn group_start(&mut self) { + } + + fn group_end(&mut self) { + } + + fn linker_plugin_lto(&mut self) { + } +} diff --git a/src/librustc_codegen_ssa/back/lto.rs b/src/librustc_codegen_ssa/back/lto.rs index 8d03edca004f6..47e5d9af33ba4 100644 --- a/src/librustc_codegen_ssa/back/lto.rs +++ b/src/librustc_codegen_ssa/back/lto.rs @@ -1,18 +1,7 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::write::CodegenContext; -use traits::*; -use ModuleCodegen; +use crate::traits::*; +use crate::ModuleCodegen; -use rustc::util::time_graph::Timeline; use rustc_errors::FatalError; use std::sync::Arc; @@ -77,7 +66,6 @@ impl LtoModuleCodegen { pub unsafe fn optimize( &mut self, cgcx: &CodegenContext, - timeline: &mut Timeline ) -> Result, FatalError> { match *self { LtoModuleCodegen::Fat { ref mut module, .. } => { @@ -85,11 +73,10 @@ impl LtoModuleCodegen { { let config = cgcx.config(module.kind); B::run_lto_pass_manager(cgcx, &module, config, false); - timeline.record("fat-done"); } Ok(module) } - LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin, timeline), + LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin), } } diff --git a/src/librustc_codegen_ssa/back/mod.rs b/src/librustc_codegen_ssa/back/mod.rs index 3d7ead74d1c5d..888108408fbe3 100644 --- a/src/librustc_codegen_ssa/back/mod.rs +++ b/src/librustc_codegen_ssa/back/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
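The `PtxLinker` above makes two small decisions worth spelling out: any LTO mode other than `Lto::No` becomes an `-Olto` flag, and `--fallback-arch` prefers an explicit `-C target-cpu` over the target spec's default CPU. A hedged sketch with simplified stand-in types (this `Lto` enum and the `sm_*` CPU names are illustrative only, not the real values):

enum Lto { No, Thin, ThinLocal, Fat }

fn lto_flag(lto: &Lto) -> Option<&'static str> {
    match lto {
        Lto::Thin | Lto::Fat | Lto::ThinLocal => Some("-Olto"),
        Lto::No => None,
    }
}

fn fallback_arch<'a>(cli_target_cpu: Option<&'a str>, target_default: &'a str) -> &'a str {
    // Prefer the user-supplied `-C target-cpu`, otherwise the target's default CPU.
    cli_target_cpu.unwrap_or(target_default)
}

fn main() {
    assert_eq!(lto_flag(&Lto::Fat), Some("-Olto"));
    assert_eq!(fallback_arch(None, "sm_30"), "sm_30");
    assert_eq!(fallback_arch(Some("sm_70"), "sm_30"), "sm_70");
}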
- pub mod write; pub mod linker; pub mod lto; diff --git a/src/librustc_codegen_ssa/back/symbol_export.rs b/src/librustc_codegen_ssa/back/symbol_export.rs index a17a00ddb29ff..336f41b784a81 100644 --- a/src/librustc_codegen_ssa/back/symbol_export.rs +++ b/src/librustc_codegen_ssa/back/symbol_export.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::sync::Lrc; use std::sync::Arc; @@ -21,7 +11,7 @@ use rustc::middle::exported_symbols::{SymbolExportLevel, ExportedSymbol, metadat use rustc::session::config; use rustc::ty::{TyCtxt, SymbolName}; use rustc::ty::query::Providers; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::util::nodemap::{FxHashMap, DefIdMap}; use rustc_allocator::ALLOCATOR_METHODS; use rustc_data_structures::indexed_vec::IndexVec; @@ -32,7 +22,7 @@ pub type ExportedSymbols = FxHashMap< Arc>, >; -pub fn threshold(tcx: TyCtxt) -> SymbolExportLevel { +pub fn threshold(tcx: TyCtxt<'_, '_, '_>) -> SymbolExportLevel { crates_export_threshold(&tcx.sess.crate_types.borrow()) } @@ -79,7 +69,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut reachable_non_generics: DefIdMap<_> = tcx.reachable_set(LOCAL_CRATE).0 .iter() - .filter_map(|&node_id| { + .filter_map(|&hir_id| { // We want to ignore some FFI functions that are not exposed from // this crate. Reachable FFI functions can be lumped into two // categories: @@ -93,9 +83,9 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // // As a result, if this id is an FFI item (foreign item) then we only // let it through if it's included statically. - match tcx.hir().get(node_id) { + match tcx.hir().get_by_hir_id(hir_id) { Node::ForeignItem(..) => { - let def_id = tcx.hir().local_def_id(node_id); + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); if tcx.is_statically_included_foreign_item(def_id) { Some(def_id) } else { @@ -115,7 +105,7 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node: hir::ImplItemKind::Method(..), .. 
}) => { - let def_id = tcx.hir().local_def_id(node_id); + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); let generics = tcx.generics_of(def_id); if !generics.requires_monomorphization(tcx) && // Functions marked with #[inline] are only ever codegened @@ -157,14 +147,12 @@ fn reachable_non_generics_provider<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) .collect(); - if let Some(id) = *tcx.sess.proc_macro_decls_static.get() { - let def_id = tcx.hir().local_def_id(id); - reachable_non_generics.insert(def_id, SymbolExportLevel::C); + if let Some(id) = tcx.proc_macro_decls_static(LOCAL_CRATE) { + reachable_non_generics.insert(id, SymbolExportLevel::C); } - if let Some(id) = *tcx.sess.plugin_registrar_fn.get() { - let def_id = tcx.hir().local_def_id(id); - reachable_non_generics.insert(def_id, SymbolExportLevel::C); + if let Some(id) = tcx.plugin_registrar_fn(LOCAL_CRATE) { + reachable_non_generics.insert(id, SymbolExportLevel::C); } Lrc::new(reachable_non_generics) @@ -206,7 +194,7 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) .collect(); - if tcx.sess.entry_fn.borrow().is_some() { + if tcx.entry_fn(LOCAL_CRATE).is_some() { let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new("main")); symbols.push((exported_symbol, SymbolExportLevel::C)); @@ -275,7 +263,7 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def: InstanceDef::Item(def_id), substs, }) = mono_item { - if substs.types().next().is_some() { + if substs.non_erasable_generics().next().is_some() { symbols.push((ExportedSymbol::Generic(def_id, substs), SymbolExportLevel::Rust)); } @@ -294,7 +282,7 @@ fn exported_symbols_provider_local<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn upstream_monomorphizations_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, cnum: CrateNum) - -> Lrc, CrateNum>>>> + -> Lrc, CrateNum>>>> { debug_assert!(cnum == LOCAL_CRATE); @@ -346,7 +334,7 @@ fn upstream_monomorphizations_provider<'a, 'tcx>( fn upstream_monomorphizations_for_provider<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) - -> Option, CrateNum>>> + -> Option, CrateNum>>> { debug_assert!(!def_id.is_local()); tcx.upstream_monomorphizations(LOCAL_CRATE) @@ -354,16 +342,16 @@ fn upstream_monomorphizations_for_provider<'a, 'tcx>( .cloned() } -fn is_unreachable_local_definition_provider(tcx: TyCtxt, def_id: DefId) -> bool { - if let Some(node_id) = tcx.hir().as_local_node_id(def_id) { - !tcx.reachable_set(LOCAL_CRATE).0.contains(&node_id) +fn is_unreachable_local_definition_provider(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> bool { + if let Some(hir_id) = tcx.hir().as_local_hir_id(def_id) { + !tcx.reachable_set(LOCAL_CRATE).0.contains(&hir_id) } else { bug!("is_unreachable_local_definition called with non-local DefId: {:?}", def_id) } } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { providers.reachable_non_generics = reachable_non_generics_provider; providers.is_reachable_non_generic = is_reachable_non_generic_provider_local; providers.exported_symbols = exported_symbols_provider_local; @@ -371,12 +359,12 @@ pub fn provide(providers: &mut Providers) { providers.is_unreachable_local_definition = is_unreachable_local_definition_provider; } -pub fn provide_extern(providers: &mut Providers) { +pub fn provide_extern(providers: &mut Providers<'_>) { providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern; providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider; } -fn symbol_export_level(tcx: 
TyCtxt, sym_def_id: DefId) -> SymbolExportLevel { +fn symbol_export_level(tcx: TyCtxt<'_, '_, '_>, sym_def_id: DefId) -> SymbolExportLevel { // We export anything that's not mangled at the "C" layer as it probably has // to do with ABI concerns. We do not, however, apply such treatment to // special symbols in the standard library for various plumbing between diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs index ec790e6c6652a..fa8c4177eafe2 100644 --- a/src/librustc_codegen_ssa/back/write.rs +++ b/src/librustc_codegen_ssa/back/write.rs @@ -1,22 +1,12 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use {ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, CodegenResults, - RLIB_BYTECODE_EXTENSION}; +use crate::{ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, + CodegenResults, RLIB_BYTECODE_EXTENSION}; use super::linker::LinkerInfo; use super::lto::{self, SerializedModule}; use super::link::{self, remove, get_linker}; use super::command::Command; use super::symbol_export::ExportedSymbols; -use memmap; +use crate::traits::*; use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess}; use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; @@ -25,22 +15,24 @@ use rustc::middle::cstore::EncodedMetadata; use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; use rustc::session::Session; use rustc::util::nodemap::FxHashMap; -use rustc::util::time_graph::{self, TimeGraph, Timeline}; -use traits::*; use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; use rustc::ty::TyCtxt; use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry}; +use rustc::util::profiling::{ProfileCategory, SelfProfiler}; use rustc_fs_util::link_or_copy; use rustc_data_structures::svh::Svh; use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; use rustc_errors::emitter::{Emitter}; +use rustc_target::spec::MergeFunctions; use syntax::attr; use syntax::ext::hygiene::Mark; use syntax_pos::MultiSpan; use syntax_pos::symbol::Symbol; use jobserver::{Client, Acquired}; +use parking_lot::Mutex as PlMutex; use std::any::Any; +use std::borrow::Cow; use std::fs; use std::io; use std::mem; @@ -51,7 +43,7 @@ use std::sync::mpsc::{channel, Sender, Receiver}; use std::time::Instant; use std::thread; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; +const PRE_LTO_BC_EXT: &str = "pre-lto.bc"; /// Module-specific configuration for `optimize_and_codegen`. pub struct ModuleConfig { @@ -68,7 +60,7 @@ pub struct ModuleConfig { pub pgo_use: String, // Flags indicating which outputs to produce. 
- pub emit_pre_thin_lto_bc: bool, + pub emit_pre_lto_bc: bool, pub emit_no_opt_bc: bool, pub emit_bc: bool, pub emit_bc_compressed: bool, @@ -106,7 +98,7 @@ impl ModuleConfig { pgo_use: String::new(), emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, + emit_pre_lto_bc: false, emit_bc: false, emit_bc_compressed: false, emit_lto_bc: false, @@ -133,10 +125,10 @@ impl ModuleConfig { self.verify_llvm_ir = sess.verify_llvm_ir(); self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; self.no_builtins = no_builtins || sess.target.target.options.no_builtins; - self.time_passes = sess.time_passes(); + self.time_passes = sess.time_extended(); self.inline_threshold = sess.opts.cg.inline_threshold; self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || - sess.opts.debugging_opts.cross_lang_lto.enabled(); + sess.opts.cg.linker_plugin_lto.enabled(); let embed_bitcode = sess.target.target.options.embed_bitcode || sess.opts.debugging_opts.embed_bitcode; if embed_bitcode { @@ -162,8 +154,24 @@ impl ModuleConfig { sess.opts.optimize == config::OptLevel::Aggressive && !sess.target.target.options.is_like_emscripten; - self.merge_functions = sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive; + // Some targets (namely, NVPTX) interact badly with the MergeFunctions + // pass. This is because MergeFunctions can generate new function calls + // which may interfere with the target calling convention; e.g. for the + // NVPTX target, PTX kernels should not call other PTX kernels. + // MergeFunctions can also be configured to generate aliases instead, + // but aliases are not supported by some backends (again, NVPTX). + // Therefore, allow targets to opt out of the MergeFunctions pass, + // but otherwise keep the pass enabled (at O2 and O3) since it can be + // useful for reducing code size. 
+ self.merge_functions = match sess.opts.debugging_opts.merge_functions + .unwrap_or(sess.target.target.options.merge_functions) { + MergeFunctions::Disabled => false, + MergeFunctions::Trampolines | + MergeFunctions::Aliases => { + sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive + } + }; } pub fn bitcode_needed(&self) -> bool { @@ -189,12 +197,47 @@ impl Clone for TargetMachineFactory { } } +pub struct ProfileGenericActivityTimer { + profiler: Option>>, + category: ProfileCategory, + label: Cow<'static, str>, +} + +impl ProfileGenericActivityTimer { + pub fn start( + profiler: Option>>, + category: ProfileCategory, + label: Cow<'static, str>, + ) -> ProfileGenericActivityTimer { + if let Some(profiler) = &profiler { + let mut p = profiler.lock(); + p.start_activity(category, label.clone()); + } + + ProfileGenericActivityTimer { + profiler, + category, + label, + } + } +} + +impl Drop for ProfileGenericActivityTimer { + fn drop(&mut self) { + if let Some(profiler) = &self.profiler { + let mut p = profiler.lock(); + p.end_activity(self.category, self.label.clone()); + } + } +} + /// Additional resources used by optimize_and_codegen (not module specific) #[derive(Clone)] pub struct CodegenContext { // Resources needed when running LTO pub backend: B, pub time_passes: bool, + pub profiler: Option>>, pub lto: Lto, pub no_landing_pads: bool, pub save_temps: bool, @@ -229,16 +272,13 @@ pub struct CodegenContext { pub cgu_reuse_tracker: CguReuseTracker, // Channel back to the main control thread to send messages to pub coordinator_send: Sender>, - // A reference to the TimeGraph so we can register timings. None means that - // measuring is disabled. - pub time_graph: Option, // The assembler command if no_integrated_as option is enabled, None otherwise pub assembler_cmd: Option> } impl CodegenContext { pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone())) } pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { @@ -248,33 +288,60 @@ impl CodegenContext { ModuleKind::Allocator => &self.allocator_module_config, } } + + #[inline(never)] + #[cold] + fn profiler_active ()>(&self, f: F) { + match &self.profiler { + None => bug!("profiler_active() called but there was no profiler active"), + Some(profiler) => { + let mut p = profiler.lock(); + + f(&mut p); + } + } + } + + #[inline(always)] + pub fn profile ()>(&self, f: F) { + if unlikely!(self.profiler.is_some()) { + self.profiler_active(f) + } + } + + pub fn profile_activity( + &self, + category: ProfileCategory, + label: impl Into>, + ) -> ProfileGenericActivityTimer { + ProfileGenericActivityTimer::start(self.profiler.clone(), category, label.into()) + } } fn generate_lto_work( cgcx: &CodegenContext, - needs_fat_lto: Vec>, + needs_fat_lto: Vec>, needs_thin_lto: Vec<(String, B::ThinBuffer)>, import_only_modules: Vec<(SerializedModule, WorkProduct)> ) -> Vec<(WorkItem, u64)> { - let mut timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - "generate lto") - }).unwrap_or(Timeline::noop()); + cgcx.profile(|p| p.start_activity(ProfileCategory::Linking, "codegen_run_lto")); let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() { assert!(needs_thin_lto.is_empty()); - assert!(import_only_modules.is_empty()); - let lto_module = B::run_fat_lto(cgcx, needs_fat_lto, &mut timeline) - 
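Restating the MergeFunctions decision above as a self-contained sketch (not part of the patch; the enums are simplified stand-ins for the real `rustc_target::spec::MergeFunctions` and `config::OptLevel`): the pass stays enabled at -O2/-O3 unless either the `-Z merge-functions` override or the target spec disables it.

#[derive(Clone, Copy)]
enum MergeFunctions { Disabled, Trampolines, Aliases }

#[derive(Clone, Copy, PartialEq)]
enum OptLevel { No, Less, Default, Aggressive }

fn merge_functions_enabled(
    user_override: Option<MergeFunctions>,
    target_default: MergeFunctions,
    opt_level: OptLevel,
) -> bool {
    match user_override.unwrap_or(target_default) {
        // e.g. NVPTX opts out: merged functions (or aliases) would break the
        // rule that PTX kernels must not call other PTX kernels.
        MergeFunctions::Disabled => false,
        MergeFunctions::Trampolines | MergeFunctions::Aliases => {
            opt_level == OptLevel::Default || opt_level == OptLevel::Aggressive
        }
    }
}

fn main() {
    assert!(!merge_functions_enabled(None, MergeFunctions::Disabled, OptLevel::Aggressive));
    assert!(merge_functions_enabled(None, MergeFunctions::Aliases, OptLevel::Default));
    assert!(!merge_functions_enabled(Some(MergeFunctions::Disabled), MergeFunctions::Aliases, OptLevel::No));
}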
.unwrap_or_else(|e| e.raise()); + let lto_module = B::run_fat_lto( + cgcx, + needs_fat_lto, + import_only_modules, + ) + .unwrap_or_else(|e| e.raise()); (vec![lto_module], vec![]) } else { assert!(needs_fat_lto.is_empty()); - B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules, &mut timeline) + B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules) .unwrap_or_else(|e| e.raise()) }; - lto_modules.into_iter().map(|module| { + let result = lto_modules.into_iter().map(|module| { let cost = module.cost(); (WorkItem::LTO(module), cost) }).chain(copy_jobs.into_iter().map(|wp| { @@ -282,7 +349,11 @@ fn generate_lto_work( name: wp.cgu_name.clone(), source: wp, }), 0) - })).collect() + })).collect(); + + cgcx.profile(|p| p.end_activity(ProfileCategory::Linking, "codegen_run_lto")); + + result } pub struct CompiledModules { @@ -296,14 +367,14 @@ fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { sess.opts.output_types.contains_key(&OutputType::Exe) } -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { +fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool { if sess.opts.incremental.is_none() { return false } match sess.lto() { - Lto::Fat | Lto::No => false, + Lto::Fat | Lto::Thin | Lto::ThinLocal => true, } @@ -311,8 +382,7 @@ fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { pub fn start_async_codegen( backend: B, - tcx: TyCtxt, - time_graph: Option, + tcx: TyCtxt<'_, '_, '_>, metadata: EncodedMetadata, coordinator_receive: Receiver>, total_cgus: usize @@ -369,7 +439,7 @@ pub fn start_async_codegen( // Save all versions of the bytecode if we're saving our temporaries. if sess.opts.cg.save_temps { modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_pre_lto_bc = true; modules_config.emit_bc = true; modules_config.emit_lto_bc = true; metadata_config.emit_bc = true; @@ -384,8 +454,8 @@ pub fn start_async_codegen( allocator_config.emit_bc_compressed = true; } - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); + modules_config.emit_pre_lto_bc = + need_pre_lto_bitcode_for_incr_comp(sess); modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || tcx.sess.target.target.options.no_integrated_as; @@ -436,7 +506,6 @@ pub fn start_async_codegen( coordinator_receive, total_cgus, sess.jobserver.clone(), - time_graph.clone(), Arc::new(modules_config), Arc::new(metadata_config), Arc::new(allocator_config)); @@ -450,7 +519,6 @@ pub fn start_async_codegen( linker_info, crate_info, - time_graph, coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), codegen_worker_receive, shared_emitter_main, @@ -656,7 +724,7 @@ pub enum WorkItem { /// Copy the post-LTO artifacts from the incremental cache to the output /// directory. CopyPostLtoArtifacts(CachedModuleCodegen), - /// Perform (Thin)LTO on the given module. + /// Performs (Thin)LTO on the given module. 
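The profiling hooks above come in two flavours: explicit `start_activity`/`end_activity` pairs (as in `generate_lto_work`) and the RAII `ProfileGenericActivityTimer`, which ends its activity in `Drop` so a scope is accounted for even on early return. A hedged, self-contained sketch of the RAII flavour; `Profiler` and `ActivityGuard` are hypothetical stand-ins for the `SelfProfiler` machinery, and this toy version records elapsed time instead of calling start/end hooks.

use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

struct Profiler {
    events: Vec<(String, Duration)>,
}

struct ActivityGuard {
    profiler: Arc<Mutex<Profiler>>,
    label: String,
    started: Instant,
}

impl ActivityGuard {
    fn start(profiler: Arc<Mutex<Profiler>>, label: &str) -> ActivityGuard {
        ActivityGuard { profiler, label: label.to_string(), started: Instant::now() }
    }
}

impl Drop for ActivityGuard {
    fn drop(&mut self) {
        // Runs on every exit path of the timed scope, including early returns.
        let elapsed = self.started.elapsed();
        self.profiler.lock().unwrap().events.push((self.label.clone(), elapsed));
    }
}

fn main() {
    let profiler = Arc::new(Mutex::new(Profiler { events: Vec::new() }));
    {
        let _guard = ActivityGuard::start(profiler.clone(), "codegen cgu-0");
        // ... the work being timed would happen here ...
    }
    println!("{:?}", profiler.lock().unwrap().events);
}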
LTO(lto::LtoModuleCodegen), } @@ -680,26 +748,33 @@ impl WorkItem { enum WorkItemResult { Compiled(CompiledModule), - NeedsFatLTO(ModuleCodegen), + NeedsFatLTO(FatLTOInput), NeedsThinLTO(String, B::ThinBuffer), } +pub enum FatLTOInput { + Serialized { + name: String, + buffer: B::ModuleBuffer, + }, + InMemory(ModuleCodegen), +} + fn execute_work_item( cgcx: &CodegenContext, work_item: WorkItem, - timeline: &mut Timeline ) -> Result, FatalError> { let module_config = cgcx.config(work_item.module_kind()); match work_item { WorkItem::Optimize(module) => { - execute_optimize_work_item(cgcx, module, module_config, timeline) + execute_optimize_work_item(cgcx, module, module_config) } WorkItem::CopyPostLtoArtifacts(module) => { - execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) + execute_copy_from_cache_work_item(cgcx, module, module_config) } WorkItem::LTO(module) => { - execute_lto_work_item(cgcx, module, module_config, timeline) + execute_lto_work_item(cgcx, module, module_config) } } } @@ -715,12 +790,11 @@ fn execute_optimize_work_item( cgcx: &CodegenContext, module: ModuleCodegen, module_config: &ModuleConfig, - timeline: &mut Timeline ) -> Result, FatalError> { let diag_handler = cgcx.create_diag_handler(); unsafe { - B::optimize(cgcx, &diag_handler, &module, module_config, timeline)?; + B::optimize(cgcx, &diag_handler, &module, module_config)?; } // After we've done the initial round of optimizations we need to @@ -731,7 +805,7 @@ fn execute_optimize_work_item( // If the linker does LTO, we don't have to do it. Note that we // keep doing full LTO, if it is requested, as not to break the // assumption that the output will be a single module. - let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + let linker_does_lto = cgcx.opts.cg.linker_plugin_lto.enabled(); // When we're automatically doing ThinLTO for multi-codegen-unit // builds we don't actually want to LTO the allocator modules if @@ -765,18 +839,47 @@ fn execute_optimize_work_item( } }; + // If we're doing some form of incremental LTO then we need to be sure to + // save our module to disk first. + let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc { + let filename = pre_lto_bitcode_filename(&module.name); + cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename)) + } else { + None + }; + Ok(match lto_type { ComputedLtoType::No => { let module = unsafe { - B::codegen(cgcx, &diag_handler, module, module_config, timeline)? + B::codegen(cgcx, &diag_handler, module, module_config)? 
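The new `FatLTOInput` above captures that a fat-LTO input may either still be a live in-memory module or a module already serialized to bitcode (which is what lets it double as the cached pre-LTO artifact). A small sketch of the same shape, with simplified stand-in types rather than the real backend buffers:

enum FatLtoInput {
    Serialized { name: String, buffer: Vec<u8> },
    InMemory { name: String },
}

fn describe(input: &FatLtoInput) -> String {
    match input {
        FatLtoInput::Serialized { name, buffer } => {
            format!("{}: {} bytes of serialized bitcode", name, buffer.len())
        }
        FatLtoInput::InMemory { name } => format!("{}: still in memory", name),
    }
}

fn main() {
    let inputs = vec![
        FatLtoInput::Serialized { name: "cgu-0".into(), buffer: vec![0u8; 16] },
        FatLtoInput::InMemory { name: "cgu-1".into() },
    ];
    for input in &inputs {
        println!("{}", describe(input));
    }
}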
}; WorkItemResult::Compiled(module) } ComputedLtoType::Thin => { - let (name, thin_buffer) = B::prepare_thin(cgcx, module); + let (name, thin_buffer) = B::prepare_thin(module); + if let Some(path) = bitcode { + fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| { + panic!("Error writing pre-lto-bitcode file `{}`: {}", + path.display(), + e); + }); + } WorkItemResult::NeedsThinLTO(name, thin_buffer) } - ComputedLtoType::Fat => WorkItemResult::NeedsFatLTO(module), + ComputedLtoType::Fat => { + match bitcode { + Some(path) => { + let (name, buffer) = B::serialize_module(module); + fs::write(&path, buffer.data()).unwrap_or_else(|e| { + panic!("Error writing pre-lto-bitcode file `{}`: {}", + path.display(), + e); + }); + WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }) + } + None => WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module)), + } + } }) } @@ -784,7 +887,6 @@ fn execute_copy_from_cache_work_item( cgcx: &CodegenContext, module: CachedModuleCodegen, module_config: &ModuleConfig, - _: &mut Timeline ) -> Result, FatalError> { let incr_comp_session_dir = cgcx.incr_comp_session_dir .as_ref() @@ -846,13 +948,12 @@ fn execute_lto_work_item( cgcx: &CodegenContext, mut module: lto::LtoModuleCodegen, module_config: &ModuleConfig, - timeline: &mut Timeline ) -> Result, FatalError> { let diag_handler = cgcx.create_diag_handler(); unsafe { - let module = module.optimize(cgcx, timeline)?; - let module = B::codegen(cgcx, &diag_handler, module, module_config, timeline)?; + let module = module.optimize(cgcx)?; + let module = B::codegen(cgcx, &diag_handler, module, module_config)?; Ok(WorkItemResult::Compiled(module)) } } @@ -860,7 +961,7 @@ fn execute_lto_work_item( pub enum Message { Token(io::Result), NeedsFatLTO { - result: ModuleCodegen, + result: FatLTOInput, worker_id: usize, }, NeedsThinLTO { @@ -900,14 +1001,13 @@ enum MainThreadWorkerState { fn start_executing_work( backend: B, - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, crate_info: &CrateInfo, shared_emitter: SharedEmitter, codegen_worker_send: Sender>, coordinator_receive: Receiver>, total_cgus: usize, jobserver: Client, - time_graph: Option, modules_config: Arc, metadata_config: Arc, allocator_config: Arc @@ -975,6 +1075,13 @@ fn start_executing_work( None }; + let ol = if tcx.sess.opts.debugging_opts.no_codegen + || !tcx.sess.opts.output_types.should_codegen() { + // If we know that we won’t be doing codegen, create target machines without optimisation. 
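When incremental compilation will need the bitcode again, the code above writes the freshly produced buffer into the incremental session directory before LTO runs, panicking on I/O failure just as the hunk does. A minimal sketch under the assumption of a plain byte buffer and an on-disk session directory (both hypothetical here):

use std::fs;
use std::path::Path;

fn save_pre_lto_bitcode(incr_comp_session_dir: &Path, module_name: &str, data: &[u8]) {
    // Mirrors `pre_lto_bitcode_filename`: "<module>.pre-lto.bc".
    let path = incr_comp_session_dir.join(format!("{}.pre-lto.bc", module_name));
    fs::write(&path, data).unwrap_or_else(|e| {
        panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e)
    });
}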
+ config::OptLevel::No + } else { + tcx.backend_optimization_level(LOCAL_CRATE) + }; let cgcx = CodegenContext:: { backend: backend.clone(), crate_types: sess.crate_types.borrow().clone(), @@ -984,7 +1091,8 @@ fn start_executing_work( fewer_names: sess.fewer_names(), save_temps: sess.opts.cg.save_temps, opts: Arc::new(sess.opts.clone()), - time_passes: sess.time_passes(), + time_passes: sess.time_extended(), + profiler: sess.self_profiling.clone(), exported_symbols, plugin_passes: sess.plugin_llvm_passes.borrow().clone(), remark: sess.opts.cg.remark.clone(), @@ -993,12 +1101,11 @@ fn start_executing_work( cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), coordinator_send, diag_emitter: shared_emitter.clone(), - time_graph, output_filenames: tcx.output_filenames(LOCAL_CRATE), regular_module_config: modules_config, metadata_module_config: metadata_config, allocator_module_config: allocator_config, - tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, false)), + tm_factory: TargetMachineFactory(backend.target_machine_factory(tcx.sess, ol, false)), total_cgus, msvc_imps_needed: msvc_imps_needed(tcx), target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), @@ -1498,12 +1605,6 @@ fn start_executing_work( } pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); fn spawn_work( cgcx: CodegenContext, @@ -1553,13 +1654,12 @@ fn spawn_work( // as a diagnostic was already sent off to the main thread - just // surface that there was an error in this worker. 
bomb.result = { - let timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND, - &work.name()) - }); - let mut timeline = timeline.unwrap_or(Timeline::noop()); - execute_work_item(&cgcx, work, &mut timeline).ok() + let label = work.name(); + cgcx.profile(|p| p.start_activity(ProfileCategory::Codegen, label.clone())); + let result = execute_work_item(&cgcx, work).ok(); + cgcx.profile(|p| p.end_activity(ProfileCategory::Codegen, label)); + + result }; }); } @@ -1635,7 +1735,7 @@ impl SharedEmitter { } impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { + fn emit(&mut self, db: &DiagnosticBuilder<'_>) { drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { msg: db.message(), code: db.code.clone(), @@ -1713,7 +1813,6 @@ pub struct OngoingCodegen { pub windows_subsystem: Option, pub linker_info: LinkerInfo, pub crate_info: CrateInfo, - pub time_graph: Option, pub coordinator_send: Sender>, pub codegen_worker_receive: Receiver>, pub shared_emitter_main: SharedEmitterMain, @@ -1742,10 +1841,6 @@ impl OngoingCodegen { sess.abort_if_errors(); - if let Some(time_graph) = self.time_graph { - time_graph.dump(&format!("{}-timings", self.crate_name)); - } - let work_products = copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules); @@ -1774,7 +1869,7 @@ impl OngoingCodegen { } pub fn submit_pre_codegened_module_to_llvm(&self, - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, module: ModuleCodegen) { self.wait_for_signal_to_codegen_item(); self.check_for_errors(tcx.sess); @@ -1784,13 +1879,13 @@ impl OngoingCodegen { submit_codegened_module_to_llvm(&self.backend, tcx, module, cost); } - pub fn codegen_finished(&self, tcx: TyCtxt) { + pub fn codegen_finished(&self, tcx: TyCtxt<'_, '_, '_>) { self.wait_for_signal_to_codegen_item(); self.check_for_errors(tcx.sess); drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::))); } - /// Consume this context indicating that codegen was entirely aborted, and + /// Consumes this context indicating that codegen was entirely aborted, and /// we need to exit as quickly as possible. /// /// This method blocks the current thread until all worker threads have @@ -1823,7 +1918,7 @@ impl OngoingCodegen { pub fn submit_codegened_module_to_llvm( _backend: &B, - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, module: ModuleCodegen, cost: u64 ) { @@ -1836,7 +1931,7 @@ pub fn submit_codegened_module_to_llvm( pub fn submit_post_lto_module_to_llvm( _backend: &B, - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, module: CachedModuleCodegen ) { let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); @@ -1848,7 +1943,7 @@ pub fn submit_post_lto_module_to_llvm( pub fn submit_pre_lto_module_to_llvm( _backend: &B, - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, module: CachedModuleCodegen ) { let filename = pre_lto_bitcode_filename(&module.name); @@ -1870,13 +1965,13 @@ pub fn submit_pre_lto_module_to_llvm( } pub fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) + format!("{}.{}", module_name, PRE_LTO_BC_EXT) } -fn msvc_imps_needed(tcx: TyCtxt) -> bool { +fn msvc_imps_needed(tcx: TyCtxt<'_, '_, '_>) -> bool { // This should never be true (because it's not supported). If it is true, // something is wrong with commandline arg validation. 
- assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + assert!(!(tcx.sess.opts.cg.linker_plugin_lto.enabled() && tcx.sess.target.target.options.is_like_msvc && tcx.sess.opts.cg.prefer_dynamic)); @@ -1884,6 +1979,6 @@ fn msvc_imps_needed(tcx: TyCtxt) -> bool { tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && // ThinLTO can't handle this workaround in all cases, so we don't // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() + // dynamic linking when linker plugin LTO is enabled. + !tcx.sess.opts.cg.linker_plugin_lto.enabled() } diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs index 8a5b8bd2babbb..27e3e30669905 100644 --- a/src/librustc_codegen_ssa/base.rs +++ b/src/librustc_codegen_ssa/base.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Codegen the completed AST to the LLVM IR. //! //! Some functions here, such as codegen_block and codegen_expr, return a value -- @@ -17,16 +7,16 @@ //! //! Hopefully useful general knowledge about codegen: //! -//! * There's no way to find out the Ty type of a Value. Doing so -//! would be "trying to get the eggs out of an omelette" (credit: -//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, -//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, -//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. +//! * There's no way to find out the `Ty` type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`, +//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int, +//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`. 
-use {ModuleCodegen, ModuleKind, CachedModuleCodegen}; +use crate::{ModuleCodegen, ModuleKind, CachedModuleCodegen}; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::weak_lang_items; use rustc::mir::mono::{Stats, CodegenUnitNameBuilder}; @@ -38,26 +28,25 @@ use rustc::util::common::{time, print_time_passes_entry}; use rustc::util::profiling::ProfileCategory; use rustc::session::config::{self, EntryFnType, Lto}; use rustc::session::Session; -use mir::place::PlaceRef; -use back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, - submit_post_lto_module_to_llvm}; -use {MemFlags, CrateInfo}; -use callee; use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{RealPredicate, TypeKind, IntPredicate}; -use meth; -use mir; -use rustc::util::time_graph; use rustc_mir::monomorphize::Instance; use rustc_mir::monomorphize::partitioning::{CodegenUnit, CodegenUnitExt}; -use mono_item::MonoItem; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::sync::Lrc; use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr}; use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; +use crate::mir::place::PlaceRef; +use crate::back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, + submit_post_lto_module_to_llvm}; +use crate::{MemFlags, CrateInfo}; +use crate::callee; +use crate::common::{RealPredicate, TypeKind, IntPredicate}; +use crate::meth; +use crate::mir; +use crate::mono_item::MonoItem; -use traits::*; +use crate::traits::*; use std::any::Any; use std::cmp; @@ -68,7 +57,7 @@ use syntax_pos::Span; use syntax::attr; use rustc::hir; -use mir::operand::OperandValue; +use crate::mir::operand::OperandValue; use std::marker::PhantomData; @@ -166,7 +155,7 @@ pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.sext(cmp, ret_ty) } -/// Retrieve the information we are losing (making dynamic) in an unsizing +/// Retrieves the information we are losing (making dynamic) in an unsizing /// adjustment. /// /// The `old_info` argument is a bit funny. It is intended for use @@ -357,7 +346,7 @@ fn cast_shift_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( } } -/// Returns whether this session's target will use SEH-based unwinding. +/// Returns `true` if this session's target will use SEH-based unwinding. /// /// This is only true for MSVC targets, and even then the 64-bit MSVC target /// currently uses SEH-ish unwinding with DWARF info tables to the side (same as @@ -380,7 +369,7 @@ pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, val: Bx::Value, - layout: layout::TyLayout, + layout: layout::TyLayout<'_>, ) -> Bx::Value { if let layout::Abi::Scalar(ref scalar) = layout.abi { return to_immediate_scalar(bx, val, scalar); @@ -446,15 +435,13 @@ pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( mir::codegen_mir::(cx, lldecl, &mir, instance, sig); } -/// Create the `main` function which will initialize the rust runtime and call +/// Creates the `main` function which will initialize the rust runtime and call /// users main function. 
pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( cx: &'a Bx::CodegenCx ) { - let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { - Some((id, span, _)) => { - (cx.tcx().hir().local_def_id(id), span) - } + let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) { + Some((def_id, _)) => { (def_id, cx.tcx().def_span(def_id)) }, None => return, }; @@ -468,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let main_llfn = cx.get_fn(instance); - let et = cx.sess().entry_fn.get().map(|e| e.2); + let et = cx.tcx().entry_fn(LOCAL_CRATE).map(|e| e.1); match et { Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), @@ -514,8 +501,8 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx.insert_reference_to_gdb_debug_scripts_section_global(); // Params from native main() used as args for rust start function - let param_argc = cx.get_param(llfn, 0); - let param_argv = cx.get_param(llfn, 1); + let param_argc = bx.get_param(0); + let param_argv = bx.get_param(1); let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); let arg_argv = param_argv; @@ -540,11 +527,6 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( } pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); - pub fn codegen_crate( backend: B, @@ -557,17 +539,17 @@ pub fn codegen_crate( let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); // Codegen the metadata. - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen, "codegen crate metadata")); let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).as_str() .to_string(); - let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); + let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name); let metadata = time(tcx.sess, "write metadata", || { - backend.write_metadata(tcx, &metadata_llvm_module) + backend.write_metadata(tcx, &mut metadata_llvm_module) }); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen, "codegen crate metadata")); let metadata_module = ModuleCodegen { name: metadata_cgu_name, @@ -575,19 +557,12 @@ pub fn codegen_crate( kind: ModuleKind::Metadata, }; - let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { - Some(time_graph::TimeGraph::new()) - } else { - None - }; - // Skip crate items and just output metadata in -Z no-codegen mode. 
if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() { let ongoing_codegen = start_async_codegen( backend, tcx, - time_graph, metadata, rx, 1); @@ -621,7 +596,6 @@ pub fn codegen_crate( let ongoing_codegen = start_async_codegen( backend.clone(), tcx, - time_graph.clone(), metadata, rx, codegen_units.len()); @@ -648,9 +622,9 @@ pub fn codegen_crate( &["crate"], Some("allocator")).as_str() .to_string(); - let modules = backend.new_metadata(tcx.sess, &llmod_id); + let mut modules = backend.new_metadata(tcx, &llmod_id); time(tcx.sess, "write allocator module", || { - backend.codegen_allocator(tcx, &modules, kind) + backend.codegen_allocator(tcx, &mut modules, kind) }); Some(ModuleCodegen { @@ -688,15 +662,14 @@ pub fn codegen_crate( match cgu_reuse { CguReuse::No => { - let _timing_guard = time_graph.as_ref().map(|time_graph| { - time_graph.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - &format!("codegen {}", cgu.name())) - }); + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen, + format!("codegen {}", cgu.name()))); let start_time = Instant::now(); let stats = backend.compile_codegen_unit(tcx, *cgu.name()); all_stats.extend(stats); total_codegen_time += start_time.elapsed(); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen, + format!("codegen {}", cgu.name()))); false } CguReuse::PreLto => { @@ -814,7 +787,7 @@ fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { } impl CrateInfo { - pub fn new(tcx: TyCtxt) -> CrateInfo { + pub fn new(tcx: TyCtxt<'_, '_, '_>) -> CrateInfo { let mut info = CrateInfo { panic_runtime: None, compiler_builtins: None, @@ -828,21 +801,11 @@ impl CrateInfo { used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), used_crate_source: Default::default(), - wasm_imports: Default::default(), lang_item_to_crate: Default::default(), missing_lang_items: Default::default(), }; let lang_items = tcx.lang_items(); - let load_wasm_items = tcx.sess.crate_types.borrow() - .iter() - .any(|c| *c != config::CrateType::Rlib) && - tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; - - if load_wasm_items { - info.load_wasm_imports(tcx, LOCAL_CRATE); - } - let crates = tcx.crates(); let n_crates = crates.len(); @@ -870,9 +833,6 @@ impl CrateInfo { if tcx.is_no_builtins(cnum) { info.is_no_builtins.insert(cnum); } - if load_wasm_items { - info.load_wasm_imports(tcx, cnum); - } let missing = tcx.missing_lang_items(cnum); for &item in missing.iter() { if let Ok(id) = lang_items.require(item) { @@ -891,24 +851,48 @@ impl CrateInfo { return info } - - fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { - self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { - let instance = Instance::mono(tcx, id); - let import_name = tcx.symbol_name(instance); - - (import_name.to_string(), module.clone()) - })); - } } -fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { +fn is_codegened_item(tcx: TyCtxt<'_, '_, '_>, id: DefId) -> bool { let (all_mono_items, _) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); all_mono_items.contains(&id) } -pub fn provide_both(providers: &mut Providers) { +pub fn provide_both(providers: &mut Providers<'_>) { + providers.backend_optimization_level = |tcx, cratenum| { + let for_speed = match tcx.sess.opts.optimize { + // If globally no optimisation is done, #[optimize] has no effect. 
+ // + // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the + // pass manager and it is likely that some module-wide passes (such as inliner or + // cross-function constant propagation) would ignore the `optnone` annotation we put + // on the functions, thus necessarily involving these functions into optimisations. + config::OptLevel::No => return config::OptLevel::No, + // If globally optimise-speed is already specified, just use that level. + config::OptLevel::Less => return config::OptLevel::Less, + config::OptLevel::Default => return config::OptLevel::Default, + config::OptLevel::Aggressive => return config::OptLevel::Aggressive, + // If globally optimize-for-size has been requested, use -O2 instead (if optimize(size) + // are present). + config::OptLevel::Size => config::OptLevel::Default, + config::OptLevel::SizeMin => config::OptLevel::Default, + }; + + let (defids, _) = tcx.collect_and_partition_mono_items(cratenum); + for id in &*defids { + let hir::CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id); + match optimize { + attr::OptimizeAttr::None => continue, + attr::OptimizeAttr::Size => continue, + attr::OptimizeAttr::Speed => { + return for_speed; + } + } + } + return tcx.sess.opts.optimize; + }; + providers.dllimport_foreign_items = |tcx, krate| { let module_map = tcx.foreign_modules(krate); let module_map = module_map.iter() diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs index 5ff1d9b59923a..4744dd6302fb3 100644 --- a/src/librustc_codegen_ssa/callee.rs +++ b/src/librustc_codegen_ssa/callee.rs @@ -1,22 +1,12 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use traits::*; +use crate::traits::*; use rustc::ty; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::hir::def_id::DefId; pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>( cx: &Cx, def_id: DefId, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, ) -> Cx::Value { cx.get_fn( ty::Instance::resolve( @@ -33,7 +23,7 @@ pub fn resolve_and_get_fn_for_vtable<'tcx, >( cx: &Cx, def_id: DefId, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, ) -> Cx::Value { cx.get_fn( ty::Instance::resolve_for_vtable( diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs index 70b7729b78b40..0e1885fe29ba6 100644 --- a/src/librustc_codegen_ssa/common.rs +++ b/src/librustc_codegen_ssa/common.rs @@ -1,36 +1,15 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
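The `backend_optimization_level` provider above keeps the crate-wide `-C opt-level` untouched except in one case: when optimizing for size (opt-level `s`/`z`), the backend is bumped back to -O2 if any item is marked `#[optimize(speed)]`. The same decision as a stand-alone sketch with stand-in enums (an approximation of the query, not the patch's code):

#[derive(Clone, Copy, PartialEq, Debug)]
enum OptLevel { No, Less, Default, Aggressive, Size, SizeMin }

#[derive(Clone, Copy)]
enum OptimizeAttr { None, Size, Speed }

fn backend_optimization_level(global: OptLevel, item_attrs: &[OptimizeAttr]) -> OptLevel {
    let for_speed = match global {
        // If no optimisation is done globally, #[optimize] has no effect.
        OptLevel::No => return OptLevel::No,
        // Levels that already optimise for speed are used as-is.
        OptLevel::Less => return OptLevel::Less,
        OptLevel::Default => return OptLevel::Default,
        OptLevel::Aggressive => return OptLevel::Aggressive,
        // Only the size-optimised levels can be locally upgraded, and only to -O2.
        OptLevel::Size | OptLevel::SizeMin => OptLevel::Default,
    };
    for attr in item_attrs {
        if let OptimizeAttr::Speed = attr {
            return for_speed;
        }
    }
    global
}

fn main() {
    assert_eq!(backend_optimization_level(OptLevel::Size, &[OptimizeAttr::Speed]), OptLevel::Default);
    assert_eq!(backend_optimization_level(OptLevel::SizeMin, &[OptimizeAttr::None]), OptLevel::SizeMin);
    assert_eq!(backend_optimization_level(OptLevel::No, &[OptimizeAttr::Speed]), OptLevel::No);
}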
#![allow(non_camel_case_types, non_snake_case)] -use rustc::ty::{self, Ty, TyCtxt}; -use syntax_pos::{DUMMY_SP, Span}; +use rustc::ty::{Ty, TyCtxt}; +use syntax_pos::Span; use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; -use base; -use traits::*; +use crate::base; +use crate::traits::*; use rustc::hir; -use traits::BuilderMethods; - -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} +use crate::traits::BuilderMethods; pub enum IntPredicate { IntEQ, @@ -132,7 +111,7 @@ pub enum TypeKind { mod temp_stable_hash_impls { use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, HashStable}; - use ModuleCodegen; + use crate::ModuleCodegen; impl HashStable for ModuleCodegen { fn hash_stable(&self, @@ -143,7 +122,7 @@ mod temp_stable_hash_impls { } } -pub fn langcall(tcx: TyCtxt, +pub fn langcall(tcx: TyCtxt<'_, '_, '_>, span: Option, msg: &str, li: LangItem) diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs index bcf6d7b6bf8f2..aa7cdbed99446 100644 --- a/src/librustc_codegen_ssa/debuginfo.rs +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -1,16 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use syntax_pos::{BytePos, Span}; use rustc::hir::def_id::CrateNum; -use std::cell::Cell; pub enum FunctionDebugContext { RegularContext(FunctionDebugContextData), @@ -46,10 +35,10 @@ impl FunctionDebugContext { /// they are disabled when beginning to codegen a new function. This functions /// switches source location emitting on and must therefore be called before the /// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { +pub fn start_emitting_source_locations(dbg_context: &mut FunctionDebugContext) { match *dbg_context { - FunctionDebugContext::RegularContext(ref data) => { - data.source_locations_enabled.set(true) + FunctionDebugContext::RegularContext(ref mut data) => { + data.source_locations_enabled = true; }, _ => { /* safe to ignore */ } } @@ -57,7 +46,7 @@ pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) pub struct FunctionDebugContextData { pub fn_metadata: D, - pub source_locations_enabled: Cell, + pub source_locations_enabled: bool, pub defining_crate: CrateNum, } diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs index abe19068889c1..e7ef178cfabfb 100644 --- a/src/librustc_codegen_ssa/diagnostics.rs +++ b/src/librustc_codegen_ssa/diagnostics.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
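The debuginfo change above is a small ownership cleanup: once `start_emitting_source_locations` takes the context by `&mut`, the `Cell<bool>` wrapper that provided interior mutability through a shared reference can become a plain `bool`. A sketch of both shapes (stand-in structs, not the patch's types):

use std::cell::Cell;

struct WithCell { source_locations_enabled: Cell<bool> }
struct WithBool { source_locations_enabled: bool }

fn enable_shared(ctx: &WithCell) {
    // Through a shared reference the flag can only be flipped via the Cell.
    ctx.source_locations_enabled.set(true);
}

fn enable_exclusive(ctx: &mut WithBool) {
    // With exclusive access a plain field assignment is enough.
    ctx.source_locations_enabled = true;
}

fn main() {
    let shared = WithCell { source_locations_enabled: Cell::new(false) };
    enable_shared(&shared);

    let mut exclusive = WithBool { source_locations_enabled: false };
    enable_exclusive(&mut exclusive);

    assert!(shared.source_locations_enabled.get());
    assert!(exclusive.source_locations_enabled);
}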
This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] register_long_diagnostics! { diff --git a/src/librustc_codegen_ssa/glue.rs b/src/librustc_codegen_ssa/glue.rs index b3257dbc36b90..e2b49de05bd11 100644 --- a/src/librustc_codegen_ssa/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -1,23 +1,11 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! // // Code relating to drop glue. -use std; - -use common::IntPredicate; -use meth; use rustc::ty::{self, Ty}; -use traits::*; +use crate::common::IntPredicate; +use crate::meth; +use crate::traits::*; pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs index d0cdb8924dfff..e2917578c0ece 100644 --- a/src/librustc_codegen_ssa/lib.rs +++ b/src/librustc_codegen_ssa/lib.rs @@ -1,54 +1,32 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(box_patterns)] #![feature(box_syntax)] +#![feature(core_intrinsics)] #![feature(custom_attribute)] #![feature(libc)] #![feature(rustc_diagnostic_macros)] +#![feature(stmt_expr_attributes)] +#![feature(try_blocks)] #![feature(in_band_lifetimes)] -#![feature(slice_sort_by_cached_key)] #![feature(nll)] +#![feature(trusted_len)] #![allow(unused_attributes)] #![allow(dead_code)] -#![feature(quote)] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] +#![allow(explicit_outlives_requirements)] + +#![recursion_limit="256"] //! This crate contains codegen code that is used by all codegen backends (LLVM and others). //! The backend-agnostic functions of this crate use functions defined in various traits that //! have to be implemented by each backends. -#[macro_use] extern crate bitflags; #[macro_use] extern crate log; -extern crate rustc_apfloat; -#[macro_use] extern crate rustc; -extern crate rustc_target; -extern crate rustc_mir; +#[macro_use] extern crate rustc; +#[macro_use] extern crate rustc_data_structures; #[macro_use] extern crate syntax; -extern crate syntax_pos; -extern crate rustc_incremental; -extern crate rustc_codegen_utils; -extern crate rustc_data_structures; -extern crate rustc_allocator; -extern crate rustc_fs_util; -extern crate serialize; -extern crate rustc_errors; -extern crate rustc_demangle; -extern crate cc; -extern crate libc; -extern crate jobserver; -extern crate memmap; -extern crate num_cpus; use std::path::PathBuf; use rustc::dep_graph::WorkProduct; @@ -79,7 +57,7 @@ pub mod back; pub struct ModuleCodegen { /// The name of the module. 
When the crate may be saved between /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain + /// unique amongst **all** crates. Therefore, it should contain /// something unique to this crate (e.g., a module path) as well /// as the crate name and disambiguator. /// We currently generate these names via CodegenUnit::build_cgu_name(). @@ -144,7 +122,7 @@ pub enum ModuleKind { Allocator, } -bitflags! { +bitflags::bitflags! { pub struct MemFlags: u8 { const VOLATILE = 1 << 0; const NONTEMPORAL = 1 << 1; @@ -152,7 +130,7 @@ bitflags! { } } -/// Misc info we load from metadata to persist beyond the tcx +/// Misc info we load from metadata to persist beyond the tcx. pub struct CrateInfo { pub panic_runtime: Option, pub compiler_builtins: Option, @@ -166,7 +144,6 @@ pub struct CrateInfo { pub used_crate_source: FxHashMap>, pub used_crates_static: Vec<(CrateNum, LibSource)>, pub used_crates_dynamic: Vec<(CrateNum, LibSource)>, - pub wasm_imports: FxHashMap, pub lang_item_to_crate: FxHashMap, pub missing_lang_items: FxHashMap>, } diff --git a/src/librustc_codegen_ssa/meth.rs b/src/librustc_codegen_ssa/meth.rs index 3880935f0f426..49f3c87ee2d9d 100644 --- a/src/librustc_codegen_ssa/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -1,18 +1,8 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_target::abi::call::FnType; -use callee; use rustc_mir::monomorphize; -use traits::*; +use crate::callee; +use crate::traits::*; use rustc::ty::{self, Ty}; @@ -79,7 +69,7 @@ impl<'a, 'tcx: 'a> VirtualIndex { pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( cx: &Cx, ty: Ty<'tcx>, - trait_ref: ty::PolyExistentialTraitRef<'tcx>, + trait_ref: Option>, ) -> Cx::Value { let tcx = cx.tcx(); @@ -93,8 +83,15 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>( // Not in the cache. Build it. let nullptr = cx.const_null(cx.type_i8p()); - let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); - let methods = methods.iter().cloned().map(|opt_mth| { + let methods_root; + let methods = if let Some(trait_ref) = trait_ref { + methods_root = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); + methods_root.iter() + } else { + (&[]).iter() + }; + + let methods = methods.cloned().map(|opt_mth| { opt_mth.map_or(nullptr, |(def_id, substs)| { callee::resolve_and_get_fn_for_vtable(cx, def_id, substs) }) diff --git a/src/librustc_codegen_ssa/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs index 81da7f5fb5cac..8253a1672454d 100644 --- a/src/librustc_codegen_ssa/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! An analysis to determine which locals require allocas and //! which do not. 
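`get_vtable` above now accepts an optional trait ref and uses a small borrow trick to iterate either the computed method list or nothing at all: the list is parked in an outer `methods_root` binding so it outlives the `if let`, and the `None` case iterates an empty slice. A hedged sketch of the same trick, with ordinary strings standing in for `tcx.vtable_methods(...)`:

fn method_names(trait_ref: Option<&str>) -> Vec<String> {
    let methods_root;
    let methods = if let Some(name) = trait_ref {
        // Stand-in for `tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty))`.
        methods_root = vec![format!("{}::method_a", name), format!("{}::method_b", name)];
        methods_root.iter()
    } else {
        (&[]).iter()
    };
    methods.cloned().collect()
}

fn main() {
    assert_eq!(method_names(Some("Trait")).len(), 2);
    assert!(method_names(None).is_empty());
}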
@@ -20,7 +10,7 @@ use rustc::mir::traversal; use rustc::ty; use rustc::ty::layout::{LayoutOf, HasTyCtxt}; use super::FunctionCx; -use traits::*; +use crate::traits::*; pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( fx: &FunctionCx<'a, 'tcx, Bx> @@ -113,7 +103,7 @@ impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> location: Location) { debug!("visit_assign(block={:?}, place={:?}, rvalue={:?})", block, place, rvalue); - if let mir::Place::Local(index) = *place { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place { self.assign(index, location); if !self.fx.rvalue_creates_operand(rvalue) { self.not_ssa(index); @@ -182,14 +172,14 @@ impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> // ZSTs don't require any actual memory access. let elem_ty = base_ty .projection_ty(cx.tcx(), &proj.elem) - .to_ty(cx.tcx()); + .ty; let elem_ty = self.fx.monomorphize(&elem_ty); if cx.layout_of(elem_ty).is_zst() { return; } if let mir::ProjectionElem::Field(..) = proj.elem { - let layout = cx.layout_of(base_ty.to_ty(cx.tcx())); + let layout = cx.layout_of(base_ty.ty); if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, @@ -255,8 +245,9 @@ impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx> } PlaceContext::MutatingUse(MutatingUseContext::Drop) => { - let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx()); - let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx())); + let ty = mir::Place::Base(mir::PlaceBase::Local(local)).ty(self.fx.mir, + self.fx.cx.tcx()); + let ty = self.fx.monomorphize(&ty.ty); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index a3bfbc2211ce3..e64c847db651b 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -1,27 +1,19 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; -use rustc::mir; -use rustc::mir::interpret::EvalErrorKind; -use rustc_target::abi::call::{ArgType, FnType, PassMode}; +use rustc::mir::{self, Place, PlaceBase, Static, StaticKind}; +use rustc::mir::interpret::InterpError; +use rustc_target::abi::call::{ArgType, FnType, PassMode, IgnoreMode}; use rustc_target::spec::abi::Abi; -use base; -use MemFlags; -use common::{self, IntPredicate}; -use meth; use rustc_mir::monomorphize; +use crate::base; +use crate::MemFlags; +use crate::common::{self, IntPredicate}; +use crate::meth; + +use crate::traits::*; -use traits::*; +use std::borrow::Cow; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -31,718 +23,858 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - pub fn codegen_block( - &mut self, - bb: mir::BasicBlock, - ) { - let mut bx = self.build_block(bb); - let data = &self.mir[bb]; +/// Used by `FunctionCx::codegen_terminator` for emitting common patterns +/// e.g., creating a basic block, calling a function, etc. +struct TerminatorCodegenHelper<'a, 'tcx> { + bb: &'a mir::BasicBlock, + terminator: &'a mir::Terminator<'tcx>, + funclet_bb: Option, +} - debug!("codegen_block({:?}={:?})", bb, data); +impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> { + /// Returns the associated funclet from `FunctionCx::funclets` for the + /// `funclet_bb` member if it is not `None`. + fn funclet<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( + &self, + fx: &'c mut FunctionCx<'b, 'tcx, Bx>, + ) -> Option<&'c Bx::Funclet> { + match self.funclet_bb { + Some(funcl) => fx.funclets[funcl].as_ref(), + None => None, + } + } - for statement in &data.statements { - bx = self.codegen_statement(bx, statement); + fn lltarget<'b, 'c, Bx: BuilderMethods<'b, 'tcx>>( + &self, + fx: &'c mut FunctionCx<'b, 'tcx, Bx>, + target: mir::BasicBlock, + ) -> (Bx::BasicBlock, bool) { + let span = self.terminator.source_info.span; + let lltarget = fx.blocks[target]; + let target_funclet = fx.cleanup_kinds[target].funclet_bb(target); + match (self.funclet_bb, target_funclet) { + (None, None) => (lltarget, false), + (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => + (lltarget, false), + // jump *into* cleanup - need a landing pad if GNU + (None, Some(_)) => (fx.landing_pad_to(target), false), + (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator), + (Some(_), Some(_)) => (fx.landing_pad_to(target), true), } + } - self.codegen_terminator(bx, bb, data.terminator()); + /// Create a basic block. 
+ fn llblock<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( + &self, + fx: &'c mut FunctionCx<'b, 'tcx, Bx>, + target: mir::BasicBlock, + ) -> Bx::BasicBlock { + let (lltarget, is_cleanupret) = self.lltarget(fx, target); + if is_cleanupret { + // MSVC cross-funclet jump - need a trampoline + + debug!("llblock: creating cleanup trampoline for {:?}", target); + let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target); + let mut trampoline = fx.new_block(name); + trampoline.cleanup_ret(self.funclet(fx).unwrap(), + Some(lltarget)); + trampoline.llbb() + } else { + lltarget + } } - fn codegen_terminator( - &mut self, - mut bx: Bx, - bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx> + fn funclet_br<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( + &self, + fx: &'c mut FunctionCx<'b, 'tcx, Bx>, + bx: &mut Bx, + target: mir::BasicBlock, ) { - debug!("codegen_terminator: {:?}", terminator); - - // Create the cleanup bundle, if needed. - let tcx = self.cx.tcx(); - let span = terminator.source_info.span; - let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); + let (lltarget, is_cleanupret) = self.lltarget(fx, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. + bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + } - // HACK(eddyb) force the right lifetimes, NLL can't figure them out. - fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( - funclet_bb: Option - ) -> impl for<'b> Fn( - &'b FunctionCx<'a, 'tcx, Bx>, - ) -> Option<&'b Bx::Funclet> { - move |this| { - match funclet_bb { - Some(funclet_bb) => this.funclets[funclet_bb].as_ref(), - None => None, - } + /// Call `fn_ptr` of `fn_ty` with the arguments `llargs`, the optional + /// return destination `destination` and the cleanup function `cleanup`. + fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>( + &self, + fx: &'c mut FunctionCx<'b, 'tcx, Bx>, + bx: &mut Bx, + fn_ty: FnType<'tcx, Ty<'tcx>>, + fn_ptr: Bx::Value, + llargs: &[Bx::Value], + destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, + cleanup: Option, + ) { + if let Some(cleanup) = cleanup { + let ret_bx = if let Some((_, target)) = destination { + fx.blocks[target] + } else { + fx.unreachable_block() + }; + let invokeret = bx.invoke(fn_ptr, + &llargs, + ret_bx, + self.llblock(fx, cleanup), + self.funclet(fx)); + bx.apply_attrs_callsite(&fn_ty, invokeret); + + if let Some((ret_dest, target)) = destination { + let mut ret_bx = fx.build_block(target); + fx.set_debug_loc(&mut ret_bx, self.terminator.source_info); + fx.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } - } - let funclet = funclet_closure_factory(funclet_bb); - - let lltarget = |this: &mut Self, target: mir::BasicBlock| { - let lltarget = this.blocks[target]; - let target_funclet = this.cleanup_kinds[target].funclet_bb(target); - match (funclet_bb, target_funclet) { - (None, None) => (lltarget, false), - (Some(f), Some(t_f)) - if f == t_f || !base::wants_msvc_seh(tcx.sess) - => (lltarget, false), - (None, Some(_)) => { - // jump *into* cleanup - need a landing pad if GNU - (this.landing_pad_to(target), false) - } - (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator), - (Some(_), Some(_)) => { - (this.landing_pad_to(target), true) - } + } else { + let llret = bx.call(fn_ptr, &llargs, self.funclet(fx)); + bx.apply_attrs_callsite(&fn_ty, llret); + if fx.mir[*self.bb].is_cleanup { + // Cleanup is always the cold path. Don't inline + // drop glue. 
Also, when there is a deeply-nested + // struct, there are "symmetry" issues that cause + // exponential inlining - see issue #41696. + bx.do_not_inline(llret); } - }; - let llblock = |this: &mut Self, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // MSVC cross-funclet jump - need a trampoline - - debug!("llblock: creating cleanup trampoline for {:?}", target); - let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let mut trampoline = this.new_block(name); - trampoline.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); - trampoline.llbb() + if let Some((ret_dest, target)) = destination { + fx.store_return(bx, ret_dest, &fn_ty.ret, llret); + self.funclet_br(fx, bx, target); } else { - lltarget + bx.unreachable(); } - }; - - let funclet_br = - |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // micro-optimization: generate a `ret` rather than a jump - // to a trampoline. - bx.cleanup_ret(funclet(this).unwrap(), Some(lltarget)); - } else { - bx.br(lltarget); - } - }; + } + } +} - let do_call = | - this: &mut Self, - bx: &mut Bx, - fn_ty: FnType<'tcx, Ty<'tcx>>, - fn_ptr: Bx::Value, - llargs: &[Bx::Value], - destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>, - cleanup: Option - | { - if let Some(cleanup) = cleanup { - let ret_bx = if let Some((_, target)) = destination { - this.blocks[target] - } else { - this.unreachable_block() - }; - let invokeret = bx.invoke(fn_ptr, - &llargs, - ret_bx, - llblock(this, cleanup), - funclet(this)); - bx.apply_attrs_callsite(&fn_ty, invokeret); - - if let Some((ret_dest, target)) = destination { - let mut ret_bx = this.build_block(target); - this.set_debug_loc(&mut ret_bx, terminator.source_info); - this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); - } +/// Codegen implementations for some terminator variants. +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + /// Generates code for a `Resume` terminator. + fn codegen_resume_terminator<'b>( + &mut self, + helper: TerminatorCodegenHelper<'b, 'tcx>, + mut bx: Bx, + ) { + if let Some(funclet) = helper.funclet(self) { + bx.cleanup_ret(funclet, None); + } else { + let slot = self.get_personality_slot(&mut bx); + let lp0 = slot.project_field(&mut bx, 0); + let lp0 = bx.load_operand(lp0).immediate(); + let lp1 = slot.project_field(&mut bx, 1); + let lp1 = bx.load_operand(lp1).immediate(); + slot.storage_dead(&mut bx); + + if !bx.sess().target.target.options.custom_unwind_resume { + let mut lp = bx.const_undef(self.landing_pad_type()); + lp = bx.insert_value(lp, lp0, 0); + lp = bx.insert_value(lp, lp1, 1); + bx.resume(lp); } else { - let llret = bx.call(fn_ptr, &llargs, funclet(this)); - bx.apply_attrs_callsite(&fn_ty, llret); - if this.mir[bb].is_cleanup { - // Cleanup is always the cold path. Don't inline - // drop glue. Also, when there is a deeply-nested - // struct, there are "symmetry" issues that cause - // exponential inlining - see issue #41696. 
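The `do_call` helper above marks calls made from cleanup blocks with `do_not_inline`, on the reasoning that cleanup is the cold path and inlining drop glue there can blow up code size. A rough source-level analogue of that hint (an assumed example, not the patch's code) is keeping cold cleanup work out of line:

    // Illustrative sketch, not part of the patch: these attributes play the role
    // that `bx.do_not_inline(llret)` plays at the generated-call level.
    #[inline(never)]
    #[cold]
    fn cold_cleanup(buf: Vec<u8>) {
        // Deliberately out of line so the hot path stays small.
        drop(buf);
    }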
- bx.do_not_inline(llret); - } + bx.call(bx.eh_unwind_resume(), &[lp0], + helper.funclet(self)); + bx.unreachable(); + } + } + } - if let Some((ret_dest, target)) = destination { - this.store_return(bx, ret_dest, &fn_ty.ret, llret); - funclet_br(this, bx, target); + fn codegen_switchint_terminator<'b>( + &mut self, + helper: TerminatorCodegenHelper<'b, 'tcx>, + mut bx: Bx, + discr: &mir::Operand<'tcx>, + switch_ty: Ty<'tcx>, + values: &Cow<'tcx, [u128]>, + targets: &Vec, + ) { + let discr = self.codegen_operand(&mut bx, &discr); + if targets.len() == 2 { + // If there are two targets, emit br instead of switch + let lltrue = helper.llblock(self, targets[0]); + let llfalse = helper.llblock(self, targets[1]); + if switch_ty == bx.tcx().types.bool { + // Don't generate trivial icmps when switching on bool + if let [0] = values[..] { + bx.cond_br(discr.immediate(), llfalse, lltrue); } else { - bx.unreachable(); + assert_eq!(&values[..], &[1]); + bx.cond_br(discr.immediate(), lltrue, llfalse); } + } else { + let switch_llty = bx.immediate_backend_type( + bx.layout_of(switch_ty) + ); + let llval = bx.const_uint_big(switch_llty, values[0]); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); + bx.cond_br(cmp, lltrue, llfalse); } - }; + } else { + let (otherwise, targets) = targets.split_last().unwrap(); + bx.switch( + discr.immediate(), + helper.llblock(self, *otherwise), + values.iter().zip(targets).map(|(&value, target)| { + (value, helper.llblock(self, *target)) + }) + ); + } + } - self.set_debug_loc(&mut bx, terminator.source_info); - match terminator.kind { - mir::TerminatorKind::Resume => { - if let Some(funclet) = funclet(self) { - bx.cleanup_ret(funclet, None); - } else { - let slot = self.get_personality_slot(&mut bx); - let lp0 = slot.project_field(&mut bx, 0); - let lp0 = bx.load_operand(lp0).immediate(); - let lp1 = slot.project_field(&mut bx, 1); - let lp1 = bx.load_operand(lp1).immediate(); - slot.storage_dead(&mut bx); - - if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = bx.const_undef(self.landing_pad_type()); - lp = bx.insert_value(lp, lp0, 0); - lp = bx.insert_value(lp, lp1, 1); - bx.resume(lp); - } else { - bx.call(bx.eh_unwind_resume(), &[lp0], funclet(self)); - bx.unreachable(); - } + fn codegen_return_terminator<'b>( + &mut self, + mut bx: Bx, + ) { + if self.fn_ty.c_variadic { + match self.va_list_ref { + Some(va_list) => { + bx.va_end(va_list.llval); + } + None => { + bug!("C-variadic function must have a `va_list_ref`"); } } - - mir::TerminatorKind::Abort => { - bx.abort(); - bx.unreachable(); + } + if self.fn_ty.ret.layout.abi.is_uninhabited() { + // Functions with uninhabited return values are marked `noreturn`, + // so we should make sure that we never actually do. + bx.abort(); + bx.unreachable(); + return; + } + let llval = match self.fn_ty.ret.mode { + PassMode::Ignore(IgnoreMode::Zst) | PassMode::Indirect(..) 
=> { + bx.ret_void(); + return; } - mir::TerminatorKind::Goto { target } => { - funclet_br(self, &mut bx, target); + PassMode::Ignore(IgnoreMode::CVarArgs) => { + bug!("C-variadic arguments should never be the return type"); } - mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let discr = self.codegen_operand(&mut bx, discr); - if targets.len() == 2 { - // If there are two targets, emit br instead of switch - let lltrue = llblock(self, targets[0]); - let llfalse = llblock(self, targets[1]); - if switch_ty == bx.tcx().types.bool { - // Don't generate trivial icmps when switching on bool - if let [0] = values[..] { - bx.cond_br(discr.immediate(), llfalse, lltrue); - } else { - assert_eq!(&values[..], &[1]); - bx.cond_br(discr.immediate(), lltrue, llfalse); - } - } else { - let switch_llty = bx.immediate_backend_type( - bx.layout_of(switch_ty) - ); - let llval = bx.const_uint_big(switch_llty, values[0]); - let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); - bx.cond_br(cmp, lltrue, llfalse); - } + PassMode::Direct(_) | PassMode::Pair(..) => { + let op = + self.codegen_consume(&mut bx, &mir::Place::RETURN_PLACE); + if let Ref(llval, _, align) = op.val { + bx.load(llval, align) } else { - let (otherwise, targets) = targets.split_last().unwrap(); - let switch = bx.switch(discr.immediate(), - llblock(self, *otherwise), - values.len()); - let switch_llty = bx.immediate_backend_type( - bx.layout_of(switch_ty) - ); - for (&value, target) in values.iter().zip(targets) { - let llval = bx.const_uint_big(switch_llty, value); - let llbb = llblock(self, *target); - bx.add_case(switch, llval, llbb) - } + op.immediate_or_packed_pair(&mut bx) } } - mir::TerminatorKind::Return => { - let llval = match self.fn_ty.ret.mode { - PassMode::Ignore | PassMode::Indirect(..) => { - bx.ret_void(); - return; - } - - PassMode::Direct(_) | PassMode::Pair(..) => { - let op = - self.codegen_consume(&mut bx, &mir::Place::Local(mir::RETURN_PLACE)); - if let Ref(llval, _, align) = op.val { - bx.load(llval, align) - } else { - op.immediate_or_packed_pair(&mut bx) + PassMode::Cast(cast_ty) => { + let op = match self.locals[mir::RETURN_PLACE] { + LocalRef::Operand(Some(op)) => op, + LocalRef::Operand(None) => bug!("use of return before def"), + LocalRef::Place(cg_place) => { + OperandRef { + val: Ref(cg_place.llval, None, cg_place.align), + layout: cg_place.layout } } - - PassMode::Cast(cast_ty) => { - let op = match self.locals[mir::RETURN_PLACE] { - LocalRef::Operand(Some(op)) => op, - LocalRef::Operand(None) => bug!("use of return before def"), - LocalRef::Place(cg_place) => { - OperandRef { - val: Ref(cg_place.llval, None, cg_place.align), - layout: cg_place.layout - } - } - LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), - }; - let llslot = match op.val { - Immediate(_) | Pair(..) => { - let scratch = - PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); - op.val.store(&mut bx, scratch); - scratch.llval - } - Ref(llval, _, align) => { - assert_eq!(align, op.layout.align.abi, - "return place is unaligned!"); - llval - } - }; - let addr = bx.pointercast(llslot, bx.type_ptr_to( - bx.cast_backend_type(&cast_ty) - )); - bx.load(addr, self.fn_ty.ret.layout.align.abi) + LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), + }; + let llslot = match op.val { + Immediate(_) | Pair(..) 
=> { + let scratch = + PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + op.val.store(&mut bx, scratch); + scratch.llval + } + Ref(llval, _, align) => { + assert_eq!(align, op.layout.align.abi, + "return place is unaligned!"); + llval } }; - bx.ret(llval); + let addr = bx.pointercast(llslot, bx.type_ptr_to( + bx.cast_backend_type(&cast_ty) + )); + bx.load(addr, self.fn_ty.ret.layout.align.abi) } + }; + bx.ret(llval); + } - mir::TerminatorKind::Unreachable => { - bx.unreachable(); + + fn codegen_drop_terminator<'b>( + &mut self, + helper: TerminatorCodegenHelper<'b, 'tcx>, + mut bx: Bx, + location: &mir::Place<'tcx>, + target: mir::BasicBlock, + unwind: Option, + ) { + let ty = location.ty(self.mir, bx.tcx()).ty; + let ty = self.monomorphize(&ty); + let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty); + + if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { + // we don't actually need to drop anything. + helper.funclet_br(self, &mut bx, target); + return + } + + let place = self.codegen_place(&mut bx, location); + let (args1, args2); + let mut args = if let Some(llextra) = place.llextra { + args2 = [place.llval, llextra]; + &args2[..] + } else { + args1 = [place.llval]; + &args1[..] + }; + let (drop_fn, fn_ty) = match ty.sty { + ty::Dynamic(..) => { + let sig = drop_fn.fn_sig(self.cx.tcx()); + let sig = self.cx.tcx().normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &sig, + ); + let fn_ty = bx.new_vtable(sig, &[]); + let vtable = args[1]; + args = &args[..1]; + (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) } + _ => { + (bx.get_fn(drop_fn), + bx.fn_type_of_instance(&drop_fn)) + } + }; + helper.do_call(self, &mut bx, fn_ty, drop_fn, args, + Some((ReturnDest::Nothing, target)), + unwind); + } - mir::TerminatorKind::Drop { ref location, target, unwind } => { - let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); - let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty); - - if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { - // we don't actually need to drop anything. - funclet_br(self, &mut bx, target); - return - } + fn codegen_assert_terminator<'b>( + &mut self, + helper: TerminatorCodegenHelper<'b, 'tcx>, + mut bx: Bx, + terminator: &mir::Terminator<'tcx>, + cond: &mir::Operand<'tcx>, + expected: bool, + msg: &mir::AssertMessage<'tcx>, + target: mir::BasicBlock, + cleanup: Option, + ) { + let span = terminator.source_info.span; + let cond = self.codegen_operand(&mut bx, cond).immediate(); + let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1); + + // This case can currently arise only from functions marked + // with #[rustc_inherit_overflow_checks] and inlined from + // another crate (mostly core::num generic/#[inline] fns), + // while the current crate doesn't use overflow checks. + // NOTE: Unlike binops, negation doesn't have its own + // checked operation, just a comparison with the minimum + // value, so we have to check for the assert message. + if !bx.check_overflow() { + if let mir::interpret::InterpError::OverflowNeg = *msg { + const_cond = Some(expected); + } + } - let place = self.codegen_place(&mut bx, location); - let (args1, args2); - let mut args = if let Some(llextra) = place.llextra { - args2 = [place.llval, llextra]; - &args2[..] - } else { - args1 = [place.llval]; - &args1[..] - }; - let (drop_fn, fn_ty) = match ty.sty { - ty::Dynamic(..) 
=> { - let sig = drop_fn.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), - &sig, - ); - let fn_ty = bx.new_vtable(sig, &[]); - let vtable = args[1]; - args = &args[..1]; - (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) - } - _ => { - (bx.get_fn(drop_fn), - bx.fn_type_of_instance(&drop_fn)) - } - }; - do_call(self, &mut bx, fn_ty, drop_fn, args, - Some((ReturnDest::Nothing, target)), - unwind); + // Don't codegen the panic block if success if known. + if const_cond == Some(expected) { + helper.funclet_br(self, &mut bx, target); + return; + } + + // Pass the condition through llvm.expect for branch hinting. + let cond = bx.expect(cond, expected); + + // Create the failure block and the conditional branch to it. + let lltarget = helper.llblock(self, target); + let panic_block = self.new_block("panic"); + if expected { + bx.cond_br(cond, lltarget, panic_block.llbb()); + } else { + bx.cond_br(cond, panic_block.llbb(), lltarget); + } + + // After this point, bx is the block for the call to panic. + bx = panic_block; + self.set_debug_loc(&mut bx, terminator.source_info); + + // Get the location information. + let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); + let line = bx.const_u32(loc.line as u32); + let col = bx.const_u32(loc.col.to_usize() as u32 + 1); + + // Put together the arguments to the panic entry point. + let (lang_item, args) = match *msg { + InterpError::BoundsCheck { ref len, ref index } => { + let len = self.codegen_operand(&mut bx, len).immediate(); + let index = self.codegen_operand(&mut bx, index).immediate(); + + let file_line_col = bx.static_panic_msg( + None, + filename, + line, + col, + "panic_bounds_check_loc", + ); + (lang_items::PanicBoundsCheckFnLangItem, + vec![file_line_col, index, len]) } + _ => { + let str = msg.description(); + let msg_str = Symbol::intern(str).as_str(); + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", + ); + (lang_items::PanicFnLangItem, + vec![msg_file_line_col]) + } + }; - mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.codegen_operand(&mut bx, cond).immediate(); - let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1); - - // This case can currently arise only from functions marked - // with #[rustc_inherit_overflow_checks] and inlined from - // another crate (mostly core::num generic/#[inline] fns), - // while the current crate doesn't use overflow checks. - // NOTE: Unlike binops, negation doesn't have its own - // checked operation, just a comparison with the minimum - // value, so we have to check for the assert message. - if !bx.check_overflow() { - if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { - const_cond = Some(expected); - } - } + // Obtain the panic entry point. + let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); + let instance = ty::Instance::mono(bx.tcx(), def_id); + let fn_ty = bx.fn_type_of_instance(&instance); + let llfn = bx.get_fn(instance); - // Don't codegen the panic block if success if known. - if const_cond == Some(expected) { - funclet_br(self, &mut bx, target); - return; - } + // Codegen the actual panic invoke/call. + helper.do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); + } - // Pass the condition through llvm.expect for branch hinting. 
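The new `codegen_assert_terminator` above first tries to constant-fold the assert condition; only when that fails does it emit the panic block and route the condition through `expect` for branch hinting. Reduced to plain Rust (hypothetical helper name), the emit-or-skip decision is:

    // Illustrative sketch, not part of the patch: `const_cond` is `Some(v)` when
    // the condition folded to a constant during codegen.
    fn emits_panic_block(const_cond: Option<bool>, expected: bool) -> bool {
        // Statically-known success means the failure path is unreachable and no
        // panic block is generated at all.
        const_cond != Some(expected)
    }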
- let cond = bx.expect(cond, expected); + fn codegen_call_terminator<'b>( + &mut self, + helper: TerminatorCodegenHelper<'b, 'tcx>, + mut bx: Bx, + terminator: &mir::Terminator<'tcx>, + func: &mir::Operand<'tcx>, + args: &Vec>, + destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>, + cleanup: Option, + ) { + let span = terminator.source_info.span; + // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. + let callee = self.codegen_operand(&mut bx, func); + + let (instance, mut llfn) = match callee.layout.ty.sty { + ty::FnDef(def_id, substs) => { + (Some(ty::Instance::resolve(bx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs).unwrap()), + None) + } + ty::FnPtr(_) => { + (None, Some(callee.immediate())) + } + _ => bug!("{} is not callable", callee.layout.ty), + }; + let def = instance.map(|i| i.def); + let sig = callee.layout.ty.fn_sig(bx.tcx()); + let sig = bx.tcx().normalize_erasing_late_bound_regions( + ty::ParamEnv::reveal_all(), + &sig, + ); + let abi = sig.abi; + + // Handle intrinsics old codegen wants Expr's for, ourselves. + let intrinsic = match def { + Some(ty::InstanceDef::Intrinsic(def_id)) => + Some(bx.tcx().item_name(def_id).as_str()), + _ => None + }; + let intrinsic = intrinsic.as_ref().map(|s| &s[..]); - // Create the failure block and the conditional branch to it. - let lltarget = llblock(self, target); - let panic_block = self.new_block("panic"); - if expected { - bx.cond_br(cond, lltarget, panic_block.llbb()); - } else { - bx.cond_br(cond, panic_block.llbb(), lltarget); - } + if intrinsic == Some("transmute") { + if let Some(destination_ref) = destination.as_ref() { + let &(ref dest, target) = destination_ref; + self.codegen_transmute(&mut bx, &args[0], dest); + helper.funclet_br(self, &mut bx, target); + } else { + // If we are trying to transmute to an uninhabited type, + // it is likely there is no allotted destination. In fact, + // transmuting to an uninhabited type is UB, which means + // we can do what we like. Here, we declare that transmuting + // into an uninhabited type is impossible, so anything following + // it must be unreachable. + assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + bx.unreachable(); + } + return; + } - // After this point, bx is the block for the call to panic. - bx = panic_block; - self.set_debug_loc(&mut bx, terminator.source_info); + // The "spoofed" `VaList` added to a C-variadic functions signature + // should not be included in the `extra_args` calculation. + let extra_args_start_idx = sig.inputs().len() - if sig.c_variadic { 1 } else { 0 }; + let extra_args = &args[extra_args_start_idx..]; + let extra_args = extra_args.iter().map(|op_arg| { + let op_ty = op_arg.ty(self.mir, bx.tcx()); + self.monomorphize(&op_ty) + }).collect::>(); + + let fn_ty = match def { + Some(ty::InstanceDef::Virtual(..)) => { + bx.new_vtable(sig, &extra_args) + } + Some(ty::InstanceDef::DropGlue(_, None)) => { + // Empty drop glue; a no-op. + let &(_, target) = destination.as_ref().unwrap(); + helper.funclet_br(self, &mut bx, target); + return; + } + _ => bx.new_fn_type(sig, &extra_args) + }; - // Get the location information. + // Emit a panic or a no-op for `panic_if_uninhabited`. 
+ if intrinsic == Some("panic_if_uninhabited") { + let ty = instance.unwrap().substs.type_at(0); + let layout = bx.layout_of(ty); + if layout.abi.is_uninhabited() { let loc = bx.sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); let line = bx.const_u32(loc.line as u32); let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align.abi - .max(tcx.data_layout.i32_align.abi) - .max(tcx.data_layout.pointer_align.abi); - - // Put together the arguments to the panic entry point. - let (lang_item, args) = match *msg { - EvalErrorKind::BoundsCheck { ref len, ref index } => { - let len = self.codegen_operand(&mut bx, len).immediate(); - let index = self.codegen_operand(&mut bx, index).immediate(); - - let file_line_col = bx.const_struct(&[filename, line, col], false); - let file_line_col = bx.static_addr_of( - file_line_col, - align, - Some("panic_bounds_check_loc") - ); - (lang_items::PanicBoundsCheckFnLangItem, - vec![file_line_col, index, len]) - } - _ => { - let str = msg.description(); - let msg_str = Symbol::intern(str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc") - ); - (lang_items::PanicFnLangItem, - vec![msg_file_line_col]) - } - }; + + let str = format!( + "Attempted to instantiate uninhabited type {}", + ty + ); + let msg_str = Symbol::intern(&str).as_str(); + let msg_file_line_col = bx.static_panic_msg( + Some(msg_str), + filename, + line, + col, + "panic_loc", + ); // Obtain the panic entry point. - let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); + let def_id = + common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); let fn_ty = bx.fn_type_of_instance(&instance); let llfn = bx.get_fn(instance); // Codegen the actual panic invoke/call. - do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); + helper.do_call( + self, + &mut bx, + fn_ty, + llfn, + &[msg_file_line_col], + destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)), + cleanup, + ); + } else { + // a NOP + helper.funclet_br(self, &mut bx, destination.as_ref().unwrap().1) } + return; + } - mir::TerminatorKind::DropAndReplace { .. } => { - bug!("undesugared DropAndReplace in codegen: {:?}", terminator); - } + // The arguments we'll be passing. Plus one to account for outptr, if used. + let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; + let mut llargs = Vec::with_capacity(arg_count); - mir::TerminatorKind::Call { - ref func, - ref args, - ref destination, - cleanup, - from_hir_call: _ - } => { - // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. 
- let callee = self.codegen_operand(&mut bx, func); - - let (instance, mut llfn) = match callee.layout.ty.sty { - ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.tcx(), - ty::ParamEnv::reveal_all(), - def_id, - substs).unwrap()), - None) - } - ty::FnPtr(_) => { - (None, Some(callee.immediate())) - } - _ => bug!("{} is not callable", callee.layout.ty) - }; - let def = instance.map(|i| i.def); - let sig = callee.layout.ty.fn_sig(bx.tcx()); - let sig = bx.tcx().normalize_erasing_late_bound_regions( - ty::ParamEnv::reveal_all(), - &sig, - ); - let abi = sig.abi; + // Prepare the return value destination + let ret_dest = if let Some((ref dest, _)) = *destination { + let is_intrinsic = intrinsic.is_some(); + self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, + is_intrinsic) + } else { + ReturnDest::Nothing + }; - // Handle intrinsics old codegen wants Expr's for, ourselves. - let intrinsic = match def { - Some(ty::InstanceDef::Intrinsic(def_id)) - => Some(bx.tcx().item_name(def_id).as_str()), - _ => None - }; - let intrinsic = intrinsic.as_ref().map(|s| &s[..]); + if intrinsic.is_some() && intrinsic != Some("drop_in_place") { + let dest = match ret_dest { + _ if fn_ty.ret.is_indirect() => llargs[0], + ReturnDest::Nothing => + bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))), + ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => + dst.llval, + ReturnDest::DirectOperand(_) => + bug!("Cannot use direct operand with an intrinsic call"), + }; - if intrinsic == Some("transmute") { - if let Some(destination_ref) = destination.as_ref() { - let &(ref dest, target) = destination_ref; - self.codegen_transmute(&mut bx, &args[0], dest); - funclet_br(self, &mut bx, target); - } else { - // If we are trying to transmute to an uninhabited type, - // it is likely there is no allotted destination. In fact, - // transmuting to an uninhabited type is UB, which means - // we can do what we like. Here, we declare that transmuting - // into an uninhabited type is impossible, so anything following - // it must be unreachable. - assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); - bx.unreachable(); + let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { + // The indices passed to simd_shuffle* in the + // third argument must be constant. This is + // checked by const-qualification, which also + // promotes any complex rvalues to constants. + if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { + match *arg { + // The shuffle array argument is usually not an explicit constant, + // but specified directly in the code. This means it gets promoted + // and we can then extract the value by evaluating the promoted. 
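The surrounding code special-cases `simd_shuffle*` index arrays written inline in the source: const-qualification promotes such an array to a static, which is why it appears as `StaticKind::Promoted` here and can be recovered with `const_eval`. The same promotion is visible from ordinary user code; a small assumed example:

    // Illustrative sketch, not part of the patch: a borrow of an inline constant
    // expression is promoted to a `'static` allocation.
    fn first(indices: &'static [u32; 4]) -> u32 {
        indices[0]
    }

    fn main() {
        // `&[0, 1, 2, 3]` is promoted, so it satisfies the `'static` bound
        // without naming a `const` or `static` item.
        assert_eq!(first(&[0, 1, 2, 3]), 0);
    }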
+ mir::Operand::Copy( + Place::Base( + PlaceBase::Static( + box Static { kind: StaticKind::Promoted(promoted), ty } + ) + ) + ) | + mir::Operand::Move( + Place::Base( + PlaceBase::Static( + box Static { kind: StaticKind::Promoted(promoted), ty } + ) + ) + ) => { + let param_env = ty::ParamEnv::reveal_all(); + let cid = mir::interpret::GlobalId { + instance: self.instance, + promoted: Some(promoted), + }; + let c = bx.tcx().const_eval(param_env.and(cid)); + let (llval, ty) = self.simd_shuffle_indices( + &bx, + terminator.source_info.span, + ty, + c, + ); + return OperandRef { + val: Immediate(llval), + layout: bx.layout_of(ty), + }; + + } + mir::Operand::Copy(_) | + mir::Operand::Move(_) => { + span_bug!(span, "shuffle indices must be constant"); + } + mir::Operand::Constant(ref constant) => { + let c = self.eval_mir_constant(constant); + let (llval, ty) = self.simd_shuffle_indices( + &bx, + constant.span, + constant.ty, + c, + ); + return OperandRef { + val: Immediate(llval), + layout: bx.layout_of(ty) + }; + } } - return; } - let extra_args = &args[sig.inputs().len()..]; - let extra_args = extra_args.iter().map(|op_arg| { - let op_ty = op_arg.ty(self.mir, bx.tcx()); - self.monomorphize(&op_ty) - }).collect::>(); + self.codegen_operand(&mut bx, arg) + }).collect(); + + + let callee_ty = instance.as_ref().unwrap().ty(bx.tcx()); + bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); + + if let ReturnDest::IndirectOperand(dst, _) = ret_dest { + self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); + } + + if let Some((_, target)) = *destination { + helper.funclet_br(self, &mut bx, target); + } else { + bx.unreachable(); + } + + return; + } + + // Split the rust-call tupled arguments off. + let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { + let (tup, args) = args.split_last().unwrap(); + (args, Some(tup)) + } else { + (&args[..], None) + }; + + // Useful determining if the current argument is the "spoofed" `VaList` + let last_arg_idx = if sig.inputs().is_empty() { + None + } else { + Some(sig.inputs().len() - 1) + }; + 'make_args: for (i, arg) in first_args.iter().enumerate() { + // If this is a C-variadic function the function signature contains + // an "spoofed" `VaList`. This argument is ignored, but we need to + // populate it with a dummy operand so that the users real arguments + // are not overwritten. + let i = if sig.c_variadic && last_arg_idx.map(|x| i >= x).unwrap_or(false) { + if i + 1 < fn_ty.args.len() { + i + 1 + } else { + break 'make_args + } + } else { + i + }; + let mut op = self.codegen_operand(&mut bx, arg); + + if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { + if let Pair(..) = op.val { + // In the case of Rc, we need to explicitly pass a + // *mut RcBox with a Scalar (not ScalarPair) ABI. This is a hack + // that is understood elsewhere in the compiler as a method on + // `dyn Trait`. 
+ // To get a `*mut RcBox`, we just keep unwrapping newtypes until + // we get a value of a built-in pointer type + 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() + && !op.layout.ty.is_region_ptr() + { + 'iter_fields: for i in 0..op.layout.fields.count() { + let field = op.extract_field(&mut bx, i); + if !field.layout.is_zst() { + // we found the one non-zero-sized field that is allowed + // now find *its* non-zero-sized field, or stop if it's a + // pointer + op = field; + continue 'descend_newtypes + } + } - let fn_ty = match def { - Some(ty::InstanceDef::Virtual(..)) => { - bx.new_vtable(sig, &extra_args) + span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); } - Some(ty::InstanceDef::DropGlue(_, None)) => { - // empty drop glue - a nop. - let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, &mut bx, target); - return; + + // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its + // data pointer and vtable. Look up the method in the vtable, and pass + // the data pointer as the first argument + match op.val { + Pair(data_ptr, meta) => { + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&mut bx, meta, &fn_ty)); + llargs.push(data_ptr); + continue 'make_args + } + other => bug!("expected a Pair, got {:?}", other), } - _ => bx.new_fn_type(sig, &extra_args) - }; + } else if let Ref(data_ptr, Some(meta), _) = op.val { + // by-value dynamic dispatch + llfn = Some(meth::VirtualIndex::from_index(idx) + .get_fn(&mut bx, meta, &fn_ty)); + llargs.push(data_ptr); + continue; + } else { + span_bug!(span, "can't codegen a virtual call on {:?}", op); + } + } - // emit a panic instead of instantiating an uninhabited type - if (intrinsic == Some("init") || intrinsic == Some("uninit")) && - fn_ty.ret.layout.abi.is_uninhabited() - { - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); - let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = bx.const_str_slice(filename); - let line = bx.const_u32(loc.line as u32); - let col = bx.const_u32(loc.col.to_usize() as u32 + 1); - let align = tcx.data_layout.aggregate_align.abi - .max(tcx.data_layout.i32_align.abi) - .max(tcx.data_layout.pointer_align.abi); - - let str = format!( - "Attempted to instantiate uninhabited type {} using mem::{}", - sig.output(), - if intrinsic == Some("init") { "zeroed" } else { "uninitialized" } - ); - let msg_str = Symbol::intern(&str).as_str(); - let msg_str = bx.const_str_slice(msg_str); - let msg_file_line_col = bx.const_struct( - &[msg_str, filename, line, col], - false, - ); - let msg_file_line_col = bx.static_addr_of( - msg_file_line_col, - align, - Some("panic_loc"), - ); - - // Obtain the panic entry point. - let def_id = - common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); - let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = bx.fn_type_of_instance(&instance); - let llfn = bx.get_fn(instance); - - // Codegen the actual panic invoke/call. - do_call( - self, - &mut bx, - fn_ty, - llfn, - &[msg_file_line_col], - destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)), - cleanup, - ); - return; + // The callee needs to own the argument memory if we pass it + // by-ref, so make a local copy of non-immediate constants. 
+ match (arg, op.val) { + (&mir::Operand::Copy(_), Ref(_, None, _)) | + (&mir::Operand::Constant(_), Ref(_, None, _)) => { + let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + op.val.store(&mut bx, tmp); + op.val = Ref(tmp.llval, None, tmp.align); } + _ => {} + } + + self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); + } + if let Some(tup) = untuple { + self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, + &fn_ty.args[first_args.len()..]) + } - // The arguments we'll be passing. Plus one to account for outptr, if used. - let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize; - let mut llargs = Vec::with_capacity(arg_count); + let fn_ptr = match (llfn, instance) { + (Some(llfn), _) => llfn, + (None, Some(instance)) => bx.get_fn(instance), + _ => span_bug!(span, "no llfn for call"), + }; - // Prepare the return value destination - let ret_dest = if let Some((ref dest, _)) = *destination { - let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, - is_intrinsic) - } else { - ReturnDest::Nothing - }; + helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, + destination.as_ref().map(|&(_, target)| (ret_dest, target)), + cleanup); + } +} - if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - let dest = match ret_dest { - _ if fn_ty.ret.is_indirect() => llargs[0], - ReturnDest::Nothing => { - bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))) - } - ReturnDest::IndirectOperand(dst, _) | - ReturnDest::Store(dst) => dst.llval, - ReturnDest::DirectOperand(_) => - bug!("Cannot use direct operand with an intrinsic call") - }; +impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { + pub fn codegen_block( + &mut self, + bb: mir::BasicBlock, + ) { + let mut bx = self.build_block(bb); + let data = &self.mir[bb]; - let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| { - // The indices passed to simd_shuffle* in the - // third argument must be constant. This is - // checked by const-qualification, which also - // promotes any complex rvalues to constants. - if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") { - match *arg { - // The shuffle array argument is usually not an explicit constant, - // but specified directly in the code. This means it gets promoted - // and we can then extract the value by evaluating the promoted. 
- mir::Operand::Copy(mir::Place::Promoted(box(index, ty))) | - mir::Operand::Move(mir::Place::Promoted(box(index, ty))) => { - let param_env = ty::ParamEnv::reveal_all(); - let cid = mir::interpret::GlobalId { - instance: self.instance, - promoted: Some(index), - }; - let c = bx.tcx().const_eval(param_env.and(cid)); - let (llval, ty) = self.simd_shuffle_indices( - &bx, - terminator.source_info.span, - ty, - c, - ); - return OperandRef { - val: Immediate(llval), - layout: bx.layout_of(ty), - }; - - }, - mir::Operand::Copy(_) | - mir::Operand::Move(_) => { - span_bug!(span, "shuffle indices must be constant"); - } - mir::Operand::Constant(ref constant) => { - let c = self.eval_mir_constant(&bx, constant); - let (llval, ty) = self.simd_shuffle_indices( - &bx, - constant.span, - constant.ty, - c, - ); - return OperandRef { - val: Immediate(llval), - layout: bx.layout_of(ty) - }; - } - } - } + debug!("codegen_block({:?}={:?})", bb, data); - self.codegen_operand(&mut bx, arg) - }).collect(); + for statement in &data.statements { + bx = self.codegen_statement(bx, statement); + } + self.codegen_terminator(bx, bb, data.terminator()); + } - let callee_ty = instance.as_ref().unwrap().ty(bx.tcx()); - bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, - terminator.source_info.span); + fn codegen_terminator( + &mut self, + mut bx: Bx, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx> + ) { + debug!("codegen_terminator: {:?}", terminator); - if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); - } + // Create the cleanup bundle, if needed. + let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb); + let helper = TerminatorCodegenHelper { + bb: &bb, terminator, funclet_bb + }; - if let Some((_, target)) = *destination { - funclet_br(self, &mut bx, target); - } else { - bx.unreachable(); - } + self.set_debug_loc(&mut bx, terminator.source_info); + match terminator.kind { + mir::TerminatorKind::Resume => { + self.codegen_resume_terminator(helper, bx) + } - return; - } + mir::TerminatorKind::Abort => { + bx.abort(); + bx.unreachable(); + } - // Split the rust-call tupled arguments off. - let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() { - let (tup, args) = args.split_last().unwrap(); - (args, Some(tup)) - } else { - (&args[..], None) - }; + mir::TerminatorKind::Goto { target } => { + helper.funclet_br(self, &mut bx, target); + } - 'make_args: for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&mut bx, arg); - - if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { - if let Pair(..) = op.val { - // In the case of Rc, we need to explicitly pass a - // *mut RcBox with a Scalar (not ScalarPair) ABI. This is a hack - // that is understood elsewhere in the compiler as a method on - // `dyn Trait`. 
- // To get a `*mut RcBox`, we just keep unwrapping newtypes until - // we get a value of a built-in pointer type - 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr() - && !op.layout.ty.is_region_ptr() - { - 'iter_fields: for i in 0..op.layout.fields.count() { - let field = op.extract_field(&mut bx, i); - if !field.layout.is_zst() { - // we found the one non-zero-sized field that is allowed - // now find *its* non-zero-sized field, or stop if it's a - // pointer - op = field; - continue 'descend_newtypes - } - } - - span_bug!(span, "receiver has no non-zero-sized fields {:?}", op); - } + mir::TerminatorKind::SwitchInt { + ref discr, switch_ty, ref values, ref targets + } => { + self.codegen_switchint_terminator(helper, bx, discr, switch_ty, + values, targets); + } - // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its - // data pointer and vtable. Look up the method in the vtable, and pass - // the data pointer as the first argument - match op.val { - Pair(data_ptr, meta) => { - llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&mut bx, meta, &fn_ty)); - llargs.push(data_ptr); - continue 'make_args - } - other => bug!("expected a Pair, got {:?}", other) - } - } else if let Ref(data_ptr, Some(meta), _) = op.val { - // by-value dynamic dispatch - llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&mut bx, meta, &fn_ty)); - llargs.push(data_ptr); - continue; - } else { - span_bug!(span, "can't codegen a virtual call on {:?}", op); - } - } + mir::TerminatorKind::Return => { + self.codegen_return_terminator(bx); + } - // The callee needs to own the argument memory if we pass it - // by-ref, so make a local copy of non-immediate constants. - match (arg, op.val) { - (&mir::Operand::Copy(_), Ref(_, None, _)) | - (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); - op.val.store(&mut bx, tmp); - op.val = Ref(tmp.llval, None, tmp.align); - } - _ => {} - } + mir::TerminatorKind::Unreachable => { + bx.unreachable(); + } - self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); - } - if let Some(tup) = untuple { - self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, - &fn_ty.args[first_args.len()..]) - } + mir::TerminatorKind::Drop { ref location, target, unwind } => { + self.codegen_drop_terminator(helper, bx, location, target, unwind); + } - let fn_ptr = match (llfn, instance) { - (Some(llfn), _) => llfn, - (None, Some(instance)) => bx.get_fn(instance), - _ => span_bug!(span, "no llfn for call"), - }; + mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { + self.codegen_assert_terminator(helper, bx, terminator, cond, + expected, msg, target, cleanup); + } - do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, - destination.as_ref().map(|&(_, target)| (ret_dest, target)), - cleanup); + mir::TerminatorKind::DropAndReplace { .. } => { + bug!("undesugared DropAndReplace in codegen: {:?}", terminator); + } + + mir::TerminatorKind::Call { + ref func, + ref args, + ref destination, + cleanup, + from_hir_call: _ + } => { + self.codegen_call_terminator(helper, bx, terminator, func, + args, destination, cleanup); } mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => bug!("generator ops in codegen"), @@ -890,7 +1022,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - /// Return the landingpad wrapper around the given basic block + /// Returns the landing-pad wrapper around the given basic block. 
/// /// No-op in MSVC SEH scheme. fn landing_pad_to( @@ -970,7 +1102,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if fn_ret.is_ignore() { return ReturnDest::Nothing; } - let dest = if let mir::Place::Local(index) = *dest { + let dest = if let mir::Place::Base(mir::PlaceBase::Local(index)) = *dest { match self.locals[index] { LocalRef::Place(dest) => dest, LocalRef::UnsizedPlace(_) => bug!("return type must be sized"), @@ -1025,7 +1157,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { src: &mir::Operand<'tcx>, dst: &mir::Place<'tcx> ) { - if let mir::Place::Local(index) = *dst { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *dst { match self.locals[index] { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs index c03fff7806330..3f8dc420cf402 100644 --- a/src/librustc_codegen_ssa/mir/constant.rs +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -1,95 +1,74 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::interpret::ErrorHandled; use rustc_mir::const_eval::const_field; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; -use rustc::mir::interpret::{GlobalId, ConstValue}; use rustc::ty::{self, Ty}; -use rustc::ty::layout; +use rustc::ty::layout::{self, HasTyCtxt}; use syntax::source_map::Span; -use traits::*; +use crate::traits::*; use super::FunctionCx; impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { - fn fully_evaluate( + pub fn eval_mir_constant( &mut self, - bx: &Bx, - constant: &'tcx ty::Const<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { - match constant.val { - ConstValue::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { + constant: &mir::Constant<'tcx>, + ) -> Result, ErrorHandled> { + match constant.literal.val { + mir::interpret::ConstValue::Unevaluated(def_id, ref substs) => { + let substs = self.monomorphize(substs); + let instance = ty::Instance::resolve( + self.cx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs, + ).unwrap(); + let cid = mir::interpret::GlobalId { instance, promoted: None, }; - tcx.const_eval(param_env.and(cid)) + self.cx.tcx().const_eval(ty::ParamEnv::reveal_all().and(cid)) }, - _ => Ok(constant), + _ => Ok(*self.monomorphize(&constant.literal)), } } - pub fn eval_mir_constant( - &mut self, - bx: &Bx, - constant: &mir::Constant<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> { - let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) - } - /// process constant containing SIMD shuffle indices pub fn simd_shuffle_indices( &mut self, bx: &Bx, span: Span, ty: Ty<'tcx>, - constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>, + constant: Result, ErrorHandled>, ) -> (Bx::Value, Ty<'tcx>) { constant - .and_then(|c| { + .map(|c| { let field_ty = c.ty.builtin_index().unwrap(); let fields = match c.ty.sty { ty::Array(_, n) => 
n.unwrap_usize(bx.tcx()), - ref other => bug!("invalid simd shuffle type: {}", other), + _ => bug!("invalid simd shuffle type: {}", c.ty), }; - let values: Result, ErrorHandled> = (0..fields).map(|field| { + let values: Vec<_> = (0..fields).map(|field| { let field = const_field( bx.tcx(), ty::ParamEnv::reveal_all(), - self.instance, None, mir::Field::new(field as usize), c, - )?; + ); if let Some(prim) = field.val.try_to_scalar() { let layout = bx.layout_of(field_ty); let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - Ok(bx.scalar_to_backend( + bx.scalar_to_backend( prim, scalar, bx.immediate_backend_type(layout), - )) + ) } else { bug!("simd shuffle field {:?}", field) } }).collect(); - let llval = bx.const_struct(&values?, false); - Ok((llval, c.ty)) + let llval = bx.const_struct(&values, false); + (llval, c.ty) }) .unwrap_or_else(|_| { bx.tcx().sess.span_err( diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index d316b3ec3508c..91aa9bcc7808b 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -1,24 +1,12 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use libc::c_uint; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; use rustc::ty::layout::{TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; -use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; -use base; -use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; use rustc_mir::monomorphize::Instance; -use rustc_target::abi::call::{FnType, PassMode}; -use traits::*; +use rustc_target::abi::call::{FnType, PassMode, IgnoreMode}; +use crate::base; +use crate::debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; +use crate::traits::*; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -94,8 +82,9 @@ pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> { /// Debug information for MIR scopes. scopes: IndexVec>, - /// If this function is being monomorphized, this contains the type substitutions used. - param_substs: &'tcx Substs<'tcx>, + /// If this function is a C-variadic function, this contains the `PlaceRef` of the + /// "spoofed" `VaList`. 
+ va_list_ref: Option>, } impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { @@ -103,7 +92,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { where T: TypeFoldable<'tcx> { self.cx.tcx().subst_and_normalize_erasing_regions( - self.param_substs, + self.instance.substs, ty::ParamEnv::reveal_all(), value, ) @@ -115,7 +104,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { source_info: mir::SourceInfo ) { let (scope, span) = self.debug_loc(source_info); - bx.set_source_location(&self.debug_context, scope, span); + bx.set_source_location(&mut self.debug_context, scope, span); } pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option, Span) { @@ -185,16 +174,16 @@ enum LocalRef<'tcx, V> { Operand(Option>), } -impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> { - fn new_operand>( - cx: &Cx, +impl<'a, 'tcx: 'a, V: CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + bx: &mut Bx, layout: TyLayout<'tcx>, ) -> LocalRef<'tcx, V> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but // we need something in the operand. - LocalRef::Operand(Some(OperandRef::new_zst(cx, layout))) + LocalRef::Operand(Some(OperandRef::new_zst(bx, layout))) } else { LocalRef::Operand(None) } @@ -210,9 +199,11 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, ) { + assert!(!instance.substs.needs_infer()); + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); - let debug_context = + let mut debug_context = cx.create_function_debug_context(instance, sig, llfn, mir); let mut bx = Bx::new_block(cx, llfn, "start"); @@ -234,7 +225,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }).collect(); // Compute debuginfo scopes from MIR scopes. - let scopes = cx.create_mir_scopes(mir, &debug_context); + let scopes = cx.create_mir_scopes(mir, &mut debug_context); let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { @@ -252,17 +243,18 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( scopes, locals: IndexVec::new(), debug_context, - param_substs: { - assert!(!instance.substs.needs_infer()); - instance.substs - }, + va_list_ref: None, }; let memory_locals = analyze::non_ssa_locals(&fx); // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals); + // FIXME(dlrobertson): This is ugly. 
Find a better way of getting the `PlaceRef` or + // `LocalRef` from `arg_local_refs` + let mut va_list_ref = None; + let args = arg_local_refs(&mut bx, &fx, &memory_locals, &mut va_list_ref); + fx.va_list_ref = va_list_ref; let mut allocate_local = |local| { let decl = &mir.local_decls[local]; @@ -277,7 +269,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx(), layout); + return LocalRef::new_operand(&mut bx, layout); } debug!("alloc: {:?} ({}) -> place", local, name); @@ -303,7 +295,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = fx.cx.get_param(llfn, 0); + let llretptr = bx.get_param(0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); @@ -322,7 +314,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // alloca in advance. Instead we wait until we see the // definition and update the operand there. debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx(), layout) + LocalRef::new_operand(&mut bx, layout) } } }; @@ -342,7 +334,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Up until here, IR instructions for this function have explicitly not been annotated with // source code location, so we don't step into call setup code. From here on, source location // emitting should be enabled. - debuginfo::start_emitting_source_locations(&fx.debug_context); + debuginfo::start_emitting_source_locations(&mut fx.debug_context); let rpo = traversal::reverse_postorder(&mir); let mut visited = BitSet::new_empty(mir.basic_blocks().len()); @@ -432,17 +424,14 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }).unzip() } -/// Produce, for each argument, a `Value` pointing at the +/// Produces, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &FunctionCx<'a, 'tcx, Bx>, - scopes: &IndexVec< - mir::SourceScope, - debuginfo::MirDebugScope - >, memory_locals: &BitSet, + va_list_ref: &mut Option>, ) -> Vec> { let mir = fx.mir; let tcx = fx.cx.tcx(); @@ -450,13 +439,22 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize; // Get the argument scope, if it exists and if we need it. - let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; + let arg_scope = fx.scopes[mir::OUTERMOST_SOURCE_SCOPE]; let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { None }; + // Store the index of the last argument. This is used to + // call va_start on the va_list instead of attempting + // to store_fn_arg. + let last_arg_idx = if fx.fn_ty.args.is_empty() { + None + } else { + Some(fx.fn_ty.args.len() - 1) + }; + mir.args_iter().enumerate().map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; @@ -520,22 +518,23 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // of putting everything in allocas just so we can use llvm.dbg.declare. 
let local = |op| LocalRef::Operand(Some(op)); match arg.mode { - PassMode::Ignore => { - return local(OperandRef::new_zst(bx.cx(), arg.layout)); + PassMode::Ignore(IgnoreMode::Zst) => { + return local(OperandRef::new_zst(bx, arg.layout)); } + PassMode::Ignore(IgnoreMode::CVarArgs) => {} PassMode::Direct(_) => { - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.get_param(llarg_idx); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.get_param(llarg_idx); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -552,16 +551,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. - let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.get_param(llarg_idx); llarg_idx += 1; - let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.get_param(llarg_idx); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); @@ -569,9 +568,35 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( indirect_operand.store(bx, tmp); tmp } else { - let tmp = PlaceRef::alloca(bx, arg.layout, &name); - bx.store_fn_arg(arg, &mut llarg_idx, tmp); - tmp + if fx.fn_ty.c_variadic && last_arg_idx.map(|idx| arg_index == idx).unwrap_or(false) { + let va_list_impl = match arg_decl.ty.ty_adt_def() { + Some(adt) => adt.non_enum_variant(), + None => bug!("`va_list` language item improperly constructed") + }; + match tcx.type_of(va_list_impl.fields[0].did).sty { + ty::Ref(_, ty, _) => { + // If the underlying structure the `VaList` contains is a structure, + // we need to allocate it (e.g., X86_64 on Linux). + let tmp = PlaceRef::alloca(bx, arg.layout, &name); + if let ty::Adt(..) = ty.sty { + let layout = bx.layout_of(ty); + // Create an unnamed allocation for the backing structure + // and store it in the the spoofed `VaList`. + let backing = PlaceRef::alloca(bx, layout, ""); + bx.store(backing.llval, tmp.llval, layout.align.abi); + } + // Call `va_start` on the spoofed `VaList`. + bx.va_start(tmp.llval); + *va_list_ref = Some(tmp); + tmp + } + _ => bug!("improperly constructed `va_list` lang item"), + } + } else { + let tmp = PlaceRef::alloca(bx, arg.layout, &name); + bx.store_fn_arg(arg, &mut llarg_idx, tmp); + tmp + } }; arg_scope.map(|scope| { // Is this a regular argument? @@ -596,10 +621,17 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( return; } + let pin_did = tcx.lang_items().pin_type(); // Or is it the closure environment? let (closure_layout, env_ref) = match arg.layout.ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. 
}) | ty::Ref(_, ty, _) => (bx.layout_of(ty), true), + ty::Adt(def, substs) if Some(def.did) == pin_did => { + match substs.type_at(0).sty { + ty::Ref(_, ty, _) => (bx.layout_of(ty), true), + _ => (arg.layout, false), + } + } _ => (arg.layout, false) }; @@ -610,15 +642,43 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }; let upvar_tys = upvar_substs.upvar_tys(def_id, tcx); - for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { - let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); + let extra_locals = { + let upvars = mir.upvar_decls + .iter() + .zip(upvar_tys) + .enumerate() + .map(|(i, (decl, ty))| (i, decl.debug_name, decl.by_ref, ty)); + + let generator_fields = mir.generator_layout.as_ref().map(|generator_layout| { + let (def_id, gen_substs) = match closure_layout.ty.sty { + ty::Generator(def_id, substs, _) => (def_id, substs), + _ => bug!("generator layout without generator substs"), + }; + let state_tys = gen_substs.state_tys(def_id, tcx); + + let upvar_count = mir.upvar_decls.len(); + generator_layout.fields + .iter() + .zip(state_tys) + .enumerate() + .filter_map(move |(i, (decl, ty))| { + let ty = fx.monomorphize(&ty); + decl.name.map(|name| (i + upvar_count + 1, name, false, ty)) + }) + }).into_iter().flatten(); + + upvars.chain(generator_fields) + }; + + for (field, name, by_ref, ty) in extra_locals { + let byte_offset_of_var_in_env = closure_layout.fields.offset(field).bytes(); let ops = bx.debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env); // The environment and the capture can each be indirect. let mut ops = if env_ref { &ops[..] } else { &ops[1..] }; - let ty = if let (true, &ty::Ref(_, ty, _)) = (decl.by_ref, &ty.sty) { + let ty = if let (true, &ty::Ref(_, ty, _)) = (by_ref, &ty.sty) { ty } else { ops = &ops[..ops.len() - 1]; @@ -631,7 +691,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( }; bx.declare_local( &fx.debug_context, - decl.debug_name, + name, ty, scope, variable_access, diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index a85e75936dedf..c2b1021f816a6 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -1,23 +1,13 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc::mir::interpret::{ConstValue, ErrorHandled}; use rustc::mir; use rustc::ty; use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; -use base; -use MemFlags; -use glue; +use crate::base; +use crate::MemFlags; +use crate::glue; -use traits::*; +use crate::traits::*; use std::fmt; @@ -58,66 +48,64 @@ pub struct OperandRef<'tcx, V> { } impl fmt::Debug for OperandRef<'tcx, V> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { - pub fn new_zst>( - cx: &Cx, + pub fn new_zst>( + bx: &mut Bx, layout: TyLayout<'tcx> ) -> OperandRef<'tcx, V> { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(layout))), + val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))), layout } } pub fn from_const>( bx: &mut Bx, - val: &'tcx ty::Const<'tcx> + val: ty::Const<'tcx> ) -> Result { - let layout = bx.cx().layout_of(val.ty); + let layout = bx.layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx(), layout)); + return Ok(OperandRef::new_zst(bx, layout)); } let val = match val.val { - ConstValue::Unevaluated(..) => bug!(), + ConstValue::Unevaluated(..) => bug!("unevaluated constant in `OperandRef::from_const`"), + ConstValue::Param(_) => bug!("encountered a ConstValue::Param in codegen"), + ConstValue::Infer(_) => bug!("encountered a ConstValue::Infer in codegen"), ConstValue::Scalar(x) => { let scalar = match layout.abi { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = bx.cx().scalar_to_backend( + let llval = bx.scalar_to_backend( x, scalar, - bx.cx().immediate_backend_type(layout), + bx.immediate_backend_type(layout), ); OperandValue::Immediate(llval) }, - ConstValue::ScalarPair(a, b) => { - let (a_scalar, b_scalar) = match layout.abi { - layout::Abi::ScalarPair(ref a, ref b) => (a, b), + ConstValue::Slice(a, b) => { + let a_scalar = match layout.abi { + layout::Abi::ScalarPair(ref a, _) => a, _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = bx.cx().scalar_to_backend( + let a_llval = bx.scalar_to_backend( a, a_scalar, - bx.cx().scalar_pair_element_backend_type(layout, 0, true), - ); - let b_llval = bx.cx().scalar_to_backend( - b, - b_scalar, - bx.cx().scalar_pair_element_backend_type(layout, 1, true), + bx.scalar_pair_element_backend_type(layout, 0, true), ); + let b_llval = bx.const_usize(b); OperandValue::Pair(a_llval, b_llval) }, - ConstValue::ByRef(_, alloc, offset) => { - return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset))); + ConstValue::ByRef(ptr, alloc) => { + return Ok(bx.load_operand(bx.from_const_alloc(layout, alloc, ptr.offset))); }, }; @@ -136,7 +124,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { } } - pub fn deref>( + pub fn deref>( self, cx: &Cx ) -> PlaceRef<'tcx, V> { @@ -211,7 +199,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> { let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. _ if field.is_zst() => { - return OperandRef::new_zst(bx.cx(), field); + return OperandRef::new_zst(bx, field); } // Newtype of a scalar, scalar pair or vector. 
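
Aside (standalone illustration, not compiler code): in the `from_const` hunk above, `ConstValue::Slice(a, b)` is lowered to `OperandValue::Pair(a, const_usize(b))` because a slice or `&str` constant is, at the ABI level, a scalar pair of data pointer and length. The same decomposition is observable from ordinary Rust:

    fn main() {
        let s: &str = "hello";
        // A string slice is represented as (data pointer, length).
        let parts: (*const u8, usize) = (s.as_ptr(), s.len());
        println!("data pointer = {:p}, length = {}", parts.0, parts.1);
        assert_eq!(parts.1, 5);
    }
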
@@ -393,7 +381,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // watch out for locals that do not have an // alloca; they are handled somewhat differently - if let mir::Place::Local(index) = *place { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place { match self.locals[index] { LocalRef::Operand(Some(o)) => { return Some(o); @@ -421,7 +409,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // checks in `codegen_consume` and `extract_field`. let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx(), elem)); + return Some(OperandRef::new_zst(bx, elem)); } } _ => {} @@ -444,7 +432,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx(), layout); + return OperandRef::new_zst(bx, layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -472,7 +460,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Operand::Constant(ref constant) => { let ty = self.monomorphize(&constant.ty); - self.eval_mir_constant(bx, constant) + self.eval_mir_constant(constant) .and_then(|c| OperandRef::from_const(bx, c)) .unwrap_or_else(|err| { match err { diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index 90aa9f6cbc763..1134707f96c92 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -1,38 +1,28 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use MemFlags; -use common::IntPredicate; -use glue; +use crate::MemFlags; +use crate::common::IntPredicate; +use crate::glue; -use traits::*; +use crate::traits::*; use super::{FunctionCx, LocalRef}; use super::operand::OperandValue; #[derive(Copy, Clone, Debug)] pub struct PlaceRef<'tcx, V> { - /// Pointer to the contents of the place + /// Pointer to the contents of the place. pub llval: V, - /// This place's extra data if it is unsized, or null + /// This place's extra data if it is unsized, or null. pub llextra: Option, - /// Monomorphized type of this place, including variant information + /// Monomorphized type of this place, including variant information. pub layout: TyLayout<'tcx>, - /// What alignment we know for this place + /// What alignment we know for this place. 
pub align: Align, } @@ -51,6 +41,21 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } + fn new_thin_place>( + bx: &mut Bx, + llval: V, + layout: TyLayout<'tcx>, + align: Align, + ) -> PlaceRef<'tcx, V> { + assert!(!bx.cx().type_has_metadata(layout.ty)); + PlaceRef { + llval, + llextra: None, + layout, + align + } + } + pub fn alloca>( bx: &mut Bx, layout: TyLayout<'tcx>, @@ -75,7 +80,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { Self::alloca(bx, ptr_layout, name) } - pub fn len>( + pub fn len>( &self, cx: &Cx ) -> V { @@ -211,37 +216,36 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { if self.layout.abi.is_uninhabited() { return bx.cx().const_undef(cast_to); } - match self.layout.variants { + let (discr_scalar, discr_kind, discr_index) = match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index.as_u32() as u128, |def| def.discriminant_for_variant(bx.cx().tcx(), index).val); return bx.cx().const_uint_big(cast_to, discr_val); } - layout::Variants::Tagged { .. } | - layout::Variants::NicheFilling { .. } => {}, - } + layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => { + (discr, discr_kind, discr_index) + } + }; - let discr = self.project_field(bx, 0); + let discr = self.project_field(bx, discr_index); let lldiscr = bx.load_operand(discr).immediate(); - match self.layout.variants { - layout::Variants::Single { .. } => bug!(), - layout::Variants::Tagged { ref tag, .. } => { - let signed = match tag.value { + match *discr_kind { + layout::DiscriminantKind::Tag => { + let signed = match discr_scalar.value { // We use `i1` for bytes that are always `0` or `1`, // e.g., `#[repr(i8)] enum E { A, B }`, but we can't // let LLVM interpret the `i1` as signed, because // then `i1 1` (i.e., E::B) is effectively `i8 -1`. - layout::Int(_, signed) => !tag.is_bool() && signed, + layout::Int(_, signed) => !discr_scalar.is_bool() && signed, _ => false }; bx.intcast(lldiscr, cast_to, signed) } - layout::Variants::NicheFilling { + layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start, - .. } => { let niche_llty = bx.cx().immediate_backend_type(discr.layout); if niche_variants.start() == niche_variants.end() { @@ -272,7 +276,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { } } - /// Set the discriminant for a new value of the given case of the given + /// Sets the discriminant for a new value of the given case of the given /// representation. pub fn codegen_set_discr>( &self, @@ -286,8 +290,12 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { layout::Variants::Single { index } => { assert_eq!(index, variant_index); } - layout::Variants::Tagged { .. } => { - let ptr = self.project_field(bx, 0); + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + discr_index, + .. + } => { + let ptr = self.project_field(bx, discr_index); let to = self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(bx.tcx(), variant_index) .val; @@ -296,10 +304,13 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { ptr.llval, ptr.align); } - layout::Variants::NicheFilling { - dataful_variant, - ref niche_variants, - niche_start, + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { + dataful_variant, + ref niche_variants, + niche_start, + }, + discr_index, .. 
} => { if variant_index != dataful_variant { @@ -312,7 +323,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty()); } - let niche = self.project_field(bx, 0); + let niche = self.project_field(bx, discr_index); let niche_llty = bx.cx().immediate_backend_type(niche.layout); let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); let niche_value = (niche_value as u128) @@ -335,11 +346,20 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, llindex: V ) -> Self { + // Statically compute the offset if we can, otherwise just use the element size, + // as this will yield the lowest alignment. + let layout = self.layout.field(bx, 0); + let offset = if bx.is_const_integral(llindex) { + layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size) + } else { + layout.size + }; + PlaceRef { llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, - layout: self.layout.field(bx.cx(), 0), - align: self.align + layout, + align: self.align.restrict_for_offset(offset), } } @@ -378,7 +398,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let cx = self.cx; let tcx = self.cx.tcx(); - if let mir::Place::Local(index) = *place { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place { match self.locals[index] { LocalRef::Place(place) => { return place; @@ -393,18 +413,22 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } let result = match *place { - mir::Place::Local(_) => bug!(), // handled above - mir::Place::Promoted(box (index, ty)) => { + mir::Place::Base(mir::PlaceBase::Local(_)) => bug!(), // handled above + mir::Place::Base( + mir::PlaceBase::Static( + box mir::Static { ty, kind: mir::StaticKind::Promoted(promoted) } + ) + ) => { let param_env = ty::ParamEnv::reveal_all(); let cid = mir::interpret::GlobalId { instance: self.instance, - promoted: Some(index), + promoted: Some(promoted), }; let layout = cx.layout_of(self.monomorphize(&ty)); match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { - mir::interpret::ConstValue::ByRef(_, alloc, offset) => { - bx.cx().from_const_alloc(layout, alloc, offset) + mir::interpret::ConstValue::ByRef(ptr, alloc) => { + bx.cx().from_const_alloc(layout, alloc, ptr.offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -421,9 +445,16 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } } - mir::Place::Static(box mir::Static { def_id, ty }) => { + mir::Place::Base( + mir::PlaceBase::Static( + box mir::Static { ty, kind: mir::StaticKind::Static(def_id) } + ) + ) => { + // NB: The layout of a static may be unsized as is the case when working + // with a static that is an extern_type. 
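
Aside (standalone sketch, not compiler code): the `DiscriminantKind::Niche` arms of `codegen_get_discr` and `codegen_set_discr` above perform the following arithmetic on backend values; the sketch below restates it with plain integers (variant indices as `u32`, raw tag as `u128`). `Option<&T>` is the classic case: the all-zero niche of `&T` encodes `None`, so no separate tag byte is needed.

    // Encoding: a non-dataful variant is stored as its offset within the niche
    // range, shifted by `niche_start`.
    fn encode(variant: u32, niche_start_variant: u32, niche_start: u128) -> u128 {
        let relative = variant - niche_start_variant;
        (relative as u128).wrapping_add(niche_start)
    }

    // Decoding: a tag inside the niche range maps back to a niched variant;
    // anything else means the dataful variant.
    fn decode(
        tag: u128,
        niche_start_variant: u32,
        niche_end_variant: u32,
        niche_start: u128,
        dataful_variant: u32,
    ) -> u32 {
        let relative = tag.wrapping_sub(niche_start);
        if relative <= (niche_end_variant - niche_start_variant) as u128 {
            niche_start_variant + relative as u32
        } else {
            dataful_variant
        }
    }

    fn main() {
        // Two niched variants (indices 1 and 2) stored at raw tag values 5 and 6;
        // any other tag value decodes to the dataful variant (index 0).
        assert_eq!(encode(1, 1, 5), 5);
        assert_eq!(encode(2, 1, 5), 6);
        assert_eq!(decode(5, 1, 2, 5, 0), 1);
        assert_eq!(decode(6, 1, 2, 5, 0), 2);
        assert_eq!(decode(0, 1, 2, 5, 0), 0);
    }
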
let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(bx.get_static(def_id), layout, layout.align.abi) + let static_ = bx.get_static(def_id); + PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi) }, mir::Place::Projection(box mir::Projection { ref base, @@ -441,7 +472,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cg_base.project_field(bx, field.index()) } mir::ProjectionElem::Index(index) => { - let index = &mir::Operand::Copy(mir::Place::Local(index)); + let index = &mir::Operand::Copy( + mir::Place::Base(mir::PlaceBase::Local(index)) + ); let index = self.codegen_operand(bx, index); let llindex = index.immediate(); cg_base.project_index(bx, llindex) @@ -463,8 +496,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64)); - let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } - .projection_ty(tcx, &projection.elem).to_ty(tcx); + let projected_ty = PlaceTy::from_ty(cg_base.layout.ty) + .projection_ty(tcx, &projection.elem).ty; subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { @@ -492,6 +525,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { let tcx = self.cx.tcx(); let place_ty = place.ty(self.mir, tcx); - self.monomorphize(&place_ty.to_ty(tcx)) + self.monomorphize(&place_ty.ty) } } diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index c932ffd1c1bda..53640284a2ca9 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::layout::{self, LayoutOf, HasTyCtxt}; @@ -16,13 +6,13 @@ use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; use std::{u128, i128}; -use base; -use MemFlags; -use callee; -use common::{self, RealPredicate, IntPredicate}; +use crate::base; +use crate::MemFlags; +use crate::callee; +use crate::common::{self, RealPredicate, IntPredicate}; use rustc_mir::monomorphize; -use traits::*; +use crate::traits::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; @@ -97,11 +87,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if dest.layout.is_zst() { return bx; } - let zero = bx.cx().const_usize(0); - let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let size = bx.cx().const_usize(dest.layout.size.bytes()); + let zero = bx.const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; + let size = bx.const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { @@ -118,27 +108,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - let count = bx.cx().const_usize(count); - let end = dest.project_index(&mut bx, count).llval; - - let mut header_bx = bx.build_sibling_block("repeat_loop_header"); - let mut body_bx = bx.build_sibling_block("repeat_loop_body"); - let next_bx = bx.build_sibling_block("repeat_loop_next"); - - bx.br(header_bx.llbb()); - let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); - - let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); - header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - - cg_elem.val.store(&mut body_bx, - PlaceRef::new_sized(current, cg_elem.layout, dest.align)); - - let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); - body_bx.br(header_bx.llbb()); - header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); - - next_bx + bx.write_operand_repeatedly(cg_elem, count, dest) } mir::Rvalue::Aggregate(ref kind, ref operands) => { @@ -223,7 +193,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } } - mir::CastKind::ClosureFnPointer => { + mir::CastKind::ClosureFnPointer(_) => { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( @@ -266,7 +236,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } } - mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { + mir::CastKind::MutToConstPointer + | mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { if bx.cx().is_backend_scalar_pair(cast) { let data_cast = bx.pointercast(data_ptr, @@ -283,7 +254,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bug!("Unexpected non-Pair operand") } } - mir::CastKind::Misc => { + mir::CastKind::MutToConstPointer + | mir::CastKind::Misc => { assert!(bx.cx().is_backend_immediate(cast)); let ll_t_out = bx.cx().immediate_backend_type(cast); if operand.layout.abi.is_uninhabited() { @@ -310,8 +282,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }); } } - layout::Variants::Tagged { .. } | - layout::Variants::NicheFilling { .. 
} => {}, + layout::Variants::Multiple { .. } => {}, } let llval = operand.immediate(); @@ -530,8 +501,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. let ty = rvalue.ty(self.mir, self.cx.tcx()); - (bx, OperandRef::new_zst(self.cx, - self.cx.layout_of(self.monomorphize(&ty)))) + let operand = OperandRef::new_zst( + &mut bx, + self.cx.layout_of(self.monomorphize(&ty)), + ); + (bx, operand) } } } @@ -543,7 +517,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ) -> Bx::Value { // ZST are passed as operands and require special handling // because codegen_place() panics if Local is operand. - if let mir::Place::Local(index) = *place { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { let n = n.unwrap_usize(bx.cx().tcx()); @@ -757,7 +731,6 @@ fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>( // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. use rustc_apfloat::ieee::Single; - use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); diff --git a/src/librustc_codegen_ssa/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs index 568a7e7e1600f..618d05245d2ca 100644 --- a/src/librustc_codegen_ssa/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -1,20 +1,10 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
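
Aside (standalone sketch, not compiler code): the `Rvalue::Repeat` hunk in rvalue.rs above keeps the memset fast path for all-zero elements and delegates the general case to the new `write_operand_repeatedly` builder method instead of open-coding the loop. The decision it makes can be mimicked on plain byte buffers:

    // Fill `dest` with repeated copies of `elem`, choosing between a memset-style
    // path and a store-per-element loop, analogous to the codegen strategy above.
    fn fill_repeat(dest: &mut [u8], elem: &[u8]) {
        if elem.is_empty() {
            return; // ZST element: nothing to store, mirroring the early return
        }
        assert_eq!(dest.len() % elem.len(), 0);
        if elem.iter().all(|&b| b == 0) {
            // memset path: an all-zero element lets the destination be zeroed at once
            dest.iter_mut().for_each(|b| *b = 0);
        } else {
            // loop path: store the element once per slot
            for chunk in dest.chunks_exact_mut(elem.len()) {
                chunk.copy_from_slice(elem);
            }
        }
    }

    fn main() {
        let mut buf = [0xFFu8; 8];
        fill_repeat(&mut buf, &[0, 0]);
        assert_eq!(buf, [0u8; 8]);

        let mut buf = [0u8; 8];
        fill_repeat(&mut buf, &[1, 2]);
        assert_eq!(buf, [1, 2, 1, 2, 1, 2, 1, 2]);
    }
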
- use rustc::mir; -use traits::BuilderMethods; +use crate::traits::BuilderMethods; use super::FunctionCx; use super::LocalRef; use super::OperandValue; -use traits::*; +use crate::traits::*; impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { pub fn codegen_statement( @@ -27,7 +17,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.set_debug_loc(&mut bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref place, ref rvalue) => { - if let mir::Place::Local(index) = *place { + if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place { match self.locals[index] { LocalRef::Place(cg_dest) => { self.codegen_rvalue(bx, cg_dest, rvalue) @@ -78,13 +68,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } bx } - mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { - let outputs = outputs.iter().map(|output| { + mir::StatementKind::InlineAsm(ref asm) => { + let outputs = asm.outputs.iter().map(|output| { self.codegen_place(&mut bx, output) }).collect(); - let input_vals = inputs.iter() - .fold(Vec::with_capacity(inputs.len()), |mut acc, (span, input)| { + let input_vals = asm.inputs.iter() + .fold(Vec::with_capacity(asm.inputs.len()), |mut acc, (span, input)| { let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); @@ -95,8 +85,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { acc }); - if input_vals.len() == inputs.len() { - let res = bx.codegen_inline_asm(asm, outputs, input_vals); + if input_vals.len() == asm.inputs.len() { + let res = bx.codegen_inline_asm(&asm.asm, outputs, input_vals); if !res { span_err!(bx.sess(), statement.source_info.span, E0668, "malformed inline assembly"); @@ -106,7 +96,6 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } mir::StatementKind::FakeRead(..) | mir::StatementKind::Retag { .. } | - mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => bx, } diff --git a/src/librustc_codegen_ssa/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs index 26f8a9a5dd4a0..48159d7979923 100644 --- a/src/librustc_codegen_ssa/mono_item.rs +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -1,20 +1,10 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use base; use rustc::hir; use rustc::hir::def::Def; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::layout::HasTyCtxt; use std::fmt; -use traits::*; +use crate::base; +use crate::traits::*; pub use rustc::mir::mono::MonoItem; @@ -23,7 +13,7 @@ pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { fn define>(&self, cx: &'a Bx::CodegenCx) { debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx()), + self.to_string(cx.tcx(), true), self.to_raw_string(), cx.codegen_unit().name()); @@ -41,8 +31,8 @@ pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { }; cx.codegen_static(def_id, is_mutable); } - MonoItem::GlobalAsm(node_id) => { - let item = cx.tcx().hir().expect_item(node_id); + MonoItem::GlobalAsm(hir_id) => { + let item = cx.tcx().hir().expect_item_by_hir_id(hir_id); if let hir::ItemKind::GlobalAsm(ref ga) = item.node { cx.codegen_global_asm(ga); } else { @@ -55,7 +45,7 @@ pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { } debug!("END IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx()), + self.to_string(cx.tcx(), true), self.to_raw_string(), cx.codegen_unit().name()); } @@ -67,7 +57,7 @@ pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { visibility: Visibility ) { debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx()), + self.to_string(cx.tcx(), true), self.to_raw_string(), cx.codegen_unit().name()); @@ -86,7 +76,7 @@ pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { } debug!("END PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx()), + self.to_string(cx.tcx(), true), self.to_raw_string(), cx.codegen_unit().name()); } diff --git a/src/librustc_codegen_ssa/traits/abi.rs b/src/librustc_codegen_ssa/traits/abi.rs index c659a99e1c998..a8fd4e1d2c7c7 100644 --- a/src/librustc_codegen_ssa/traits/abi.rs +++ b/src/librustc_codegen_ssa/traits/abi.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::BackendTypes; use rustc::ty::{FnSig, Instance, Ty}; use rustc_target::abi::call::FnType; @@ -20,4 +10,5 @@ pub trait AbiMethods<'tcx> { pub trait AbiBuilderMethods<'tcx>: BackendTypes { fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value); + fn get_param(&self, index: usize) -> Self::Value; } diff --git a/src/librustc_codegen_ssa/traits/asm.rs b/src/librustc_codegen_ssa/traits/asm.rs index 0e56fe46a313c..a95bf3af5bf27 100644 --- a/src/librustc_codegen_ssa/traits/asm.rs +++ b/src/librustc_codegen_ssa/traits/asm.rs @@ -1,15 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::BackendTypes; -use mir::place::PlaceRef; +use crate::mir::place::PlaceRef; use rustc::hir::{GlobalAsm, InlineAsm}; pub trait AsmBuilderMethods<'tcx>: BackendTypes { diff --git a/src/librustc_codegen_ssa/traits/backend.rs b/src/librustc_codegen_ssa/traits/backend.rs index b59f970ae06a4..a9e0eadb198a8 100644 --- a/src/librustc_codegen_ssa/traits/backend.rs +++ b/src/librustc_codegen_ssa/traits/backend.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::layout::{HasTyCtxt, LayoutOf, TyLayout}; use rustc::ty::Ty; @@ -16,7 +6,7 @@ use super::CodegenObject; use rustc::middle::allocator::AllocatorKind; use rustc::middle::cstore::EncodedMetadata; use rustc::mir::mono::Stats; -use rustc::session::Session; +use rustc::session::{Session, config}; use rustc::ty::TyCtxt; use rustc_codegen_utils::codegen_backend::CodegenBackend; use std::sync::Arc; @@ -42,13 +32,18 @@ impl<'tcx, T> Backend<'tcx> for T where } pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send { - fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module; + fn new_metadata(&self, sess: TyCtxt<'_, '_, '_>, mod_name: &str) -> Self::Module; fn write_metadata<'b, 'gcx>( &self, tcx: TyCtxt<'b, 'gcx, 'gcx>, - metadata: &Self::Module, + metadata: &mut Self::Module, ) -> EncodedMetadata; - fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Module, kind: AllocatorKind); + fn codegen_allocator<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'gcx>, + mods: &mut Self::Module, + kind: AllocatorKind + ); fn compile_codegen_unit<'a, 'tcx: 'a>( &self, tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -60,6 +55,7 @@ pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Se fn target_machine_factory( &self, sess: &Session, + opt_level: config::OptLevel, find_features: bool, ) -> Arc Result + Send + Sync>; fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str; diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index c1349329c17ec..48142fc9fa9f4 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -1,30 +1,18 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::abi::AbiBuilderMethods; use super::asm::AsmBuilderMethods; use super::debuginfo::DebugInfoBuilderMethods; use super::intrinsic::IntrinsicCallMethods; use super::type_::ArgTypeMethods; use super::{HasCodegen, StaticBuilderMethods}; -use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; -use mir::operand::OperandRef; -use mir::place::PlaceRef; +use crate::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, + SynchronizationScope}; +use crate::mir::operand::OperandRef; +use crate::mir::place::PlaceRef; +use crate::MemFlags; use rustc::ty::Ty; use rustc::ty::layout::{Align, Size}; -use std::ffi::CStr; -use MemFlags; - -use std::borrow::Cow; use std::ops::Range; -use syntax::ast::AsmDialect; +use std::iter::TrustedLen; #[derive(Copy, Clone)] pub enum OverflowOp { @@ -46,13 +34,9 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn with_cx(cx: &'a Self::CodegenCx) -> Self; fn build_sibling_block<'b>(&self, name: &'b str) -> Self; fn cx(&self) -> &Self::CodegenCx; - fn llfn(&self) -> Self::Value; fn llbb(&self) -> Self::BasicBlock; - fn count_insn(&self, category: &str); - fn set_value_name(&mut self, value: Self::Value, name: &str); fn position_at_end(&mut self, llbb: Self::BasicBlock); - fn position_at_start(&mut self, llbb: Self::BasicBlock); fn ret_void(&mut self); fn ret(&mut self, v: Self::Value); fn br(&mut self, dest: Self::BasicBlock); @@ -66,8 +50,8 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: &mut self, v: Self::Value, else_llbb: Self::BasicBlock, - num_cases: usize, - ) -> Self::Value; + cases: impl ExactSizeIterator + TrustedLen, + ); fn invoke( &mut self, llfn: Self::Value, @@ -77,6 +61,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: funclet: Option<&Self::Funclet>, ) -> Self::Value; fn unreachable(&mut self); + fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; @@ -109,7 +94,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn checked_binop( &mut self, oop: OverflowOp, - ty: Ty, + ty: Ty<'_>, lhs: Self::Value, rhs: Self::Value, ) -> (Self::Value, Self::Value); @@ -130,6 +115,14 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>) -> OperandRef<'tcx, Self::Value>; + /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset. 
+ fn write_operand_repeatedly( + self, + elem: OperandRef<'tcx, Self::Value>, + count: u64, + dest: PlaceRef<'tcx, Self::Value>, + ) -> Self; + fn range_metadata(&mut self, load: Self::Value, range: Range); fn nonnull_metadata(&mut self, load: Self::Value); @@ -170,24 +163,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn empty_phi(&mut self, ty: Self::Type) -> Self::Value; - fn phi( - &mut self, - ty: Self::Type, - vals: &[Self::Value], - bbs: &[Self::BasicBlock], - ) -> Self::Value; - fn inline_asm_call( - &mut self, - asm: &CStr, - cons: &CStr, - inputs: &[Self::Value], - output: Self::Type, - volatile: bool, - alignstack: bool, - dia: AsmDialect, - ) -> Option; - fn memcpy( &mut self, dst: Self::Value, @@ -215,8 +190,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: flags: MemFlags, ); - fn minnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; - fn maxnum(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value; fn select( &mut self, cond: Self::Value, @@ -226,32 +199,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value; fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value; - fn insert_element( - &mut self, - vec: Self::Value, - elt: Self::Value, - idx: Self::Value, - ) -> Self::Value; - fn shuffle_vector( - &mut self, - v1: Self::Value, - v2: Self::Value, - mask: Self::Value, - ) -> Self::Value; fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value; - fn vector_reduce_fadd_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_fmul_fast(&mut self, acc: Self::Value, src: Self::Value) -> Self::Value; - fn vector_reduce_add(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_mul(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_and(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_or(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_xor(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmin_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_fmax_fast(&mut self, src: Self::Value) -> Self::Value; - fn vector_reduce_min(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; - fn vector_reduce_max(&mut self, src: Self::Value, is_signed: bool) -> Self::Value; fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value; fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value; @@ -261,7 +209,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: pers_fn: Self::Value, num_clauses: usize, ) -> Self::Value; - fn add_clause(&mut self, landing_pad: Self::Value, clause: Self::Value); fn set_cleanup(&mut self, landing_pad: Self::Value); fn resume(&mut self, exn: Self::Value) -> Self::Value; fn cleanup_pad(&mut self, parent: Option, args: &[Self::Value]) -> Self::Funclet; @@ -271,7 +218,6 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: unwind: Option, ) -> Self::Value; fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet; - fn catch_ret(&mut self, funclet: &Self::Funclet, unwind: Self::BasicBlock) -> Self::Value; fn catch_switch( &mut self, parent: Option, @@ -298,23 +244,8 
@@ pub trait BuilderMethods<'a, 'tcx: 'a>: order: AtomicOrdering, ) -> Self::Value; fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); - fn add_case(&mut self, s: Self::Value, on_val: Self::Value, dest: Self::BasicBlock); - fn add_incoming_to_phi(&mut self, phi: Self::Value, val: Self::Value, bb: Self::BasicBlock); fn set_invariant_load(&mut self, load: Self::Value); - /// Returns the ptr value that should be used for storing `val`. - fn check_store(&mut self, val: Self::Value, ptr: Self::Value) -> Self::Value; - - /// Returns the args that should be used for a call to `llfn`. - fn check_call<'b>( - &mut self, - typ: &str, - llfn: Self::Value, - args: &'b [Self::Value], - ) -> Cow<'b, [Self::Value]> - where - [Self::Value]: ToOwned; - /// Called for `StorageLive` fn lifetime_start(&mut self, ptr: Self::Value, size: Size); diff --git a/src/librustc_codegen_ssa/traits/consts.rs b/src/librustc_codegen_ssa/traits/consts.rs index af49410794efb..32412f303c155 100644 --- a/src/librustc_codegen_ssa/traits/consts.rs +++ b/src/librustc_codegen_ssa/traits/consts.rs @@ -1,19 +1,8 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::BackendTypes; -use mir::place::PlaceRef; +use crate::mir::place::PlaceRef; use rustc::mir::interpret::Allocation; use rustc::mir::interpret::Scalar; use rustc::ty::layout; -use syntax::symbol::LocalInternedString; pub trait ConstMethods<'tcx>: BackendTypes { // Constant constructors @@ -29,24 +18,12 @@ pub trait ConstMethods<'tcx>: BackendTypes { fn const_usize(&self, i: u64) -> Self::Value; fn const_u8(&self, i: u8) -> Self::Value; - // This is a 'c-like' raw string, which differs from - // our boxed-and-length-annotated strings. - fn const_cstr(&self, s: LocalInternedString, null_terminated: bool) -> Self::Value; - - fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; - fn const_fat_ptr(&self, ptr: Self::Value, meta: Self::Value) -> Self::Value; fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value; - fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; - fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; - fn const_bytes(&self, bytes: &[u8]) -> Self::Value; - fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; - fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; fn const_to_uint(&self, v: Self::Value) -> u64; fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; fn is_const_integral(&self, v: Self::Value) -> bool; - fn is_const_real(&self, v: Self::Value) -> bool; fn scalar_to_backend( &self, diff --git a/src/librustc_codegen_ssa/traits/debuginfo.rs b/src/librustc_codegen_ssa/traits/debuginfo.rs index c4becf37059e3..a0b53fde09c4d 100644 --- a/src/librustc_codegen_ssa/traits/debuginfo.rs +++ b/src/librustc_codegen_ssa/traits/debuginfo.rs @@ -1,15 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::BackendTypes; -use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; +use crate::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; use rustc::hir::def_id::CrateNum; use rustc::mir; use rustc::ty::{self, Ty}; @@ -32,13 +22,13 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes { instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, llfn: Self::Value, - mir: &mir::Mir, + mir: &mir::Mir<'_>, ) -> FunctionDebugContext; fn create_mir_scopes( &self, - mir: &mir::Mir, - debug_context: &FunctionDebugContext, + mir: &mir::Mir<'_>, + debug_context: &mut FunctionDebugContext, ) -> IndexVec>; fn extend_scope_to_file( &self, @@ -63,9 +53,10 @@ pub trait DebugInfoBuilderMethods<'tcx>: BackendTypes { ); fn set_source_location( &mut self, - debug_context: &FunctionDebugContext, + debug_context: &mut FunctionDebugContext, scope: Option, span: Span, ); fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); + fn set_value_name(&mut self, value: Self::Value, name: &str); } diff --git a/src/librustc_codegen_ssa/traits/declare.rs b/src/librustc_codegen_ssa/traits/declare.rs index 611e5f758a781..6a400a7d7a45d 100644 --- a/src/librustc_codegen_ssa/traits/declare.rs +++ b/src/librustc_codegen_ssa/traits/declare.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::BackendTypes; use rustc::hir::def_id::DefId; use rustc::mir::mono::{Linkage, Visibility}; @@ -39,7 +29,7 @@ pub trait DeclareMethods<'tcx>: BackendTypes { /// Declare a global with an intention to define it. /// /// Use this function when you intend to define a global. This function will - /// return None if the name already has a definition associated with it. In that + /// return `None` if the name already has a definition associated with it. In that /// case an error should be reported to the user, because it usually happens due /// to user’s fault (e.g., misuse of #[no_mangle] or #[export_name] attributes). fn define_global(&self, name: &str, ty: Self::Type) -> Option; @@ -63,10 +53,10 @@ pub trait DeclareMethods<'tcx>: BackendTypes { /// can happen with #[no_mangle] or #[export_name], for example. fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value; - /// Get declared value by name. + /// Gets declared value by name. fn get_declared_value(&self, name: &str) -> Option; - /// Get defined or externally defined (AvailableExternally linkage) value by + /// Gets defined or externally defined (AvailableExternally linkage) value by /// name. fn get_defined_value(&self, name: &str) -> Option; } diff --git a/src/librustc_codegen_ssa/traits/intrinsic.rs b/src/librustc_codegen_ssa/traits/intrinsic.rs index abc118e770852..cd5278989778f 100644 --- a/src/librustc_codegen_ssa/traits/intrinsic.rs +++ b/src/librustc_codegen_ssa/traits/intrinsic.rs @@ -1,15 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::BackendTypes; -use mir::operand::OperandRef; +use crate::mir::operand::OperandRef; use rustc::ty::Ty; use rustc_target::abi::call::FnType; use syntax_pos::Span; @@ -30,4 +20,10 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes { fn abort(&mut self); fn assume(&mut self, val: Self::Value); fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value; + /// Trait method used to inject `va_start` on the "spoofed" `VaList` in + /// Rust defined C-variadic functions. + fn va_start(&mut self, val: Self::Value) -> Self::Value; + /// Trait method used to inject `va_end` on the "spoofed" `VaList` before + /// Rust defined C-variadic functions return. + fn va_end(&mut self, val: Self::Value) -> Self::Value; } diff --git a/src/librustc_codegen_ssa/traits/misc.rs b/src/librustc_codegen_ssa/traits/misc.rs index 57afb800d014c..2797dd89f5b15 100644 --- a/src/librustc_codegen_ssa/traits/misc.rs +++ b/src/librustc_codegen_ssa/traits/misc.rs @@ -1,15 +1,4 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::BackendTypes; -use libc::c_uint; use rustc::mir::mono::Stats; use rustc::session::Session; use rustc::ty::{self, Instance, Ty}; @@ -21,11 +10,10 @@ use std::sync::Arc; pub trait MiscMethods<'tcx>: BackendTypes { fn vtables( &self, - ) -> &RefCell, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>; + ) -> &RefCell, Option>), Self::Value>>; fn check_overflow(&self) -> bool; fn instances(&self) -> &RefCell, Self::Value>>; fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; - fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value; fn eh_personality(&self) -> Self::Value; fn eh_unwind_resume(&self) -> Self::Value; fn sess(&self) -> &Session; diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs index 6251fc3d3f30e..8fe8b7ecd4709 100644 --- a/src/librustc_codegen_ssa/traits/mod.rs +++ b/src/librustc_codegen_ssa/traits/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Interface of a Rust codegen backend //! //! This crate defines all the traits that have to be implemented by a codegen backend in order to diff --git a/src/librustc_codegen_ssa/traits/statics.rs b/src/librustc_codegen_ssa/traits/statics.rs index 0e665fc29fc19..d8992c159337d 100644 --- a/src/librustc_codegen_ssa/traits/statics.rs +++ b/src/librustc_codegen_ssa/traits/statics.rs @@ -1,14 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::BackendTypes; +use syntax_pos::symbol::LocalInternedString; use rustc::hir::def_id::DefId; use rustc::ty::layout::Align; @@ -18,5 +9,13 @@ pub trait StaticMethods: BackendTypes { } pub trait StaticBuilderMethods<'tcx>: BackendTypes { - fn get_static(&self, def_id: DefId) -> Self::Value; + fn get_static(&mut self, def_id: DefId) -> Self::Value; + fn static_panic_msg( + &mut self, + msg: Option, + filename: LocalInternedString, + line: Self::Value, + col: Self::Value, + kind: &str, + ) -> Self::Value; } diff --git a/src/librustc_codegen_ssa/traits/type_.rs b/src/librustc_codegen_ssa/traits/type_.rs index ed53c8fffa7aa..efc18d401c082 100644 --- a/src/librustc_codegen_ssa/traits/type_.rs +++ b/src/librustc_codegen_ssa/traits/type_.rs @@ -1,72 +1,45 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::misc::MiscMethods; use super::Backend; use super::HasCodegen; -use common::{self, TypeKind}; -use mir::place::PlaceRef; -use rustc::ty::layout::{self, Align, Size, TyLayout}; +use crate::common::TypeKind; +use crate::mir::place::PlaceRef; use rustc::ty::{self, Ty}; -use rustc::util::nodemap::FxHashMap; +use rustc::ty::layout::{self, TyLayout}; use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; -use std::cell::RefCell; -use syntax::ast; +use syntax_pos::DUMMY_SP; // This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use // `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves. pub trait BaseTypeMethods<'tcx>: Backend<'tcx> { - fn type_void(&self) -> Self::Type; - fn type_metadata(&self) -> Self::Type; fn type_i1(&self) -> Self::Type; fn type_i8(&self) -> Self::Type; fn type_i16(&self) -> Self::Type; fn type_i32(&self) -> Self::Type; fn type_i64(&self) -> Self::Type; fn type_i128(&self) -> Self::Type; - - // Creates an integer type with the given number of bits, e.g., i24 - fn type_ix(&self, num_bits: u64) -> Self::Type; fn type_isize(&self) -> Self::Type; fn type_f32(&self) -> Self::Type; fn type_f64(&self) -> Self::Type; - fn type_x86_mmx(&self) -> Self::Type; fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; - fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; - fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; - fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; fn type_kind(&self, ty: Self::Type) -> TypeKind; fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; fn element_type(&self, ty: Self::Type) -> Self::Type; - /// Return the number of elements in `self` if it is a LLVM vector type. + /// Returns the number of elements in `self` if it is a LLVM vector type. fn vector_length(&self, ty: Self::Type) -> usize; - fn func_params_types(&self, ty: Self::Type) -> Vec; fn float_width(&self, ty: Self::Type) -> usize; - /// Retrieve the bit width of the integer type `self`. + /// Retrieves the bit width of the integer type `self`. 
fn int_width(&self, ty: Self::Type) -> u64; fn val_ty(&self, v: Self::Value) -> Self::Type; - fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; } pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { - fn type_bool(&self) -> Self::Type { - self.type_i8() - } - fn type_i8p(&self) -> Self::Type { self.type_ptr_to(self.type_i8()) } @@ -80,35 +53,6 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } } - fn type_int_from_ty(&self, t: ast::IntTy) -> Self::Type { - match t { - ast::IntTy::Isize => self.type_isize(), - ast::IntTy::I8 => self.type_i8(), - ast::IntTy::I16 => self.type_i16(), - ast::IntTy::I32 => self.type_i32(), - ast::IntTy::I64 => self.type_i64(), - ast::IntTy::I128 => self.type_i128(), - } - } - - fn type_uint_from_ty(&self, t: ast::UintTy) -> Self::Type { - match t { - ast::UintTy::Usize => self.type_isize(), - ast::UintTy::U8 => self.type_i8(), - ast::UintTy::U16 => self.type_i16(), - ast::UintTy::U32 => self.type_i32(), - ast::UintTy::U64 => self.type_i64(), - ast::UintTy::U128 => self.type_i128(), - } - } - - fn type_float_from_ty(&self, t: ast::FloatTy) -> Self::Type { - match t { - ast::FloatTy::F32 => self.type_f32(), - ast::FloatTy::F64 => self.type_f64(), - } - } - fn type_from_integer(&self, i: layout::Integer) -> Self::Type { use rustc::ty::layout::Integer::*; match i { @@ -120,36 +64,19 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { } } - fn type_pointee_for_align(&self, align: Align) -> Self::Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = layout::Integer::approximate_align(self, align); - self.type_from_integer(ity) - } - - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. - fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type { - let unit = layout::Integer::approximate_align(self, align); - let size = size.bytes(); - let unit_size = unit.size().bytes(); - assert_eq!(size % unit_size, 0); - self.type_array(self.type_from_integer(unit), size / unit_size) - } - fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { - common::type_needs_drop(self.tcx(), ty) + ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all()) } fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { - common::type_is_sized(self.tcx(), ty) + ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) } fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { - common::type_is_freeze(self.tcx(), ty) + ty.is_freeze(self.tcx(), ty::ParamEnv::reveal_all(), DUMMY_SP) } fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - use syntax_pos::DUMMY_SP; if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { return false; } @@ -158,7 +85,7 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> { match tail.sty { ty::Foreign(..) => false, ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), + _ => bug!("unexpected unsized tail: {:?}", tail), } } } @@ -168,7 +95,6 @@ impl DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscM pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> { fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type; fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; - fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; fn reg_backend_type(&self, ty: &Reg) -> Self::Type; fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type; diff --git a/src/librustc_codegen_ssa/traits/write.rs b/src/librustc_codegen_ssa/traits/write.rs index edc5c2717bc4f..23bb7179557b9 100644 --- a/src/librustc_codegen_ssa/traits/write.rs +++ b/src/librustc_codegen_ssa/traits/write.rs @@ -1,19 +1,8 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; -use back::write::{CodegenContext, ModuleConfig}; -use {CompiledModule, ModuleCodegen}; +use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use crate::back::write::{CodegenContext, ModuleConfig, FatLTOInput}; +use crate::{CompiledModule, ModuleCodegen}; use rustc::dep_graph::WorkProduct; -use rustc::util::time_graph::Timeline; use rustc_errors::{FatalError, Handler}; pub trait WriteBackendMethods: 'static + Sized + Clone { @@ -28,8 +17,8 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { /// for further optimization. 
fn run_fat_lto( cgcx: &CodegenContext, - modules: Vec>, - timeline: &mut Timeline, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, ) -> Result, FatalError>; /// Performs thin LTO by performing necessary global analysis and returning two /// lists, one of the modules that need optimization and another for modules that @@ -38,7 +27,6 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { cgcx: &CodegenContext, modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, - timeline: &mut Timeline, ) -> Result<(Vec>, Vec), FatalError>; fn print_pass_timings(&self); unsafe fn optimize( @@ -46,24 +34,23 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { diag_handler: &Handler, module: &ModuleCodegen, config: &ModuleConfig, - timeline: &mut Timeline, ) -> Result<(), FatalError>; unsafe fn optimize_thin( cgcx: &CodegenContext, thin: &mut ThinModule, - timeline: &mut Timeline, ) -> Result, FatalError>; unsafe fn codegen( cgcx: &CodegenContext, diag_handler: &Handler, module: ModuleCodegen, config: &ModuleConfig, - timeline: &mut Timeline, ) -> Result; fn prepare_thin( - cgcx: &CodegenContext, module: ModuleCodegen ) -> (String, Self::ThinBuffer); + fn serialize_module( + module: ModuleCodegen + ) -> (String, Self::ModuleBuffer); fn run_lto_pass_manager( cgcx: &CodegenContext, llmod: &ModuleCodegen, diff --git a/src/librustc_codegen_utils/Cargo.toml b/src/librustc_codegen_utils/Cargo.toml index 34a09f30b6411..c75208b9e06c1 100644 --- a/src/librustc_codegen_utils/Cargo.toml +++ b/src/librustc_codegen_utils/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_codegen_utils" version = "0.0.0" +edition = "2018" [lib] name = "rustc_codegen_utils" @@ -20,4 +21,3 @@ rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_metadata = { path = "../librustc_metadata" } rustc_mir = { path = "../librustc_mir" } -rustc_incremental = { path = "../librustc_incremental" } diff --git a/src/librustc_codegen_utils/codegen_backend.rs b/src/librustc_codegen_utils/codegen_backend.rs index 74718460f562f..56eaffb1ca31d 100644 --- a/src/librustc_codegen_utils/codegen_backend.rs +++ b/src/librustc_codegen_utils/codegen_backend.rs @@ -1,48 +1,25 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The Rust compiler. //! //! # Note //! //! This API is completely unstable and subject to change. 
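Stepping back to the `type_has_metadata` helper in the type_.rs hunk above: it returns `false` for sized types and `true` only when the unsized tail is a `str`, slice, or trait object, i.e. exactly when a reference to the type is a fat pointer. A standalone illustration of that distinction in plain Rust (independent of the compiler crates; the `Draw` trait is made up):

```rust
use std::mem::size_of;

trait Draw {}

fn main() {
    // Thin pointers: the pointee is `Sized`, so no metadata is carried.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    assert_eq!(size_of::<&[u8; 4]>(), size_of::<usize>());

    // Fat pointers: `str`, slices and trait objects carry metadata
    // (a length or a vtable pointer), doubling the pointer size.
    assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&dyn Draw>(), 2 * size_of::<usize>());
}
```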
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![deny(warnings)] #![feature(box_syntax)] use std::any::Any; -use std::io::Write; -use std::fs; -use std::path::Path; -use std::sync::{mpsc, Arc}; - -use rustc_data_structures::owning_ref::OwningRef; -use flate2::Compression; -use flate2::write::DeflateEncoder; +use std::sync::mpsc; use syntax::symbol::Symbol; -use rustc::hir::def_id::LOCAL_CRATE; -use rustc::session::{Session, CompileIncomplete}; -use rustc::session::config::{CrateType, OutputFilenames, PrintRequest}; +use rustc::session::Session; +use rustc::util::common::ErrorReported; +use rustc::session::config::{OutputFilenames, PrintRequest}; use rustc::ty::TyCtxt; use rustc::ty::query::Providers; -use rustc::middle::cstore::EncodedMetadata; use rustc::middle::cstore::MetadataLoader; use rustc::dep_graph::DepGraph; -use rustc_target::spec::Target; -use rustc_mir::monomorphize::collector; -use link::out_filename; pub use rustc_data_structures::sync::MetadataRef; @@ -55,8 +32,8 @@ pub trait CodegenBackend { fn diagnostics(&self) -> &[(&'static str, &'static str)] { &[] } fn metadata_loader(&self) -> Box; - fn provide(&self, _providers: &mut Providers); - fn provide_extern(&self, _providers: &mut Providers); + fn provide(&self, _providers: &mut Providers<'_>); + fn provide_extern(&self, _providers: &mut Providers<'_>); fn codegen_crate<'a, 'tcx>( &self, tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -74,146 +51,5 @@ pub trait CodegenBackend { sess: &Session, dep_graph: &DepGraph, outputs: &OutputFilenames, - ) -> Result<(), CompileIncomplete>; -} - -pub struct NoLlvmMetadataLoader; - -impl MetadataLoader for NoLlvmMetadataLoader { - fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result { - let buf = fs::read(filename).map_err(|e| format!("metadata file open err: {:?}", e))?; - let buf: OwningRef, [u8]> = OwningRef::new(buf); - Ok(rustc_erase_owner!(buf.map_owner_box())) - } - - fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result { - self.get_rlib_metadata(target, filename) - } -} - -pub struct MetadataOnlyCodegenBackend(()); -pub struct OngoingCodegen { - metadata: EncodedMetadata, - metadata_version: Vec, - crate_name: Symbol, -} - -impl MetadataOnlyCodegenBackend { - pub fn boxed() -> Box { - box MetadataOnlyCodegenBackend(()) - } -} - -impl CodegenBackend for MetadataOnlyCodegenBackend { - fn init(&self, sess: &Session) { - for cty in sess.opts.crate_types.iter() { - match *cty { - CrateType::Rlib | CrateType::Dylib | CrateType::Executable => {}, - _ => { - sess.diagnostic().warn( - &format!("LLVM unsupported, so output type {} is not supported", cty) - ); - }, - } - } - } - - fn metadata_loader(&self) -> Box { - box NoLlvmMetadataLoader - } - - fn provide(&self, providers: &mut Providers) { - ::symbol_names::provide(providers); - - providers.target_features_whitelist = |_tcx, _cnum| { - Default::default() // Just a dummy - }; - providers.is_reachable_non_generic = |_tcx, _defid| true; - providers.exported_symbols = |_tcx, _crate| Arc::new(Vec::new()); - } - fn provide_extern(&self, providers: &mut Providers) { - providers.is_reachable_non_generic = |_tcx, _defid| true; - } - - fn codegen_crate<'a, 'tcx>( - &self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - _rx: mpsc::Receiver> - ) -> Box { - use rustc_mir::monomorphize::item::MonoItem; - - 
::check_for_rustc_errors_attr(tcx); - ::symbol_names_test::report_symbol_names(tcx); - ::rustc_incremental::assert_dep_graph(tcx); - ::rustc_incremental::assert_module_sources::assert_module_sources(tcx); - ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, - collector::collect_crate_mono_items( - tcx, - collector::MonoItemCollectionMode::Eager - ).0.iter() - ); - // FIXME: Fix this - // ::rustc::middle::dependency_format::calculate(tcx); - let _ = tcx.link_args(LOCAL_CRATE); - let _ = tcx.native_libraries(LOCAL_CRATE); - for mono_item in - collector::collect_crate_mono_items( - tcx, - collector::MonoItemCollectionMode::Eager - ).0 { - if let MonoItem::Fn(inst) = mono_item { - let def_id = inst.def_id(); - if def_id.is_local() { - let _ = inst.def.is_inline(tcx); - let _ = tcx.codegen_fn_attrs(def_id); - } - } - } - tcx.sess.abort_if_errors(); - - let metadata = tcx.encode_metadata(); - - box OngoingCodegen { - metadata, - metadata_version: tcx.metadata_encoding_version().to_vec(), - crate_name: tcx.crate_name(LOCAL_CRATE), - } - } - - fn join_codegen_and_link( - &self, - ongoing_codegen: Box, - sess: &Session, - _dep_graph: &DepGraph, - outputs: &OutputFilenames, - ) -> Result<(), CompileIncomplete> { - let ongoing_codegen = ongoing_codegen.downcast::() - .expect("Expected MetadataOnlyCodegenBackend's OngoingCodegen, found Box"); - for &crate_type in sess.opts.crate_types.iter() { - if crate_type != CrateType::Rlib && - crate_type != CrateType::Dylib { - continue; - } - let output_name = - out_filename(sess, crate_type, &outputs, &ongoing_codegen.crate_name.as_str()); - let mut compressed = ongoing_codegen.metadata_version.clone(); - let metadata = if crate_type == CrateType::Dylib { - DeflateEncoder::new(&mut compressed, Compression::fast()) - .write_all(&ongoing_codegen.metadata.raw_data) - .unwrap(); - &compressed - } else { - &ongoing_codegen.metadata.raw_data - }; - fs::write(&output_name, metadata).unwrap(); - } - - sess.abort_if_errors(); - if !sess.opts.crate_types.contains(&CrateType::Rlib) - && !sess.opts.crate_types.contains(&CrateType::Dylib) - { - sess.fatal("Executables are not supported by the metadata-only backend."); - } - Ok(()) - } + ) -> Result<(), ErrorReported>; } diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs index cbef3ff5b9286..330cfe154e302 100644 --- a/src/librustc_codegen_utils/lib.rs +++ b/src/librustc_codegen_utils/lib.rs @@ -1,46 +1,28 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # Note //! //! This API is completely unstable and subject to change. 
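`codegen_crate` hands its result to `join_codegen_and_link` as a `Box<dyn Any>`, and the backend downcasts it back to its own ongoing-codegen type; the deleted metadata-only backend above did exactly that with a `downcast` call (the concrete type parameter is elided in this rendering of the patch). A minimal, self-contained sketch of that hand-off pattern, with hypothetical names standing in for the real trait methods:

```rust
use std::any::Any;

// Stand-in for a backend's private "ongoing codegen" state.
struct OngoingCodegen {
    crate_name: String,
}

// Stand-in for `codegen_crate`: the caller only sees `Box<dyn Any>`.
fn codegen_crate() -> Box<dyn Any> {
    Box::new(OngoingCodegen { crate_name: "example".to_string() })
}

// Stand-in for `join_codegen_and_link`: recover the concrete type.
fn join_codegen_and_link(ongoing: Box<dyn Any>) -> Result<(), String> {
    let ongoing = ongoing
        .downcast::<OngoingCodegen>()
        .map_err(|_| "unexpected ongoing-codegen type".to_string())?;
    println!("linking crate `{}`", ongoing.crate_name);
    Ok(())
}

fn main() {
    let ongoing = codegen_crate();
    join_codegen_and_link(ongoing).unwrap();
}
```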
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] +#![feature(arbitrary_self_types)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(custom_attribute)] #![feature(nll)] #![allow(unused_attributes)] -#![feature(quote)] #![feature(rustc_diagnostic_macros)] +#![feature(in_band_lifetimes)] #![recursion_limit="256"] -extern crate flate2; -#[macro_use] -extern crate log; +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] #[macro_use] extern crate rustc; -extern crate rustc_target; -extern crate rustc_metadata; -extern crate rustc_mir; -extern crate rustc_incremental; -extern crate syntax; -extern crate syntax_pos; -#[macro_use] extern crate rustc_data_structures; use rustc::ty::TyCtxt; +use rustc::hir::def_id::LOCAL_CRATE; pub mod link; pub mod codegen_backend; @@ -51,12 +33,10 @@ pub mod symbol_names_test; /// error in codegen. This is used to write compile-fail tests /// that actually test that compilation succeeds without /// reporting an error. -pub fn check_for_rustc_errors_attr(tcx: TyCtxt) { - if let Some((id, span, _)) = *tcx.sess.entry_fn.borrow() { - let main_def_id = tcx.hir().local_def_id(id); - - if tcx.has_attr(main_def_id, "rustc_error") { - tcx.sess.span_fatal(span, "compilation successful"); +pub fn check_for_rustc_errors_attr(tcx: TyCtxt<'_, '_, '_>) { + if let Some((def_id, _)) = tcx.entry_fn(LOCAL_CRATE) { + if tcx.has_attr(def_id, "rustc_error") { + tcx.sess.span_fatal(tcx.def_span(def_id), "compilation successful"); } } } diff --git a/src/librustc_codegen_utils/link.rs b/src/librustc_codegen_utils/link.rs index b11aa687326f2..f3a1b219f8a84 100644 --- a/src/librustc_codegen_utils/link.rs +++ b/src/librustc_codegen_utils/link.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::session::config::{self, OutputFilenames, Input, OutputType}; use rustc::session::Session; use std::path::{Path, PathBuf}; @@ -51,7 +41,7 @@ pub fn find_crate_name(sess: Option<&Session>, attrs: &[ast::Attribute], input: &Input) -> String { let validate = |s: String, span: Option| { - ::rustc_metadata::validate_crate_name(sess, &s, span); + rustc_metadata::validate_crate_name(sess, &s, span); s }; diff --git a/src/librustc_codegen_utils/symbol_names.rs b/src/librustc_codegen_utils/symbol_names.rs index d5b95e77b1a8a..d50a9a1607b24 100644 --- a/src/librustc_codegen_utils/symbol_names.rs +++ b/src/librustc_codegen_utils/symbol_names.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The Rust Linkage Model and Symbol Names //! ======================================= //! @@ -97,28 +87,29 @@ //! virtually impossible. Thus, symbol hash generation exclusively relies on //! 
DefPaths which are much more robust in the face of changes to the code base. -use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::hir::Node; use rustc::hir::CodegenFnAttrFlags; -use rustc::hir::map::definitions::DefPathData; +use rustc::hir::map::{DefPathData, DisambiguatedDefPathData}; use rustc::ich::NodeIdHashingMode; -use rustc::ty::item_path::{self, ItemPathBuffer, RootMode}; +use rustc::ty::print::{PrettyPrinter, Printer, Print}; use rustc::ty::query::Providers; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{Kind, SubstsRef, UnpackedKind}; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::util::common::record_time; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_mir::monomorphize::item::{InstantiationMode, MonoItem, MonoItemExt}; use rustc_mir::monomorphize::Instance; -use syntax_pos::symbol::Symbol; +use syntax_pos::symbol::{Symbol, InternedString}; + +use log::debug; -use std::fmt::Write; -use std::mem::discriminant; +use std::fmt::{self, Write}; +use std::mem::{self, discriminant}; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { - def_symbol_name, symbol_name, ..*providers @@ -142,7 +133,7 @@ fn get_symbol_hash<'a, 'tcx>( // values for generic type parameters, // if any. - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, ) -> u64 { debug!( "get_symbol_hash(def_id={:?}, parameters={:?})", @@ -180,7 +171,7 @@ fn get_symbol_hash<'a, 'tcx>( assert!(!substs.needs_subst()); substs.hash_stable(&mut hcx, &mut hasher); - let is_generic = substs.types().next().is_some(); + let is_generic = substs.non_erasable_generics().next().is_some(); let avoid_cross_crate_conflicts = // If this is an instance of a generic function, we also hash in // the ID of the instantiating crate. 
This avoids symbol conflicts @@ -230,42 +221,36 @@ fn get_symbol_hash<'a, 'tcx>( hasher.finish() } -fn def_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::SymbolName { - let mut buffer = SymbolPathBuffer::new(); - item_path::with_forced_absolute_paths(|| { - tcx.push_item_path(&mut buffer, def_id, false); - }); - buffer.into_interned() -} - -fn symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) -> ty::SymbolName { +fn symbol_name(tcx: TyCtxt<'_, 'tcx, 'tcx>, instance: Instance<'tcx>) -> ty::SymbolName { ty::SymbolName { - name: Symbol::intern(&compute_symbol_name(tcx, instance)).as_interned_str(), + name: compute_symbol_name(tcx, instance), } } -fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) -> String { +fn compute_symbol_name(tcx: TyCtxt<'_, 'tcx, 'tcx>, instance: Instance<'tcx>) -> InternedString { let def_id = instance.def_id(); let substs = instance.substs; debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs); - let node_id = tcx.hir().as_local_node_id(def_id); + let hir_id = tcx.hir().as_local_hir_id(def_id); - if let Some(id) = node_id { - if *tcx.sess.plugin_registrar_fn.get() == Some(id) { + if def_id.is_local() { + if tcx.plugin_registrar_fn(LOCAL_CRATE) == Some(def_id) { let disambiguator = tcx.sess.local_crate_disambiguator(); - return tcx.sess.generate_plugin_registrar_symbol(disambiguator); + return Symbol::intern(&tcx.sess.generate_plugin_registrar_symbol(disambiguator)) + .as_interned_str(); } - if *tcx.sess.proc_macro_decls_static.get() == Some(id) { + if tcx.proc_macro_decls_static(LOCAL_CRATE) == Some(def_id) { let disambiguator = tcx.sess.local_crate_disambiguator(); - return tcx.sess.generate_proc_macro_decls_symbol(disambiguator); + return Symbol::intern(&tcx.sess.generate_proc_macro_decls_symbol(disambiguator)) + .as_interned_str(); } } // FIXME(eddyb) Precompute a custom symbol name based on attributes. - let is_foreign = if let Some(id) = node_id { - match tcx.hir().get(id) { + let is_foreign = if let Some(id) = hir_id { + match tcx.hir().get_by_hir_id(id) { Node::ForeignItem(_) => true, _ => false, } @@ -276,20 +261,20 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance let attrs = tcx.codegen_fn_attrs(def_id); if is_foreign { if let Some(name) = attrs.link_name { - return name.to_string(); + return name.as_interned_str(); } // Don't mangle foreign items. - return tcx.item_name(def_id).to_string(); + return tcx.item_name(def_id); } if let Some(name) = &attrs.export_name { // Use provided name - return name.to_string(); + return name.as_interned_str(); } if attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) { // Don't mangle - return tcx.item_name(def_id).to_string(); + return tcx.item_name(def_id); } // We want to compute the "type" of this item. 
Unfortunately, some @@ -327,13 +312,17 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance let hash = get_symbol_hash(tcx, def_id, instance, instance_ty, substs); - let mut buf = SymbolPathBuffer::from_interned(tcx.def_symbol_name(def_id)); + let mut printer = SymbolPrinter { + tcx, + path: SymbolPath::new(), + keep_within_component: false, + }.print_def_path(def_id, &[]).unwrap(); if instance.is_vtable_shim() { - buf.push("{{vtable-shim}}"); + let _ = printer.write_str("{{vtable-shim}}"); } - buf.finish(hash) + Symbol::intern(&printer.path.finish(hash)).as_interned_str() } // Follow C++ namespace-mangling style, see @@ -350,14 +339,14 @@ fn compute_symbol_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance // To be able to work on all platforms and get *some* reasonable output, we // use C++ name-mangling. #[derive(Debug)] -struct SymbolPathBuffer { +struct SymbolPath { result: String, temp_buf: String, } -impl SymbolPathBuffer { +impl SymbolPath { fn new() -> Self { - let mut result = SymbolPathBuffer { + let mut result = SymbolPath { result: String::with_capacity(64), temp_buf: String::with_capacity(16), }; @@ -365,88 +354,281 @@ impl SymbolPathBuffer { result } - fn from_interned(symbol: ty::SymbolName) -> Self { - let mut result = SymbolPathBuffer { - result: String::with_capacity(64), - temp_buf: String::with_capacity(16), - }; - result.result.push_str(&symbol.as_str()); - result - } - - fn into_interned(self) -> ty::SymbolName { - ty::SymbolName { - name: Symbol::intern(&self.result).as_interned_str(), + fn finalize_pending_component(&mut self) { + if !self.temp_buf.is_empty() { + let _ = write!(self.result, "{}{}", self.temp_buf.len(), self.temp_buf); + self.temp_buf.clear(); } } fn finish(mut self, hash: u64) -> String { + self.finalize_pending_component(); // E = end name-sequence let _ = write!(self.result, "17h{:016x}E", hash); self.result } } -impl ItemPathBuffer for SymbolPathBuffer { - fn root_mode(&self) -> &RootMode { - const ABSOLUTE: &RootMode = &RootMode::Absolute; - ABSOLUTE +struct SymbolPrinter<'a, 'tcx> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + path: SymbolPath, + + // When `true`, `finalize_pending_component` isn't used. + // This is needed when recursing into `path_qualified`, + // or `path_generic_args`, as any nested paths are + // logically within one component. + keep_within_component: bool, +} + +// HACK(eddyb) this relies on using the `fmt` interface to get +// `PrettyPrinter` aka pretty printing of e.g. types in paths, +// symbol names should have their own printing machinery. + +impl Printer<'tcx, 'tcx> for SymbolPrinter<'_, 'tcx> { + type Error = fmt::Error; + + type Path = Self; + type Region = Self; + type Type = Self; + type DynExistential = Self; + + fn tcx(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx + } + + fn print_region( + self, + _region: ty::Region<'_>, + ) -> Result { + Ok(self) + } + + fn print_type( + self, + ty: Ty<'tcx>, + ) -> Result { + match ty.sty { + // Print all nominal types as paths (unlike `pretty_print_type`). 
+ ty::FnDef(def_id, substs) | + ty::Opaque(def_id, substs) | + ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs }) | + ty::UnnormalizedProjection(ty::ProjectionTy { item_def_id: def_id, substs }) | + ty::Closure(def_id, ty::ClosureSubsts { substs }) | + ty::Generator(def_id, ty::GeneratorSubsts { substs }, _) => { + self.print_def_path(def_id, substs) + } + _ => self.pretty_print_type(ty), + } + } + + fn print_dyn_existential( + mut self, + predicates: &'tcx ty::List>, + ) -> Result { + let mut first = false; + for p in predicates { + if !first { + write!(self, "+")?; + } + first = false; + self = p.print(self)?; + } + Ok(self) + } + + fn path_crate( + mut self, + cnum: CrateNum, + ) -> Result { + self.write_str(&self.tcx.original_crate_name(cnum).as_str())?; + Ok(self) + } + fn path_qualified( + self, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + // Similar to `pretty_path_qualified`, but for the other + // types that are printed as paths (see `print_type` above). + match self_ty.sty { + ty::FnDef(..) | + ty::Opaque(..) | + ty::Projection(_) | + ty::UnnormalizedProjection(_) | + ty::Closure(..) | + ty::Generator(..) + if trait_ref.is_none() => + { + self.print_type(self_ty) + } + + _ => self.pretty_path_qualified(self_ty, trait_ref) + } } - fn push(&mut self, text: &str) { - self.temp_buf.clear(); - let need_underscore = sanitize(&mut self.temp_buf, text); - let _ = write!( - self.result, - "{}", - self.temp_buf.len() + (need_underscore as usize) - ); - if need_underscore { - self.result.push('_'); + fn path_append_impl( + self, + print_prefix: impl FnOnce(Self) -> Result, + _disambiguated_data: &DisambiguatedDefPathData, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + self.pretty_path_append_impl( + |mut cx| { + cx = print_prefix(cx)?; + + if cx.keep_within_component { + // HACK(eddyb) print the path similarly to how `FmtPrinter` prints it. + cx.write_str("::")?; + } else { + cx.path.finalize_pending_component(); + } + + Ok(cx) + }, + self_ty, + trait_ref, + ) + } + fn path_append( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + disambiguated_data: &DisambiguatedDefPathData, + ) -> Result { + self = print_prefix(self)?; + + // Skip `::{{constructor}}` on tuple/unit structs. + match disambiguated_data.data { + DefPathData::Ctor => return Ok(self), + _ => {} + } + + if self.keep_within_component { + // HACK(eddyb) print the path similarly to how `FmtPrinter` prints it. + self.write_str("::")?; + } else { + self.path.finalize_pending_component(); + } + + self.write_str(&disambiguated_data.data.as_interned_str().as_str())?; + Ok(self) + } + fn path_generic_args( + mut self, + print_prefix: impl FnOnce(Self) -> Result, + args: &[Kind<'tcx>], + ) -> Result { + self = print_prefix(self)?; + + let args = args.iter().cloned().filter(|arg| { + match arg.unpack() { + UnpackedKind::Lifetime(_) => false, + _ => true, + } + }); + + if args.clone().next().is_some() { + self.generic_delimiters(|cx| cx.comma_sep(args)) + } else { + Ok(self) } - self.result.push_str(&self.temp_buf); } } -// Name sanitation. LLVM will happily accept identifiers with weird names, but -// gas doesn't! 
-// gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $ -// -// returns true if an underscore must be added at the start -pub fn sanitize(result: &mut String, s: &str) -> bool { - for c in s.chars() { - match c { - // Escape these with $ sequences - '@' => result.push_str("$SP$"), - '*' => result.push_str("$BP$"), - '&' => result.push_str("$RF$"), - '<' => result.push_str("$LT$"), - '>' => result.push_str("$GT$"), - '(' => result.push_str("$LP$"), - ')' => result.push_str("$RP$"), - ',' => result.push_str("$C$"), - - // '.' doesn't occur in types and functions, so reuse it - // for ':' and '-' - '-' | ':' => result.push('.'), - - // These are legal symbols - 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => result.push(c), +impl PrettyPrinter<'tcx, 'tcx> for SymbolPrinter<'_, 'tcx> { + fn region_should_not_be_omitted( + &self, + _region: ty::Region<'_>, + ) -> bool { + false + } + fn comma_sep( + mut self, + mut elems: impl Iterator, + ) -> Result + where T: Print<'tcx, 'tcx, Self, Output = Self, Error = Self::Error> + { + if let Some(first) = elems.next() { + self = first.print(self)?; + for elem in elems { + self.write_str(",")?; + self = elem.print(self)?; + } + } + Ok(self) + } - _ => { - result.push('$'); - for c in c.escape_unicode().skip(1) { - match c { - '{' => {} - '}' => result.push('$'), - c => result.push(c), + fn generic_delimiters( + mut self, + f: impl FnOnce(Self) -> Result, + ) -> Result { + write!(self, "<")?; + + let kept_within_component = + mem::replace(&mut self.keep_within_component, true); + self = f(self)?; + self.keep_within_component = kept_within_component; + + write!(self, ">")?; + + Ok(self) + } +} + +impl fmt::Write for SymbolPrinter<'_, '_> { + fn write_str(&mut self, s: &str) -> fmt::Result { + // Name sanitation. LLVM will happily accept identifiers with weird names, but + // gas doesn't! + // gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $ + // NVPTX assembly has more strict naming rules than gas, so additionally, dots + // are replaced with '$' there. + + for c in s.chars() { + if self.path.temp_buf.is_empty() { + match c { + 'a'..='z' | 'A'..='Z' | '_' => {} + _ => { + // Underscore-qualify anything that didn't start as an ident. + self.path.temp_buf.push('_'); + } + } + } + match c { + // Escape these with $ sequences + '@' => self.path.temp_buf.push_str("$SP$"), + '*' => self.path.temp_buf.push_str("$BP$"), + '&' => self.path.temp_buf.push_str("$RF$"), + '<' => self.path.temp_buf.push_str("$LT$"), + '>' => self.path.temp_buf.push_str("$GT$"), + '(' => self.path.temp_buf.push_str("$LP$"), + ')' => self.path.temp_buf.push_str("$RP$"), + ',' => self.path.temp_buf.push_str("$C$"), + + '-' | ':' | '.' if self.tcx.has_strict_asm_symbol_naming() => { + // NVPTX doesn't support these characters in symbol names. + self.path.temp_buf.push('$') + } + + // '.' doesn't occur in types and functions, so reuse it + // for ':' and '-' + '-' | ':' => self.path.temp_buf.push('.'), + + // These are legal symbols + 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => self.path.temp_buf.push(c), + + _ => { + self.path.temp_buf.push('$'); + for c in c.escape_unicode().skip(1) { + match c { + '{' => {} + '}' => self.path.temp_buf.push('$'), + c => self.path.temp_buf.push(c), + } } } } } - } - // Underscore-qualify anything that didn't start as an ident. 
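The new `fmt::Write` impl keeps the old sanitization rules (escape `@`, `*`, `&`, `<`, `>`, `(`, `)`, `,` as `$…$` sequences, map `-`/`:` to `.`, underscore-prefix components that do not start like an identifier) and length-prefixes each path component before the final `17h{:016x}E` hash suffix emitted by `finish`. A rough standalone re-implementation of that scheme, for illustration only: it is not the compiler's code, skips the NVPTX special case, and assumes the usual `_ZN` legacy-mangling framing.

```rust
use std::fmt::Write;

// Escape a single path component roughly the way the symbol printer does.
fn sanitize(s: &str) -> String {
    let mut out = String::new();
    for (i, c) in s.chars().enumerate() {
        if i == 0 && !(c.is_ascii_alphabetic() || c == '_') {
            // Underscore-qualify anything that didn't start as an ident.
            out.push('_');
        }
        match c {
            '@' => out.push_str("$SP$"),
            '*' => out.push_str("$BP$"),
            '&' => out.push_str("$RF$"),
            '<' => out.push_str("$LT$"),
            '>' => out.push_str("$GT$"),
            '(' => out.push_str("$LP$"),
            ')' => out.push_str("$RP$"),
            ',' => out.push_str("$C$"),
            // '.' doesn't occur in types and functions, so reuse it for ':' and '-'.
            '-' | ':' => out.push('.'),
            'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => out.push(c),
            _ => {
                // Anything else becomes a $…$-wrapped unicode escape.
                out.push('$');
                for e in c.escape_unicode().skip(1) {
                    match e {
                        '{' => {}
                        '}' => out.push('$'),
                        e => out.push(e),
                    }
                }
            }
        }
    }
    out
}

// Length-prefix each component and close with the `17h<hash>E` suffix.
fn mangle(components: &[&str], hash: u64) -> String {
    let mut result = String::from("_ZN");
    for c in components {
        let c = sanitize(c);
        let _ = write!(result, "{}{}", c.len(), c);
    }
    let _ = write!(result, "17h{:016x}E", hash);
    result
}

fn main() {
    // e.g. _ZN4core3ptr13drop_in_place17h0123456789abcdefE
    println!("{}", mangle(&["core", "ptr", "drop_in_place"], 0x0123456789abcdef));
    // `<` and `>` inside a component are escaped as $LT$…$GT$.
    println!("{}", mangle(&["example", "<impl Foo>"], 0xdeadbeef));
}
```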
- !result.is_empty() && result.as_bytes()[0] != '_' as u8 - && !(result.as_bytes()[0] as char).is_xid_start() + Ok(()) + } } diff --git a/src/librustc_codegen_utils/symbol_names_test.rs b/src/librustc_codegen_utils/symbol_names_test.rs index c4ad31ab02198..6a2b6f1321b88 100644 --- a/src/librustc_codegen_utils/symbol_names_test.rs +++ b/src/librustc_codegen_utils/symbol_names_test.rs @@ -1,27 +1,16 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Walks the crate looking for items/impl-items/trait-items that have -//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! either a `rustc_symbol_name` or `rustc_def_path` attribute and //! generates an error giving, respectively, the symbol name or -//! item-path. This is used for unit testing the code that generates +//! def-path. This is used for unit testing the code that generates //! paths etc in all kinds of annoying scenarios. use rustc::hir; use rustc::ty::TyCtxt; -use syntax::ast; use rustc_mir::monomorphize::Instance; const SYMBOL_NAME: &'static str = "rustc_symbol_name"; -const ITEM_PATH: &'static str = "rustc_item_path"; +const DEF_PATH: &'static str = "rustc_def_path"; pub fn report_symbol_names<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // if the `rustc_attrs` feature is not enabled, then the @@ -43,18 +32,18 @@ struct SymbolNamesTest<'a, 'tcx:'a> { impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> { fn process_attrs(&mut self, - node_id: ast::NodeId) { + hir_id: hir::HirId) { let tcx = self.tcx; - let def_id = tcx.hir().local_def_id(node_id); + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); for attr in tcx.get_attrs(def_id).iter() { if attr.check_name(SYMBOL_NAME) { // for now, can only use on monomorphic names let instance = Instance::mono(tcx, def_id); let name = self.tcx.symbol_name(instance); tcx.sess.span_err(attr.span, &format!("symbol-name({})", name)); - } else if attr.check_name(ITEM_PATH) { - let path = tcx.item_path_str(def_id); - tcx.sess.span_err(attr.span, &format!("item-path({})", path)); + } else if attr.check_name(DEF_PATH) { + let path = tcx.def_path_str(def_id); + tcx.sess.span_err(attr.span, &format!("def-path({})", path)); } // (*) The formatting of `tag({})` is chosen so that tests can elect @@ -66,14 +55,14 @@ impl<'a, 'tcx> SymbolNamesTest<'a, 'tcx> { impl<'a, 'tcx> hir::itemlikevisit::ItemLikeVisitor<'tcx> for SymbolNamesTest<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { - self.process_attrs(item.id); + self.process_attrs(item.hir_id); } fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { - self.process_attrs(trait_item.id); + self.process_attrs(trait_item.hir_id); } fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { - self.process_attrs(impl_item.id); + self.process_attrs(impl_item.hir_id); } } diff --git a/src/librustc_cratesio_shim/Cargo.toml b/src/librustc_cratesio_shim/Cargo.toml index b8e494e4040ec..6bdfbe09354b4 100644 --- a/src/librustc_cratesio_shim/Cargo.toml +++ b/src/librustc_cratesio_shim/Cargo.toml @@ -15,6 +15,7 @@ authors = ["The Rust Project Developers"] name = "rustc_cratesio_shim" version = "0.0.0" +edition = "2018" [lib] crate-type = ["dylib"] diff --git a/src/librustc_cratesio_shim/src/lib.rs 
b/src/librustc_cratesio_shim/src/lib.rs index 56e480208e1c3..4c170f4f5f6f9 100644 --- a/src/librustc_cratesio_shim/src/lib.rs +++ b/src/librustc_cratesio_shim/src/lib.rs @@ -1,12 +1,4 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +#![deny(rust_2018_idioms)] // See Cargo.toml for a comment explaining this crate. #![allow(unused_extern_crates)] diff --git a/src/librustc_data_structures/Cargo.toml b/src/librustc_data_structures/Cargo.toml index 1754376a5d7f9..63e44d82a28c3 100644 --- a/src/librustc_data_structures/Cargo.toml +++ b/src/librustc_data_structures/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_data_structures" version = "0.0.0" +edition = "2018" [lib] name = "rustc_data_structures" @@ -9,18 +10,20 @@ path = "lib.rs" crate-type = ["dylib"] [dependencies] -ena = "0.11" +ena = "0.13" log = "0.4" +jobserver_crate = { version = "0.1", package = "jobserver" } +lazy_static = "1" rustc_cratesio_shim = { path = "../librustc_cratesio_shim" } serialize = { path = "../libserialize" } graphviz = { path = "../libgraphviz" } cfg-if = "0.1.2" stable_deref_trait = "1.0.0" -rustc-rayon = "0.1.1" -rustc-rayon-core = "0.1.1" +rayon = { version = "0.1.2", package = "rustc-rayon" } +rayon-core = { version = "0.1.2", package = "rustc-rayon-core" } rustc-hash = "1.0.1" smallvec = { version = "0.6.7", features = ["union", "may_dangle"] } [dependencies.parking_lot] -version = "0.6" +version = "0.7" features = ["nightly"] diff --git a/src/librustc_data_structures/base_n.rs b/src/librustc_data_structures/base_n.rs index d3b47daa5b4b8..f1bd3f03aef8d 100644 --- a/src/librustc_data_structures/base_n.rs +++ b/src/librustc_data_structures/base_n.rs @@ -1,14 +1,4 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/// Convert unsigned integers into a string representation with some base. +/// Converts unsigned integers into a string representation with some base. /// Bases up to and including 36 can be used for case-insensitive things. use std::str; diff --git a/src/librustc_data_structures/bit_set.rs b/src/librustc_data_structures/bit_set.rs index 28aad49b09b99..ff7964646d608 100644 --- a/src/librustc_data_structures/bit_set.rs +++ b/src/librustc_data_structures/bit_set.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use indexed_vec::{Idx, IndexVec}; +use crate::indexed_vec::{Idx, IndexVec}; use smallvec::SmallVec; use std::fmt; use std::iter; @@ -37,7 +27,7 @@ pub struct BitSet { } impl BitSet { - /// Create a new, empty bitset with a given `domain_size`. + /// Creates a new, empty bitset with a given `domain_size`. 
#[inline] pub fn new_empty(domain_size: usize) -> BitSet { let num_words = num_words(domain_size); @@ -48,7 +38,7 @@ impl BitSet { } } - /// Create a new, filled bitset with a given `domain_size`. + /// Creates a new, filled bitset with a given `domain_size`. #[inline] pub fn new_filled(domain_size: usize) -> BitSet { let num_words = num_words(domain_size); @@ -61,7 +51,7 @@ impl BitSet { result } - /// Get the domain size. + /// Gets the domain size. pub fn domain_size(&self) -> usize { self.domain_size } @@ -95,7 +85,7 @@ impl BitSet { self.words.iter().map(|e| e.count_ones() as usize).sum() } - /// True if `self` contains `elem`. + /// Returns `true` if `self` contains `elem`. #[inline] pub fn contains(&self, elem: T) -> bool { assert!(elem.index() < self.domain_size); @@ -116,7 +106,7 @@ impl BitSet { self.words.iter().all(|a| *a == 0) } - /// Insert `elem`. Returns true if the set has changed. + /// Insert `elem`. Returns whether the set has changed. #[inline] pub fn insert(&mut self, elem: T) -> bool { assert!(elem.index() < self.domain_size); @@ -136,7 +126,7 @@ impl BitSet { self.clear_excess_bits(); } - /// Returns true if the set has changed. + /// Returns `true` if the set has changed. #[inline] pub fn remove(&mut self, elem: T) -> bool { assert!(elem.index() < self.domain_size); @@ -148,26 +138,26 @@ impl BitSet { new_word != word } - /// Set `self = self | other` and return true if `self` changed + /// Sets `self = self | other` and returns `true` if `self` changed /// (i.e., if new bits were added). pub fn union(&mut self, other: &impl UnionIntoBitSet) -> bool { other.union_into(self) } - /// Set `self = self - other` and return true if `self` changed. + /// Sets `self = self - other` and returns `true` if `self` changed. /// (i.e., if any bits were removed). pub fn subtract(&mut self, other: &impl SubtractFromBitSet) -> bool { other.subtract_from(self) } - /// Set `self = self & other` and return true if `self` changed. + /// Sets `self = self & other` and return `true` if `self` changed. /// (i.e., if any bits were removed). pub fn intersect(&mut self, other: &BitSet) -> bool { assert_eq!(self.domain_size, other.domain_size); bitwise(&mut self.words, &other.words, |a, b| { a & b }) } - /// Get a slice of the underlying words. + /// Gets a slice of the underlying words. pub fn words(&self) -> &[Word] { &self.words } @@ -218,7 +208,7 @@ impl SubtractFromBitSet for BitSet { } impl fmt::Debug for BitSet { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { w.debug_list() .entries(self.iter()) .finish() @@ -376,7 +366,7 @@ impl SparseBitSet { dense } - fn iter(&self) -> slice::Iter { + fn iter(&self) -> slice::Iter<'_, T> { self.elems.iter() } } @@ -546,7 +536,7 @@ impl HybridBitSet { } } - pub fn iter(&self) -> HybridIter { + pub fn iter(&self) -> HybridIter<'_, T> { match self { HybridBitSet::Sparse(sparse) => HybridIter::Sparse(sparse.iter()), HybridBitSet::Dense(dense) => HybridIter::Dense(dense.iter()), @@ -621,7 +611,7 @@ impl GrowableBitSet { GrowableBitSet { bit_set: BitSet::new_empty(bits) } } - /// Returns true if the set has changed. + /// Returns `true` if the set has changed. #[inline] pub fn insert(&mut self, elem: T) -> bool { self.ensure(elem.index() + 1); @@ -655,7 +645,7 @@ pub struct BitMatrix { } impl BitMatrix { - /// Create a new `rows x columns` matrix, initially empty. + /// Creates a new `rows x columns` matrix, initially empty. 
pub fn new(num_rows: usize, num_columns: usize) -> BitMatrix { // For every element, we need one bit for every other // element. Round up to an even number of words. @@ -678,7 +668,7 @@ impl BitMatrix { /// Sets the cell at `(row, column)` to true. Put another way, insert /// `column` to the bitset for `row`. /// - /// Returns true if this changed the matrix, and false otherwise. + /// Returns `true` if this changed the matrix. pub fn insert(&mut self, row: R, column: C) -> bool { assert!(row.index() < self.num_rows && column.index() < self.num_columns); let (start, _) = self.range(row); @@ -701,7 +691,7 @@ impl BitMatrix { (self.words[start + word_index] & mask) != 0 } - /// Returns those indices that are true in rows `a` and `b`. This + /// Returns those indices that are true in rows `a` and `b`. This /// is an O(n) operation where `n` is the number of elements /// (somewhat independent from the actual size of the /// intersection, in particular). @@ -725,8 +715,8 @@ impl BitMatrix { result } - /// Add the bits from row `read` to the bits from row `write`, - /// return true if anything changed. + /// Adds the bits from row `read` to the bits from row `write`, and + /// returns `true` if anything changed. /// /// This is used when computing transitive reachability because if /// you have an edge `write -> read`, because in that case @@ -782,7 +772,7 @@ where } impl SparseBitMatrix { - /// Create a new empty sparse bit matrix with no rows or columns. + /// Creates a new empty sparse bit matrix with no rows or columns. pub fn new(num_columns: usize) -> Self { Self { num_columns, @@ -803,7 +793,7 @@ impl SparseBitMatrix { /// Sets the cell at `(row, column)` to true. Put another way, insert /// `column` to the bitset for `row`. /// - /// Returns true if this changed the matrix, and false otherwise. + /// Returns `true` if this changed the matrix. pub fn insert(&mut self, row: R, column: C) -> bool { self.ensure_row(row).insert(column) } @@ -816,8 +806,8 @@ impl SparseBitMatrix { self.row(row).map_or(false, |r| r.contains(column)) } - /// Add the bits from row `read` to the bits from row `write`, - /// return true if anything changed. + /// Adds the bits from row `read` to the bits from row `write`, and + /// returns `true` if anything changed. 
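`BitSet` stores its domain in an array of `Word`s, so `contains`, `insert` and `remove` all reduce to the same word-index/bit-mask arithmetic, and the mutating operations report whether a word actually changed, which is what the "Returns `true` if the set has changed" docs above refer to. A tiny standalone version of that core, to make the arithmetic concrete (simplified: no domain-size assertions, excess-bit clearing, or hybrid/sparse representations):

```rust
type Word = u64;
const WORD_BITS: usize = 64;

struct TinyBitSet {
    words: Vec<Word>,
}

impl TinyBitSet {
    fn new_empty(domain_size: usize) -> Self {
        let num_words = (domain_size + WORD_BITS - 1) / WORD_BITS;
        TinyBitSet { words: vec![0; num_words] }
    }

    fn word_mask(elem: usize) -> (usize, Word) {
        (elem / WORD_BITS, (1 as Word) << (elem % WORD_BITS))
    }

    fn contains(&self, elem: usize) -> bool {
        let (word, mask) = Self::word_mask(elem);
        self.words[word] & mask != 0
    }

    /// Returns `true` if the set has changed.
    fn insert(&mut self, elem: usize) -> bool {
        let (word, mask) = Self::word_mask(elem);
        let old = self.words[word];
        self.words[word] = old | mask;
        self.words[word] != old
    }

    /// Sets `self = self | other` and returns `true` if `self` changed.
    fn union(&mut self, other: &TinyBitSet) -> bool {
        let mut changed = false;
        for (a, b) in self.words.iter_mut().zip(&other.words) {
            let new = *a | *b;
            changed |= new != *a;
            *a = new;
        }
        changed
    }
}

fn main() {
    let mut a = TinyBitSet::new_empty(128);
    assert!(a.insert(3));
    assert!(!a.insert(3)); // already present: the set did not change
    let mut b = TinyBitSet::new_empty(128);
    b.insert(100);
    assert!(a.union(&b));
    assert!(a.contains(100));
}
```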
/// /// This is used when computing transitive reachability because if /// you have an edge `write -> read`, because in that case diff --git a/src/librustc_data_structures/box_region.rs b/src/librustc_data_structures/box_region.rs new file mode 100644 index 0000000000000..278dcdf2bee42 --- /dev/null +++ b/src/librustc_data_structures/box_region.rs @@ -0,0 +1,172 @@ +use std::cell::Cell; +use std::marker::PhantomData; +use std::pin::Pin; +use std::ops::{Generator, GeneratorState}; + +#[derive(Copy, Clone)] +pub struct AccessAction(*mut dyn FnMut()); + +impl AccessAction { + pub fn get(self) -> *mut dyn FnMut() { + self.0 + } +} + +#[derive(Copy, Clone)] +pub enum Action { + Access(AccessAction), + Complete, +} + +thread_local!(pub static BOX_REGION_ARG: Cell = Cell::new(Action::Complete)); + +pub struct PinnedGenerator { + generator: Pin, Return = R>>> +} + +impl PinnedGenerator { + pub fn new< + T: Generator, Return = R> + 'static + >(generator: T) -> (I, Self) { + let mut result = PinnedGenerator { + generator: Box::pin(generator) + }; + + // Run it to the first yield to set it up + let init = match Pin::new(&mut result.generator).resume() { + GeneratorState::Yielded( + YieldType::Initial(y) + ) => y, + _ => panic!() + }; + + (init, result) + } + + pub unsafe fn access(&mut self, closure: *mut dyn FnMut()) { + BOX_REGION_ARG.with(|i| { + i.set(Action::Access(AccessAction(closure))); + }); + + // Call the generator, which in turn will call the closure in BOX_REGION_ARG + if let GeneratorState::Complete(_) = Pin::new(&mut self.generator).resume() { + panic!() + } + } + + pub fn complete(&mut self) -> R { + // Tell the generator we want it to complete, consuming it and yielding a result + BOX_REGION_ARG.with(|i| { + i.set(Action::Complete) + }); + + let result = Pin::new(&mut self.generator).resume(); + if let GeneratorState::Complete(r) = result { + r + } else { + panic!() + } + } +} + +#[derive(PartialEq)] +pub struct Marker(PhantomData); + +impl Marker { + pub unsafe fn new() -> Self { + Marker(PhantomData) + } +} + +pub enum YieldType { + Initial(I), + Accessor(Marker), +} + +#[macro_export] +#[allow_internal_unstable(fn_traits)] +macro_rules! 
declare_box_region_type { + (impl $v:vis + $name: ident, + $yield_type:ty, + for($($lifetimes:tt)*), + ($($args:ty),*) -> ($reti:ty, $retc:ty) + ) => { + $v struct $name($crate::box_region::PinnedGenerator< + $reti, + for<$($lifetimes)*> fn(($($args,)*)), + $retc + >); + + impl $name { + fn new + 'static>( + generator: T + ) -> ($reti, Self) { + let (initial, pinned) = $crate::box_region::PinnedGenerator::new(generator); + (initial, $name(pinned)) + } + + $v fn access FnOnce($($args,)*) -> R, R>(&mut self, f: F) -> R { + // Turn the FnOnce closure into *mut dyn FnMut() + // so we can pass it in to the generator using the BOX_REGION_ARG thread local + let mut r = None; + let mut f = Some(f); + let mut_f: &mut dyn for<$($lifetimes)*> FnMut(($($args,)*)) = + &mut |args| { + let f = f.take().unwrap(); + r = Some(FnOnce::call_once(f, args)); + }; + let mut_f = mut_f as *mut dyn for<$($lifetimes)*> FnMut(($($args,)*)); + + // Get the generator to call our closure + unsafe { + self.0.access(::std::mem::transmute(mut_f)); + } + + // Unwrap the result + r.unwrap() + } + + $v fn complete(mut self) -> $retc { + self.0.complete() + } + + fn initial_yield(value: $reti) -> $yield_type { + $crate::box_region::YieldType::Initial(value) + } + } + }; + + ($v:vis $name: ident, for($($lifetimes:tt)*), ($($args:ty),*) -> ($reti:ty, $retc:ty)) => { + declare_box_region_type!( + impl $v $name, + $crate::box_region::YieldType<$reti, for<$($lifetimes)*> fn(($($args,)*))>, + for($($lifetimes)*), + ($($args),*) -> ($reti, $retc) + ); + }; +} + +#[macro_export] +#[allow_internal_unstable(fn_traits)] +macro_rules! box_region_allow_access { + (for($($lifetimes:tt)*), ($($args:ty),*), ($($exprs:expr),*) ) => { + loop { + match $crate::box_region::BOX_REGION_ARG.with(|i| i.get()) { + $crate::box_region::Action::Access(accessor) => { + let accessor: &mut dyn for<$($lifetimes)*> FnMut($($args),*) = unsafe { + ::std::mem::transmute(accessor.get()) + }; + (*accessor)(($($exprs),*)); + unsafe { + let marker = $crate::box_region::Marker::< + for<$($lifetimes)*> fn(($($args,)*)) + >::new(); + yield $crate::box_region::YieldType::Accessor(marker) + }; + } + $crate::box_region::Action::Complete => break, + } + } + } +} diff --git a/src/librustc_data_structures/const_cstr.rs b/src/librustc_data_structures/const_cstr.rs index 4589d973b6a41..fbe2f29f706f6 100644 --- a/src/librustc_data_structures/const_cstr.rs +++ b/src/librustc_data_structures/const_cstr.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// This macro creates a zero-overhead &CStr by adding a NUL terminator to /// the string literal passed into it at compile-time. Use it like: /// diff --git a/src/librustc_data_structures/fingerprint.rs b/src/librustc_data_structures/fingerprint.rs index aa9ddda2b9364..c4c0db5801209 100644 --- a/src/librustc_data_structures/fingerprint.rs +++ b/src/librustc_data_structures/fingerprint.rs @@ -1,15 +1,5 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - +use crate::stable_hasher; use std::mem; -use stable_hasher; use serialize; use serialize::opaque::{EncodeResult, Encoder, Decoder}; @@ -80,12 +70,13 @@ impl Fingerprint { } impl ::std::fmt::Display for Fingerprint { - fn fmt(&self, formatter: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + fn fmt(&self, formatter: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(formatter, "{:x}-{:x}", self.0, self.1) } } impl stable_hasher::StableHasherResult for Fingerprint { + #[inline] fn finish(hasher: stable_hasher::StableHasher) -> Self { let (_0, _1) = hasher.finalize(); Fingerprint(_0, _1) diff --git a/src/librustc_data_structures/flock.rs b/src/librustc_data_structures/flock.rs index 86e48e21626ab..255c5fd7fe7ec 100644 --- a/src/librustc_data_structures/flock.rs +++ b/src/librustc_data_structures/flock.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Simple file-locking apis for each OS. //! //! This is not meant to be in the standard library, it does nothing with @@ -24,12 +14,9 @@ cfg_if! { if #[cfg(unix)] { use std::ffi::{CString, OsStr}; use std::os::unix::prelude::*; - use libc; #[cfg(any(target_os = "linux", target_os = "android"))] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_type: libc::c_short, @@ -41,18 +28,10 @@ cfg_if! { // not actually here, but brings in line with freebsd pub l_sysid: libc::c_int, } - - pub const F_RDLCK: libc::c_short = 0; - pub const F_WRLCK: libc::c_short = 1; - pub const F_UNLCK: libc::c_short = 2; - pub const F_SETLK: libc::c_int = 6; - pub const F_SETLKW: libc::c_int = 7; } #[cfg(target_os = "freebsd")] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_start: libc::off_t, @@ -62,12 +41,6 @@ cfg_if! { pub l_whence: libc::c_short, pub l_sysid: libc::c_int, } - - pub const F_RDLCK: libc::c_short = 1; - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 12; - pub const F_SETLKW: libc::c_int = 13; } #[cfg(any(target_os = "dragonfly", @@ -75,8 +48,6 @@ cfg_if! { target_os = "netbsd", target_os = "openbsd"))] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_start: libc::off_t, @@ -88,18 +59,10 @@ cfg_if! { // not actually here, but brings in line with freebsd pub l_sysid: libc::c_int, } - - pub const F_RDLCK: libc::c_short = 1; - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 8; - pub const F_SETLKW: libc::c_int = 9; } #[cfg(target_os = "haiku")] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_type: libc::c_short, @@ -111,18 +74,10 @@ cfg_if! { // not actually here, but brings in line with freebsd pub l_sysid: libc::c_int, } - - pub const F_RDLCK: libc::c_short = 0x0040; - pub const F_UNLCK: libc::c_short = 0x0200; - pub const F_WRLCK: libc::c_short = 0x0400; - pub const F_SETLK: libc::c_int = 0x0080; - pub const F_SETLKW: libc::c_int = 0x0100; } #[cfg(any(target_os = "macos", target_os = "ios"))] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_start: libc::off_t, @@ -134,18 +89,10 @@ cfg_if! 
{ // not actually here, but brings in line with freebsd pub l_sysid: libc::c_int, } - - pub const F_RDLCK: libc::c_short = 1; - pub const F_UNLCK: libc::c_short = 2; - pub const F_WRLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 8; - pub const F_SETLKW: libc::c_int = 9; } #[cfg(target_os = "solaris")] mod os { - use libc; - #[repr(C)] pub struct flock { pub l_type: libc::c_short, @@ -155,12 +102,6 @@ cfg_if! { pub l_sysid: libc::c_int, pub l_pid: libc::pid_t, } - - pub const F_RDLCK: libc::c_short = 1; - pub const F_WRLCK: libc::c_short = 2; - pub const F_UNLCK: libc::c_short = 3; - pub const F_SETLK: libc::c_int = 6; - pub const F_SETLKW: libc::c_int = 7; } #[derive(Debug)] @@ -192,9 +133,9 @@ cfg_if! { } let lock_type = if exclusive { - os::F_WRLCK + libc::F_WRLCK as libc::c_short } else { - os::F_RDLCK + libc::F_RDLCK as libc::c_short }; let flock = os::flock { @@ -205,7 +146,7 @@ cfg_if! { l_type: lock_type, l_sysid: 0, }; - let cmd = if wait { os::F_SETLKW } else { os::F_SETLK }; + let cmd = if wait { libc::F_SETLKW } else { libc::F_SETLK }; let ret = unsafe { libc::fcntl(fd, cmd, &flock) }; @@ -226,11 +167,11 @@ cfg_if! { l_len: 0, l_pid: 0, l_whence: libc::SEEK_SET as libc::c_short, - l_type: os::F_UNLCK, + l_type: libc::F_UNLCK as libc::c_short, l_sysid: 0, }; unsafe { - libc::fcntl(self.fd, os::F_SETLK, &flock); + libc::fcntl(self.fd, libc::F_SETLK, &flock); libc::close(self.fd); } } diff --git a/src/librustc_data_structures/fx.rs b/src/librustc_data_structures/fx.rs index 7c7fc3a934620..a2afeffe73050 100644 --- a/src/librustc_data_structures/fx.rs +++ b/src/librustc_data_structures/fx.rs @@ -1,11 +1 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - pub use rustc_hash::{FxHasher, FxHashMap, FxHashSet}; diff --git a/src/librustc_data_structures/graph/dominators/mod.rs b/src/librustc_data_structures/graph/dominators/mod.rs index 9b7f4cec47b8e..93a2a261c6fde 100644 --- a/src/librustc_data_structures/graph/dominators/mod.rs +++ b/src/librustc_data_structures/graph/dominators/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Algorithm citation: //! A Simple, Fast Dominance Algorithm. //! Keith D. Cooper, Timothy J. 
Harvey, and Ken Kennedy @@ -18,8 +8,6 @@ use super::super::indexed_vec::{Idx, IndexVec}; use super::iterate::reverse_post_order; use super::ControlFlowGraph; -use std::fmt; - #[cfg(test)] mod test; @@ -127,7 +115,7 @@ impl Dominators { self.immediate_dominators[node].unwrap() } - pub fn dominators(&self, node: Node) -> Iter { + pub fn dominators(&self, node: Node) -> Iter<'_, Node> { assert!(self.is_reachable(node), "node {:?} is not reachable", node); Iter { dominators: self, @@ -146,7 +134,7 @@ impl Dominators { } } -pub struct Iter<'dom, Node: Idx + 'dom> { +pub struct Iter<'dom, Node: Idx> { dominators: &'dom Dominators, node: Option, } @@ -168,48 +156,3 @@ impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> { } } } - -pub struct DominatorTree { - root: N, - children: IndexVec>, -} - -impl DominatorTree { - pub fn children(&self, node: Node) -> &[Node] { - &self.children[node] - } -} - -impl fmt::Debug for DominatorTree { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt( - &DominatorTreeNode { - tree: self, - node: self.root, - }, - fmt, - ) - } -} - -struct DominatorTreeNode<'tree, Node: Idx> { - tree: &'tree DominatorTree, - node: Node, -} - -impl<'tree, Node: Idx> fmt::Debug for DominatorTreeNode<'tree, Node> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - let subtrees: Vec<_> = self.tree - .children(self.node) - .iter() - .map(|&child| DominatorTreeNode { - tree: self.tree, - node: child, - }) - .collect(); - fmt.debug_tuple("") - .field(&self.node) - .field(&subtrees) - .finish() - } -} diff --git a/src/librustc_data_structures/graph/dominators/test.rs b/src/librustc_data_structures/graph/dominators/test.rs index 0af878cac2df1..5d17ce9e90941 100644 --- a/src/librustc_data_structures/graph/dominators/test.rs +++ b/src/librustc_data_structures/graph/dominators/test.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::super::test::TestGraph; use super::*; diff --git a/src/librustc_data_structures/graph/implementation/mod.rs b/src/librustc_data_structures/graph/implementation/mod.rs index c31321fa374a4..de4b1bcd0c2a1 100644 --- a/src/librustc_data_structures/graph/implementation/mod.rs +++ b/src/librustc_data_structures/graph/implementation/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A graph module for use in dataflow, region resolution, and elsewhere. //! //! # Interface details @@ -24,16 +14,16 @@ //! stored. The edges are stored in a central array, but they are also //! threaded onto two linked lists for each node, one for incoming edges //! and one for outgoing edges. Note that every edge is a member of some -//! incoming list and some outgoing list. Basically you can load the +//! incoming list and some outgoing list. Basically you can load the //! first index of the linked list from the node data structures (the //! 
field `first_edge`) and then, for each edge, load the next index from //! the field `next_edge`). Each of those fields is an array that should //! be indexed by the direction (see the type `Direction`). -use bit_set::BitSet; +use crate::bit_set::BitSet; +use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; use std::fmt::Debug; use std::usize; -use snapshot_vec::{SnapshotVec, SnapshotVecDelegate}; #[cfg(test)] mod tests; @@ -89,7 +79,7 @@ pub const OUTGOING: Direction = Direction { repr: 0 }; pub const INCOMING: Direction = Direction { repr: 1 }; impl NodeIndex { - /// Returns unique id (unique with respect to the graph holding associated node). + /// Returns unique ID (unique with respect to the graph holding associated node). pub fn node_id(self) -> usize { self.0 } @@ -222,15 +212,19 @@ impl Graph { .all(|(edge_idx, edge)| f(edge_idx, edge)) } - pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges { + pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> { self.adjacent_edges(source, OUTGOING) } - pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges { + pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> { self.adjacent_edges(source, INCOMING) } - pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges { + pub fn adjacent_edges( + &self, + source: NodeIndex, + direction: Direction + ) -> AdjacentEdges<'_, N, E> { let first_edge = self.node(source).first_edge[direction.repr]; AdjacentEdges { graph: self, @@ -301,11 +295,7 @@ impl Graph { // # Iterators -pub struct AdjacentEdges<'g, N, E> -where - N: 'g, - E: 'g, -{ +pub struct AdjacentEdges<'g, N, E> { graph: &'g Graph, direction: Direction, next: EdgeIndex, @@ -341,11 +331,7 @@ impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> { } } -pub struct DepthFirstTraversal<'g, N, E> -where - N: 'g, - E: 'g, -{ +pub struct DepthFirstTraversal<'g, N, E> { graph: &'g Graph, stack: Vec, visited: BitSet, diff --git a/src/librustc_data_structures/graph/implementation/tests.rs b/src/librustc_data_structures/graph/implementation/tests.rs index 3814827b5df6e..82c6da3f42711 100644 --- a/src/librustc_data_structures/graph/implementation/tests.rs +++ b/src/librustc_data_structures/graph/implementation/tests.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use graph::implementation::*; +use crate::graph::implementation::*; use std::fmt::Debug; type TestGraph = Graph<&'static str, &'static str>; diff --git a/src/librustc_data_structures/graph/iterate/mod.rs b/src/librustc_data_structures/graph/iterate/mod.rs index 3afdc88d60279..c09364b0a5395 100644 --- a/src/librustc_data_structures/graph/iterate/mod.rs +++ b/src/librustc_data_structures/graph/iterate/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::super::indexed_vec::IndexVec; use super::{DirectedGraph, WithSuccessors, WithNumNodes}; diff --git a/src/librustc_data_structures/graph/iterate/test.rs b/src/librustc_data_structures/graph/iterate/test.rs index 100881ddfdd77..62e48aaec53f1 100644 --- a/src/librustc_data_structures/graph/iterate/test.rs +++ b/src/librustc_data_structures/graph/iterate/test.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::super::test::TestGraph; use super::*; diff --git a/src/librustc_data_structures/graph/mod.rs b/src/librustc_data_structures/graph/mod.rs index 7265e4e8c7c66..3d47b7d49fb96 100644 --- a/src/librustc_data_structures/graph/mod.rs +++ b/src/librustc_data_structures/graph/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::indexed_vec::Idx; pub mod dominators; diff --git a/src/librustc_data_structures/graph/reference.rs b/src/librustc_data_structures/graph/reference.rs index a7b763db8da29..5ad2a71e1d732 100644 --- a/src/librustc_data_structures/graph/reference.rs +++ b/src/librustc_data_structures/graph/reference.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::*; impl<'graph, G: DirectedGraph> DirectedGraph for &'graph G { diff --git a/src/librustc_data_structures/graph/scc/mod.rs b/src/librustc_data_structures/graph/scc/mod.rs index 64de0c2f565a1..24c5448639e7d 100644 --- a/src/librustc_data_structures/graph/scc/mod.rs +++ b/src/librustc_data_structures/graph/scc/mod.rs @@ -1,21 +1,11 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Routine to compute the strongly connected components (SCCs) of a //! graph, as well as the resulting DAG if each SCC is replaced with a //! node in the graph. This uses Tarjan's algorithm that completes in //! O(n) time. 
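The doc comment above names Tarjan's algorithm, but the real walk is spread across `SccsConstruction` and the graph traits, which makes the control flow hard to follow in isolation. Below is a self-contained illustrative sketch of the same idea (assumed simplifications: plain `usize` node ids and a `Vec<Vec<usize>>` adjacency list instead of the `DirectedGraph`/`WithSuccessors` traits, and recursion instead of the module's explicit stack):

fn tarjan_sccs(adj: &[Vec<usize>]) -> Vec<Vec<usize>> {
    let n = adj.len();
    let mut index: Vec<Option<usize>> = vec![None; n]; // discovery index per node
    let mut lowlink = vec![0usize; n]; // lowest index reachable from the node's DFS subtree
    let mut on_stack = vec![false; n];
    let mut stack = Vec::new();
    let mut sccs = Vec::new();
    let mut next = 0usize;

    fn visit(
        v: usize,
        adj: &[Vec<usize>],
        index: &mut [Option<usize>],
        lowlink: &mut [usize],
        on_stack: &mut [bool],
        stack: &mut Vec<usize>,
        next: &mut usize,
        sccs: &mut Vec<Vec<usize>>,
    ) {
        index[v] = Some(*next);
        lowlink[v] = *next;
        *next += 1;
        stack.push(v);
        on_stack[v] = true;

        for &w in &adj[v] {
            match index[w] {
                None => {
                    visit(w, adj, index, lowlink, on_stack, stack, next, sccs);
                    lowlink[v] = lowlink[v].min(lowlink[w]);
                }
                // Back edge to a node still on the stack: same component.
                Some(i) if on_stack[w] => lowlink[v] = lowlink[v].min(i),
                _ => {}
            }
        }

        if lowlink[v] == index[v].unwrap() {
            // `v` is the root of a component: everything pushed above it
            // (and `v` itself) forms one SCC.
            let mut scc = Vec::new();
            loop {
                let w = stack.pop().unwrap();
                on_stack[w] = false;
                scc.push(w);
                if w == v { break; }
            }
            sccs.push(scc);
        }
    }

    for v in 0..n {
        if index[v].is_none() {
            visit(v, adj, &mut index, &mut lowlink, &mut on_stack, &mut stack, &mut next, &mut sccs);
        }
    }
    sccs
}

For example, `tarjan_sccs(&[vec![1], vec![0], vec![0, 1]])` returns `[[1, 0], [2]]`: the cycle {0, 1} is one component and node 2 another, and every component is emitted only after the components it can reach, matching the "resulting DAG" view described above.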
-use fx::FxHashSet; -use graph::{DirectedGraph, WithNumNodes, WithSuccessors}; -use indexed_vec::{Idx, IndexVec}; +use crate::fx::FxHashSet; +use crate::graph::{DirectedGraph, WithNumNodes, WithSuccessors}; +use crate::indexed_vec::{Idx, IndexVec}; use std::ops::Range; mod test; @@ -103,7 +93,7 @@ impl SccData { } } -struct SccsConstruction<'c, G: DirectedGraph + WithNumNodes + WithSuccessors + 'c, S: Idx> { +struct SccsConstruction<'c, G: DirectedGraph + WithNumNodes + WithSuccessors, S: Idx> { graph: &'c G, /// The state of each node; used during walk to record the stack @@ -210,7 +200,7 @@ where } } - /// Visit a node during the DFS. We first examine its current + /// Visits a node during the DFS. We first examine its current /// state -- if it is not yet visited (`NotVisited`), we can push /// it onto the stack and start walking its successors. /// diff --git a/src/librustc_data_structures/graph/scc/test.rs b/src/librustc_data_structures/graph/scc/test.rs index 405e1b3a61748..da3a1ceefe94b 100644 --- a/src/librustc_data_structures/graph/scc/test.rs +++ b/src/librustc_data_structures/graph/scc/test.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![cfg(test)] -use graph::test::TestGraph; +use crate::graph::test::TestGraph; use super::*; #[test] diff --git a/src/librustc_data_structures/graph/test.rs b/src/librustc_data_structures/graph/test.rs index 26cc2c9f17cfb..b390c41957294 100644 --- a/src/librustc_data_structures/graph/test.rs +++ b/src/librustc_data_structures/graph/test.rs @@ -1,14 +1,4 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use fx::FxHashMap; +use crate::fx::FxHashMap; use std::cmp::max; use std::slice; use std::iter; diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs index 6522dbe117994..359b89f683dc4 100644 --- a/src/librustc_data_structures/indexed_vec.rs +++ b/src/librustc_data_structures/indexed_vec.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fmt::Debug; use std::iter::{self, FromIterator}; use std::slice; @@ -22,7 +12,7 @@ use rustc_serialize as serialize; /// Represents some newtyped `usize` wrapper. /// -/// (purpose: avoid mixing indexes for different bitvector domains.) +/// Purpose: avoid mixing indexes for different bitvector domains. pub trait Idx: Copy + 'static + Ord + Debug + Hash { fn new(idx: usize) -> Self; @@ -68,9 +58,10 @@ macro_rules! newtype_index { // ---- public rules ---- // Use default constants - ($v:vis struct $name:ident { .. }) => ( + ($(#[$attrs:meta])* $v:vis struct $name:ident { .. 
}) => ( newtype_index!( // Leave out derives marker so we can use its absence to ensure it comes first + @attrs [$(#[$attrs])*] @type [$name] // shave off 256 indices at the end to allow space for packing these indices into enums @max [0xFFFF_FF00] @@ -79,9 +70,10 @@ macro_rules! newtype_index { ); // Define any constants - ($v:vis struct $name:ident { $($tokens:tt)+ }) => ( + ($(#[$attrs:meta])* $v:vis struct $name:ident { $($tokens:tt)+ }) => ( newtype_index!( // Leave out derives marker so we can use its absence to ensure it comes first + @attrs [$(#[$attrs])*] @type [$name] // shave off 256 indices at the end to allow space for packing these indices into enums @max [0xFFFF_FF00] @@ -94,10 +86,12 @@ macro_rules! newtype_index { // Base case, user-defined constants (if any) have already been defined (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt]) => ( + $(#[$attrs])* #[derive(Copy, PartialEq, Eq, Hash, PartialOrd, Ord, $($derives),*)] #[rustc_layout_scalar_valid_range_end($max)] $v struct $type { @@ -154,19 +148,19 @@ macro_rules! newtype_index { unsafe { $type { private: value } } } - /// Extract value of this index as an integer. + /// Extracts the value of this index as an integer. #[inline] $v fn index(self) -> usize { self.as_usize() } - /// Extract value of this index as a usize. + /// Extracts the value of this index as a `u32`. #[inline] $v fn as_u32(self) -> u32 { self.private } - /// Extract value of this index as a u32. + /// Extracts the value of this index as a `usize`. #[inline] $v fn as_usize(self) -> usize { self.as_u32() as usize @@ -267,7 +261,7 @@ macro_rules! newtype_index { @type [$type:ident] @debug_format [$debug_format:tt]) => ( impl ::std::fmt::Debug for $type { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(fmt, $debug_format, self.as_u32()) } } @@ -292,13 +286,15 @@ macro_rules! newtype_index { ); // Append comma to end of derives list if it's missing - (@type [$type:ident] + (@attrs [$(#[$attrs:meta])*] + @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt] derive [$($derives:ident),*] $($tokens:tt)*) => ( newtype_index!( + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -309,7 +305,8 @@ macro_rules! newtype_index { // By not including the @derives marker in this list nor in the default args, we can force it // to come first if it exists. When encodable is custom, just use the derives list as-is. - (@type [$type:ident] + (@attrs [$(#[$attrs:meta])*] + @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt] @@ -317,6 +314,7 @@ macro_rules! newtype_index { ENCODABLE = custom $($tokens:tt)*) => ( newtype_index!( + @attrs [$(#[$attrs])*] @derives [$($derives,)+] @type [$type] @max [$max] @@ -327,7 +325,8 @@ macro_rules! newtype_index { // By not including the @derives marker in this list nor in the default args, we can force it // to come first if it exists. When encodable isn't custom, add serialization traits by default. - (@type [$type:ident] + (@attrs [$(#[$attrs:meta])*] + @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt] @@ -335,6 +334,7 @@ macro_rules! newtype_index { $($tokens:tt)*) => ( newtype_index!( @derives [$($derives,)+ RustcEncodable,] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -345,7 +345,8 @@ macro_rules! 
newtype_index { // The case where no derives are added, but encodable is overridden. Don't // derive serialization traits - (@type [$type:ident] + (@attrs [$(#[$attrs:meta])*] + @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt] @@ -353,6 +354,7 @@ macro_rules! newtype_index { $($tokens:tt)*) => ( newtype_index!( @derives [] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -361,13 +363,15 @@ macro_rules! newtype_index { ); // The case where no derives are added, add serialization derives by default - (@type [$type:ident] + (@attrs [$(#[$attrs:meta])*] + @type [$type:ident] @max [$max:expr] @vis [$v:vis] @debug_format [$debug_format:tt] $($tokens:tt)*) => ( newtype_index!( @derives [RustcEncodable,] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -394,6 +398,7 @@ macro_rules! newtype_index { // Rewrite final without comma to one that includes comma (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$max:expr] @vis [$v:vis] @@ -401,6 +406,7 @@ macro_rules! newtype_index { $name:ident = $constant:expr) => ( newtype_index!( @derives [$($derives,)*] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -410,6 +416,7 @@ macro_rules! newtype_index { // Rewrite final const without comma to one that includes comma (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$_max:expr] @vis [$v:vis] @@ -418,6 +425,7 @@ macro_rules! newtype_index { const $name:ident = $constant:expr) => ( newtype_index!( @derives [$($derives,)*] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -427,6 +435,7 @@ macro_rules! newtype_index { // Replace existing default for max (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$_max:expr] @vis [$v:vis] @@ -435,6 +444,7 @@ macro_rules! newtype_index { $($tokens:tt)*) => ( newtype_index!( @derives [$($derives,)*] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -444,6 +454,7 @@ macro_rules! newtype_index { // Replace existing default for debug_format (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$max:expr] @vis [$v:vis] @@ -452,6 +463,7 @@ macro_rules! newtype_index { $($tokens:tt)*) => ( newtype_index!( @derives [$($derives,)*] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -461,6 +473,7 @@ macro_rules! newtype_index { // Assign a user-defined constant (@derives [$($derives:ident,)*] + @attrs [$(#[$attrs:meta])*] @type [$type:ident] @max [$max:expr] @vis [$v:vis] @@ -472,6 +485,7 @@ macro_rules! newtype_index { pub const $name: $type = $type::from_u32_const($constant); newtype_index!( @derives [$($derives,)*] + @attrs [$(#[$attrs])*] @type [$type] @max [$max] @vis [$v] @@ -505,7 +519,7 @@ impl serialize::Decodable for IndexVec { } impl fmt::Debug for IndexVec { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Debug::fmt(&self.raw, fmt) } } @@ -583,7 +597,7 @@ impl IndexVec { } #[inline] - pub fn iter(&self) -> slice::Iter { + pub fn iter(&self) -> slice::Iter<'_, T> { self.raw.iter() } @@ -599,7 +613,7 @@ impl IndexVec { } #[inline] - pub fn iter_mut(&mut self) -> slice::IterMut { + pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { self.raw.iter_mut() } @@ -651,7 +665,7 @@ impl IndexVec { self.raw.get_mut(index.index()) } - /// Return mutable references to two distinct elements, a and b. Panics if a == b. 
+ /// Returns mutable references to two distinct elements, a and b. Panics if a == b. #[inline] pub fn pick2_mut(&mut self, a: I, b: I) -> (&mut T, &mut T) { let (ai, bi) = (a.index(), b.index()); diff --git a/src/librustc_data_structures/interner.rs b/src/librustc_data_structures/interner.rs index 29e5aefee7f0b..36ccbb704a733 100644 --- a/src/librustc_data_structures/interner.rs +++ b/src/librustc_data_structures/interner.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::hash::Hash; use std::hash::BuildHasher; use std::hash::Hasher; diff --git a/src/librustc_data_structures/jobserver.rs b/src/librustc_data_structures/jobserver.rs new file mode 100644 index 0000000000000..48ac8125a0d66 --- /dev/null +++ b/src/librustc_data_structures/jobserver.rs @@ -0,0 +1,156 @@ +use jobserver_crate::{Client, HelperThread, Acquired}; +use lazy_static::lazy_static; +use std::sync::{Condvar, Arc, Mutex}; +use std::mem; + +#[derive(Default)] +struct LockedProxyData { + /// The number of free thread tokens, this may include the implicit token given to the process + free: usize, + + /// The number of threads waiting for a token + waiters: usize, + + /// The number of tokens we requested from the server + requested: usize, + + /// Stored tokens which will be dropped when we no longer need them + tokens: Vec, +} + +impl LockedProxyData { + fn request_token(&mut self, thread: &Mutex) { + self.requested += 1; + thread.lock().unwrap().request_token(); + } + + fn release_token(&mut self, cond_var: &Condvar) { + if self.waiters > 0 { + self.free += 1; + cond_var.notify_one(); + } else { + if self.tokens.is_empty() { + // We are returning the implicit token + self.free += 1; + } else { + // Return a real token to the server + self.tokens.pop().unwrap(); + } + } + } + + fn take_token(&mut self, thread: &Mutex) -> bool { + if self.free > 0 { + self.free -= 1; + self.waiters -= 1; + + // We stole some token reqested by someone else + // Request another one + if self.requested + self.free < self.waiters { + self.request_token(thread); + } + + true + } else { + false + } + } + + fn new_requested_token(&mut self, token: Acquired, cond_var: &Condvar) { + self.requested -= 1; + + // Does anything need this token? + if self.waiters > 0 { + self.free += 1; + self.tokens.push(token); + cond_var.notify_one(); + } else { + // Otherwise we'll just drop it + mem::drop(token); + } + } +} + +#[derive(Default)] +struct ProxyData { + lock: Mutex, + cond_var: Condvar, +} + +/// A helper type which makes managing jobserver tokens easier. +/// It also allows you to treat the implicit token given to the process +/// in the same manner as requested tokens. +struct Proxy { + thread: Mutex, + data: Arc, +} + +lazy_static! { + // We can only call `from_env` once per process + + // Note that this is unsafe because it may misinterpret file descriptors + // on Unix as jobserver file descriptors. We hopefully execute this near + // the beginning of the process though to ensure we don't get false + // positives, or in other words we try to execute this before we open + // any file descriptors ourselves. 
+ // + // Pick a "reasonable maximum" if we don't otherwise have + // a jobserver in our environment, capping out at 32 so we + // don't take everything down by hogging the process run queue. + // The fixed number is used to have deterministic compilation + // across machines. + // + // Also note that we stick this in a global because there could be + // multiple rustc instances in this process, and the jobserver is + // per-process. + static ref GLOBAL_CLIENT: Client = unsafe { + Client::from_env().unwrap_or_else(|| { + Client::new(32).expect("failed to create jobserver") + }) + }; + + static ref GLOBAL_PROXY: Proxy = { + let data = Arc::new(ProxyData::default()); + + Proxy { + data: data.clone(), + thread: Mutex::new(client().into_helper_thread(move |token| { + data.lock.lock().unwrap().new_requested_token(token.unwrap(), &data.cond_var); + }).unwrap()), + } + }; +} + +pub fn client() -> Client { + GLOBAL_CLIENT.clone() +} + +pub fn acquire_thread() { + GLOBAL_PROXY.acquire_token(); +} + +pub fn release_thread() { + GLOBAL_PROXY.release_token(); +} + +impl Proxy { + fn release_token(&self) { + self.data.lock.lock().unwrap().release_token(&self.data.cond_var); + } + + fn acquire_token(&self) { + let mut data = self.data.lock.lock().unwrap(); + data.waiters += 1; + if data.take_token(&self.thread) { + return; + } + // Request a token for us + data.request_token(&self.thread); + loop { + data = self.data.cond_var.wait(data).unwrap(); + if data.take_token(&self.thread) { + return; + } + } + } +} diff --git a/src/librustc_data_structures/lib.rs b/src/librustc_data_structures/lib.rs index 8e0ecb70c6896..a1d7ab8856daa 100644 --- a/src/librustc_data_structures/lib.rs +++ b/src/librustc_data_structures/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Various data structures used by the Rust compiler. The intention //! is that code in here should be not be *specific* to rustc, so that //! it can be easily unit tested and so forth. @@ -16,41 +6,36 @@ //! //! This API is completely unstable and subject to change. 
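The new `jobserver` module above puts the jobserver protocol (via the `jobserver` crate) behind two calls, `acquire_thread` and `release_thread`, with `Proxy` letting the process's implicit token be handed around like any requested one. A minimal caller-side sketch, not part of this patch, assuming `rustc_data_structures` can be pulled in as a dependency and using a busy loop as a stand-in for real work:

use rustc_data_structures::jobserver;
use std::thread;

fn main() {
    // Spawn more workers than tokens may be available for; each worker blocks
    // in `acquire_thread` until the proxy can hand it a token.
    let handles: Vec<_> = (0..8)
        .map(|i| {
            thread::spawn(move || {
                jobserver::acquire_thread();
                // Stand-in for CPU-bound work (e.g. codegen of one unit).
                let _busy: u64 = (0..10_000_000u64).sum();
                println!("worker {} finished", i);
                jobserver::release_thread();
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}

In real code the release should also happen on unwind (the `OnDrop` guard this crate already exposes is one way to do that); the essential contract is simply that every `acquire_thread` is paired with a `release_thread`.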
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://www.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(in_band_lifetimes)] #![feature(unboxed_closures)] +#![feature(generators)] +#![feature(generator_trait)] #![feature(fn_traits)] #![feature(unsize)] #![feature(specialization)] #![feature(optin_builtin_traits)] #![feature(nll)] #![feature(allow_internal_unstable)] -#![feature(vec_resize_with)] #![feature(hash_raw_entry)] +#![feature(stmt_expr_attributes)] +#![feature(core_intrinsics)] +#![feature(integer_atomics)] #![cfg_attr(unix, feature(libc))] #![cfg_attr(test, feature(test))] -extern crate core; -extern crate ena; +#![deny(rust_2018_idioms)] + #[macro_use] extern crate log; +#[allow(unused_extern_crates)] extern crate serialize as rustc_serialize; // used by deriving #[cfg(unix)] extern crate libc; -extern crate parking_lot; #[macro_use] extern crate cfg_if; -extern crate stable_deref_trait; -extern crate rustc_rayon as rayon; -extern crate rustc_rayon_core as rayon_core; -extern crate rustc_hash; -extern crate serialize; -extern crate graphviz; -extern crate smallvec; // See librustc_cratesio_shim/Cargo.toml for a comment explaining this. #[allow(unused_extern_crates)] @@ -58,16 +43,44 @@ extern crate rustc_cratesio_shim; pub use rustc_serialize::hex::ToHex; +#[inline(never)] +#[cold] +pub fn cold_path R, R>(f: F) -> R { + f() +} + +#[macro_export] +macro_rules! likely { + ($e:expr) => { + #[allow(unused_unsafe)] + { + unsafe { std::intrinsics::likely($e) } + } + } +} + +#[macro_export] +macro_rules! unlikely { + ($e:expr) => { + #[allow(unused_unsafe)] + { + unsafe { std::intrinsics::unlikely($e) } + } + } +} + pub mod macros; pub mod svh; pub mod base_n; pub mod bit_set; +pub mod box_region; pub mod const_cstr; pub mod flock; pub mod fx; pub mod graph; pub mod indexed_vec; pub mod interner; +pub mod jobserver; pub mod obligation_forest; pub mod owning_ref; pub mod ptr_key; @@ -91,12 +104,14 @@ pub struct OnDrop(pub F); impl OnDrop { /// Forgets the function which prevents it from running. /// Ensure that the function owns no memory, otherwise it will be leaked. + #[inline] pub fn disable(self) { std::mem::forget(self); } } impl Drop for OnDrop { + #[inline] fn drop(&mut self) { (self.0)(); } diff --git a/src/librustc_data_structures/macros.rs b/src/librustc_data_structures/macros.rs index 286a374b280b1..029e7267c8247 100644 --- a/src/librustc_data_structures/macros.rs +++ b/src/librustc_data_structures/macros.rs @@ -1,17 +1,7 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// A simple static assertion macro. The first argument should be a unique /// ALL_CAPS identifier that describes the condition. #[macro_export] -#[allow_internal_unstable] +#[allow_internal_unstable(type_ascription)] macro_rules! 
static_assert { ($name:ident: $test:expr) => { // Use the bool to access an array such that if the bool is false, the access diff --git a/src/librustc_data_structures/obligation_forest/README.md b/src/librustc_data_structures/obligation_forest/README.md deleted file mode 100644 index 982a2bacce164..0000000000000 --- a/src/librustc_data_structures/obligation_forest/README.md +++ /dev/null @@ -1,81 +0,0 @@ -The `ObligationForest` is a utility data structure used in trait -matching to track the set of outstanding obligations (those not yet -resolved to success or error). It also tracks the "backtrace" of each -pending obligation (why we are trying to figure this out in the first -place). - -### External view - -`ObligationForest` supports two main public operations (there are a -few others not discussed here): - -1. Add a new root obligations (`push_tree`). -2. Process the pending obligations (`process_obligations`). - -When a new obligation `N` is added, it becomes the root of an -obligation tree. This tree can also carry some per-tree state `T`, -which is given at the same time. This tree is a singleton to start, so -`N` is both the root and the only leaf. Each time the -`process_obligations` method is called, it will invoke its callback -with every pending obligation (so that will include `N`, the first -time). The callback also receives a (mutable) reference to the -per-tree state `T`. The callback should process the obligation `O` -that it is given and return one of three results: - -- `Ok(None)` -> ambiguous result. Obligation was neither a success - nor a failure. It is assumed that further attempts to process the - obligation will yield the same result unless something in the - surrounding environment changes. -- `Ok(Some(C))` - the obligation was *shallowly successful*. The - vector `C` is a list of subobligations. The meaning of this is that - `O` was successful on the assumption that all the obligations in `C` - are also successful. Therefore, `O` is only considered a "true" - success if `C` is empty. Otherwise, `O` is put into a suspended - state and the obligations in `C` become the new pending - obligations. They will be processed the next time you call - `process_obligations`. -- `Err(E)` -> obligation failed with error `E`. We will collect this - error and return it from `process_obligations`, along with the - "backtrace" of obligations (that is, the list of obligations up to - and including the root of the failed obligation). No further - obligations from that same tree will be processed, since the tree is - now considered to be in error. - -When the call to `process_obligations` completes, you get back an `Outcome`, -which includes three bits of information: - -- `completed`: a list of obligations where processing was fully - completed without error (meaning that all transitive subobligations - have also been completed). So, for example, if the callback from - `process_obligations` returns `Ok(Some(C))` for some obligation `O`, - then `O` will be considered completed right away if `C` is the - empty vector. Otherwise it will only be considered completed once - all the obligations in `C` have been found completed. -- `errors`: a list of errors that occurred and associated backtraces - at the time of error, which can be used to give context to the user. -- `stalled`: if true, then none of the existing obligations were - *shallowly successful* (that is, no callback returned `Ok(Some(_))`). 
- This implies that all obligations were either errors or returned an - ambiguous result, which means that any further calls to - `process_obligations` would simply yield back further ambiguous - results. This is used by the `FulfillmentContext` to decide when it - has reached a steady state. - -#### Snapshots - -The `ObligationForest` supports a limited form of snapshots; see -`start_snapshot`; `commit_snapshot`; and `rollback_snapshot`. In -particular, you can use a snapshot to roll back new root -obligations. However, it is an error to attempt to -`process_obligations` during a snapshot. - -### Implementation details - -For the most part, comments specific to the implementation are in the -code. This file only contains a very high-level overview. Basically, -the forest is stored in a vector. Each element of the vector is a node -in some tree. Each node in the vector has the index of an (optional) -parent and (for convenience) its root (which may be itself). It also -has a current state, described by `NodeState`. After each -processing step, we compress the vector to remove completed and error -nodes, which aren't needed anymore. diff --git a/src/librustc_data_structures/obligation_forest/graphviz.rs b/src/librustc_data_structures/obligation_forest/graphviz.rs index dcd448ee44f51..a0363e165e049 100644 --- a/src/librustc_data_structures/obligation_forest/graphviz.rs +++ b/src/librustc_data_structures/obligation_forest/graphviz.rs @@ -1,15 +1,5 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +use crate::obligation_forest::{ForestObligation, ObligationForest}; use graphviz as dot; -use obligation_forest::{ForestObligation, ObligationForest}; use std::env::var_os; use std::fs::File; use std::path::Path; @@ -17,8 +7,8 @@ use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; impl ObligationForest { - /// Create a graphviz representation of the obligation forest. Given a directory this will - /// create files with name of the format `_.gv`. The counter is + /// Creates a graphviz representation of the obligation forest. Given a directory this will + /// create files with name of the format `_.gv`. The counter is /// global and is maintained internally. 
/// /// Calling this will do nothing unless the environment variable @@ -51,22 +41,22 @@ impl<'a, O: ForestObligation + 'a> dot::Labeller<'a> for &'a ObligationForest type Node = usize; type Edge = (usize, usize); - fn graph_id(&self) -> dot::Id { + fn graph_id(&self) -> dot::Id<'_> { dot::Id::new("trait_obligation_forest").unwrap() } - fn node_id(&self, index: &Self::Node) -> dot::Id { + fn node_id(&self, index: &Self::Node) -> dot::Id<'_> { dot::Id::new(format!("obligation_{}", index)).unwrap() } - fn node_label(&self, index: &Self::Node) -> dot::LabelText { + fn node_label(&self, index: &Self::Node) -> dot::LabelText<'_> { let node = &self.nodes[*index]; let label = format!("{:?} ({:?})", node.obligation.as_predicate(), node.state.get()); dot::LabelText::LabelStr(label.into()) } - fn edge_label(&self, (_index_source, _index_target): &Self::Edge) -> dot::LabelText { + fn edge_label(&self, (_index_source, _index_target): &Self::Edge) -> dot::LabelText<'_> { dot::LabelText::LabelStr("".into()) } } @@ -75,11 +65,11 @@ impl<'a, O: ForestObligation + 'a> dot::GraphWalk<'a> for &'a ObligationForest dot::Nodes { + fn nodes(&self) -> dot::Nodes<'_, Self::Node> { (0..self.nodes.len()).collect() } - fn edges(&self) -> dot::Edges { + fn edges(&self) -> dot::Edges<'_, Self::Edge> { (0..self.nodes.len()) .flat_map(|i| { let node = &self.nodes[i]; diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs index 2a1958357e0e6..4490e5f86d2bd 100644 --- a/src/librustc_data_structures/obligation_forest/mod.rs +++ b/src/librustc_data_structures/obligation_forest/mod.rs @@ -1,21 +1,86 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The `ObligationForest` is a utility data structure used in trait -//! matching to track the set of outstanding obligations (those not -//! yet resolved to success or error). It also tracks the "backtrace" -//! of each pending obligation (why we are trying to figure this out -//! in the first place). See README.md for a general overview of how -//! to use this class. - -use fx::{FxHashMap, FxHashSet}; +//! matching to track the set of outstanding obligations (those not yet +//! resolved to success or error). It also tracks the "backtrace" of each +//! pending obligation (why we are trying to figure this out in the first +//! place). +//! +//! ### External view +//! +//! `ObligationForest` supports two main public operations (there are a +//! few others not discussed here): +//! +//! 1. Add a new root obligations (`push_tree`). +//! 2. Process the pending obligations (`process_obligations`). +//! +//! When a new obligation `N` is added, it becomes the root of an +//! obligation tree. This tree can also carry some per-tree state `T`, +//! which is given at the same time. This tree is a singleton to start, so +//! `N` is both the root and the only leaf. Each time the +//! `process_obligations` method is called, it will invoke its callback +//! with every pending obligation (so that will include `N`, the first +//! time). The callback also receives a (mutable) reference to the +//! per-tree state `T`. The callback should process the obligation `O` +//! that it is given and return one of three results: +//! 
+//! - `Ok(None)` -> ambiguous result. Obligation was neither a success +//! nor a failure. It is assumed that further attempts to process the +//! obligation will yield the same result unless something in the +//! surrounding environment changes. +//! - `Ok(Some(C))` - the obligation was *shallowly successful*. The +//! vector `C` is a list of subobligations. The meaning of this is that +//! `O` was successful on the assumption that all the obligations in `C` +//! are also successful. Therefore, `O` is only considered a "true" +//! success if `C` is empty. Otherwise, `O` is put into a suspended +//! state and the obligations in `C` become the new pending +//! obligations. They will be processed the next time you call +//! `process_obligations`. +//! - `Err(E)` -> obligation failed with error `E`. We will collect this +//! error and return it from `process_obligations`, along with the +//! "backtrace" of obligations (that is, the list of obligations up to +//! and including the root of the failed obligation). No further +//! obligations from that same tree will be processed, since the tree is +//! now considered to be in error. +//! +//! When the call to `process_obligations` completes, you get back an `Outcome`, +//! which includes three bits of information: +//! +//! - `completed`: a list of obligations where processing was fully +//! completed without error (meaning that all transitive subobligations +//! have also been completed). So, for example, if the callback from +//! `process_obligations` returns `Ok(Some(C))` for some obligation `O`, +//! then `O` will be considered completed right away if `C` is the +//! empty vector. Otherwise it will only be considered completed once +//! all the obligations in `C` have been found completed. +//! - `errors`: a list of errors that occurred and associated backtraces +//! at the time of error, which can be used to give context to the user. +//! - `stalled`: if true, then none of the existing obligations were +//! *shallowly successful* (that is, no callback returned `Ok(Some(_))`). +//! This implies that all obligations were either errors or returned an +//! ambiguous result, which means that any further calls to +//! `process_obligations` would simply yield back further ambiguous +//! results. This is used by the `FulfillmentContext` to decide when it +//! has reached a steady state. +//! +//! #### Snapshots +//! +//! The `ObligationForest` supports a limited form of snapshots; see +//! `start_snapshot`, `commit_snapshot`, and `rollback_snapshot`. In +//! particular, you can use a snapshot to roll back new root +//! obligations. However, it is an error to attempt to +//! `process_obligations` during a snapshot. +//! +//! ### Implementation details +//! +//! For the most part, comments specific to the implementation are in the +//! code. This file only contains a very high-level overview. Basically, +//! the forest is stored in a vector. Each element of the vector is a node +//! in some tree. Each node in the vector has the index of an (optional) +//! parent and (for convenience) its root (which may be itself). It also +//! has a current state, described by `NodeState`. After each +//! processing step, we compress the vector to remove completed and error +//! nodes, which aren't needed anymore. + +use crate::fx::{FxHashMap, FxHashSet}; use std::cell::Cell; use std::collections::hash_map::Entry; @@ -98,7 +163,7 @@ pub struct ObligationForest { obligation_tree_id_generator: ObligationTreeIdGenerator, - /// Per tree error cache. 
This is used to deduplicate errors, + /// Per tree error cache. This is used to deduplicate errors, /// which is necessary to avoid trait resolution overflow in /// some cases. /// @@ -203,13 +268,13 @@ impl ObligationForest { } } - /// Return the total number of nodes in the forest that have not + /// Returns the total number of nodes in the forest that have not /// yet been fully resolved. pub fn len(&self) -> usize { self.nodes.len() } - /// Registers an obligation + /// Registers an obligation. /// /// This CAN be done in a snapshot pub fn register_obligation(&mut self, obligation: O) { @@ -276,7 +341,7 @@ impl ObligationForest { } } - /// Convert all remaining obligations to the given error. + /// Converts all remaining obligations to the given error. /// /// This cannot be done during a snapshot. pub fn to_errors(&mut self, error: E) -> Vec> { @@ -315,10 +380,10 @@ impl ObligationForest { .insert(node.obligation.as_predicate().clone()); } - /// Perform a pass through the obligation list. This must + /// Performs a pass through the obligation list. This must /// be called in a loop until `outcome.stalled` is false. /// - /// This CANNOT be unrolled (presently, at least). + /// This _cannot_ be unrolled (presently, at least). pub fn process_obligations

(&mut self, processor: &mut P, do_completed: DoCompleted) -> Outcome where P: ObligationProcessor @@ -396,7 +461,7 @@ impl ObligationForest { } } - /// Mark all NodeState::Success nodes as NodeState::Done and + /// Mark all `NodeState::Success` nodes as `NodeState::Done` and /// report all cycles between them. This should be called /// after `mark_as_waiting` marks all nodes with pending /// subobligations as NodeState::Waiting. @@ -501,7 +566,7 @@ impl ObligationForest { } } - /// Marks all nodes that depend on a pending node as NodeState::Waiting. + /// Marks all nodes that depend on a pending node as `NodeState::Waiting`. fn mark_as_waiting(&self) { for node in &self.nodes { if node.state.get() == NodeState::Waiting { @@ -668,7 +733,7 @@ impl Node { // I need a Clone closure #[derive(Clone)] -struct GetObligation<'a, O: 'a>(&'a [Node]); +struct GetObligation<'a, O>(&'a [Node]); impl<'a, 'b, O> FnOnce<(&'b usize,)> for GetObligation<'a, O> { type Output = &'a O; diff --git a/src/librustc_data_structures/obligation_forest/node_index.rs b/src/librustc_data_structures/obligation_forest/node_index.rs index d89bd22ec9637..69ea473e05461 100644 --- a/src/librustc_data_structures/obligation_forest/node_index.rs +++ b/src/librustc_data_structures/obligation_forest/node_index.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::num::NonZeroU32; use std::u32; diff --git a/src/librustc_data_structures/obligation_forest/test.rs b/src/librustc_data_structures/obligation_forest/test.rs index 2a418973fbda2..27d4bf4959e63 100644 --- a/src/librustc_data_structures/obligation_forest/test.rs +++ b/src/librustc_data_structures/obligation_forest/test.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![cfg(test)] use super::{Error, DoCompleted, ObligationForest, ObligationProcessor, Outcome, ProcessResult}; diff --git a/src/librustc_data_structures/owning_ref/mod.rs b/src/librustc_data_structures/owning_ref/mod.rs index 0b126e5c572ed..236559dcd7c10 100644 --- a/src/librustc_data_structures/owning_ref/mod.rs +++ b/src/librustc_data_structures/owning_ref/mod.rs @@ -286,7 +286,7 @@ impl Erased for T {} pub unsafe trait IntoErased<'a> { /// Owner with the dereference type substituted to `Erased`. type Erased; - /// Perform the type erasure. + /// Performs the type erasure. fn into_erased(self) -> Self::Erased; } @@ -296,7 +296,7 @@ pub unsafe trait IntoErased<'a> { pub unsafe trait IntoErasedSend<'a> { /// Owner with the dereference type substituted to `Erased + Send`. type Erased: Send; - /// Perform the type erasure. + /// Performs the type erasure. fn into_erased_send(self) -> Self::Erased; } @@ -306,7 +306,7 @@ pub unsafe trait IntoErasedSend<'a> { pub unsafe trait IntoErasedSendSync<'a> { /// Owner with the dereference type substituted to `Erased + Send + Sync`. type Erased: Send + Sync; - /// Perform the type erasure. + /// Performs the type erasure. 
fn into_erased_send_sync(self) -> Self::Erased; } @@ -844,7 +844,7 @@ pub trait ToHandleMut { impl OwningHandle where O: StableAddress, O::Target: ToHandle, H: Deref, { - /// Create a new `OwningHandle` for a type that implements `ToHandle`. For types + /// Creates a new `OwningHandle` for a type that implements `ToHandle`. For types /// that don't implement `ToHandle`, callers may invoke `new_with_fn`, which accepts /// a callback to perform the conversion. pub fn new(o: O) -> Self { @@ -855,7 +855,7 @@ impl OwningHandle impl OwningHandle where O: StableAddress, O::Target: ToHandleMut, H: DerefMut, { - /// Create a new mutable `OwningHandle` for a type that implements `ToHandleMut`. + /// Creates a new mutable `OwningHandle` for a type that implements `ToHandleMut`. pub fn new_mut(o: O) -> Self { OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle_mut(x) }) } @@ -864,7 +864,7 @@ impl OwningHandle impl OwningHandle where O: StableAddress, H: Deref, { - /// Create a new OwningHandle. The provided callback will be invoked with + /// Creates a new OwningHandle. The provided callback will be invoked with /// a pointer to the object owned by `o`, and the returned value is stored /// as the object to which this `OwningHandle` will forward `Deref` and /// `DerefMut`. @@ -882,7 +882,7 @@ impl OwningHandle } } - /// Create a new OwningHandle. The provided callback will be invoked with + /// Creates a new OwningHandle. The provided callback will be invoked with /// a pointer to the object owned by `o`, and the returned value is stored /// as the object to which this `OwningHandle` will forward `Deref` and /// `DerefMut`. @@ -1002,7 +1002,7 @@ impl Debug for OwningRef where O: Debug, T: Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "OwningRef {{ owner: {:?}, reference: {:?} }}", self.owner(), @@ -1014,7 +1014,7 @@ impl Debug for OwningRefMut where O: Debug, T: Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "OwningRefMut {{ owner: {:?}, reference: {:?} }}", self.owner(), @@ -1047,7 +1047,7 @@ unsafe impl Sync for OwningRefMut where O: Sync, for<'a> (&'a mut T): Sync {} impl Debug for dyn Erased { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "",) } } diff --git a/src/librustc_data_structures/ptr_key.rs b/src/librustc_data_structures/ptr_key.rs index 6835dab38df0a..bf3ae2d7af58f 100644 --- a/src/librustc_data_structures/ptr_key.rs +++ b/src/librustc_data_structures/ptr_key.rs @@ -1,20 +1,10 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::{hash, ptr}; use std::ops::Deref; /// A wrapper around reference that compares and hashes like a pointer. /// Can be used as a key in sets/maps indexed by pointers to avoid `unsafe`. 
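To make the `PtrKey` doc comment concrete before the definition just below: equality and hashing go through the reference's address, not the pointee, so two equal values stored at different addresses are distinct keys. A small sketch (it assumes the pointer-based `Eq`/`Hash` impls that accompany this struct elsewhere in the file, and treats `rustc_data_structures::ptr_key::PtrKey` as an importable path):

use rustc_data_structures::ptr_key::PtrKey;
use std::collections::HashSet;

fn main() {
    let a = String::from("same contents");
    let b = String::from("same contents");

    let mut seen = HashSet::new();
    seen.insert(PtrKey(&a));
    seen.insert(PtrKey(&b));

    // The strings compare equal, but they live at different addresses,
    // so the set keeps two distinct entries.
    assert_eq!(seen.len(), 2);
    assert!(seen.contains(&PtrKey(&a)));
}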
#[derive(Debug)] -pub struct PtrKey<'a, T: 'a>(pub &'a T); +pub struct PtrKey<'a, T>(pub &'a T); impl<'a, T> Clone for PtrKey<'a, T> { fn clone(&self) -> Self { *self } diff --git a/src/librustc_data_structures/sip128.rs b/src/librustc_data_structures/sip128.rs index 1f0b0d9cbfb0b..06f157f972932 100644 --- a/src/librustc_data_structures/sip128.rs +++ b/src/librustc_data_structures/sip128.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes. use std::cmp; @@ -54,7 +44,7 @@ macro_rules! compress { }); } -/// Load an integer of the desired type from a byte stream, in LE order. Uses +/// Loads an integer of the desired type from a byte stream, in LE order. Uses /// `copy_nonoverlapping` to let the compiler generate the most efficient way /// to load it from a possibly unaligned address. /// @@ -71,7 +61,7 @@ macro_rules! load_int_le { }); } -/// Load an u64 using up to 7 bytes of a byte slice. +/// Loads an u64 using up to 7 bytes of a byte slice. /// /// Unsafe because: unchecked indexing at start..start+len #[inline] diff --git a/src/librustc_data_structures/small_c_str.rs b/src/librustc_data_structures/small_c_str.rs index 08794fbec8dc5..bde7911267f77 100644 --- a/src/librustc_data_structures/small_c_str.rs +++ b/src/librustc_data_structures/small_c_str.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::ffi; use std::ops::Deref; diff --git a/src/librustc_data_structures/snapshot_map/mod.rs b/src/librustc_data_structures/snapshot_map/mod.rs index c256506a19d42..91d6e29237002 100644 --- a/src/librustc_data_structures/snapshot_map/mod.rs +++ b/src/librustc_data_structures/snapshot_map/mod.rs @@ -1,14 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use fx::FxHashMap; +use crate::fx::FxHashMap; use std::hash::Hash; use std::ops; use std::mem; diff --git a/src/librustc_data_structures/snapshot_map/test.rs b/src/librustc_data_structures/snapshot_map/test.rs index b4ecb85fc4302..72ca53c2be9ed 100644 --- a/src/librustc_data_structures/snapshot_map/test.rs +++ b/src/librustc_data_structures/snapshot_map/test.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::SnapshotMap; #[test] diff --git a/src/librustc_data_structures/sorted_map.rs b/src/librustc_data_structures/sorted_map.rs index 3bd3d11660797..1f674c1c664e4 100644 --- a/src/librustc_data_structures/sorted_map.rs +++ b/src/librustc_data_structures/sorted_map.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::borrow::Borrow; use std::cmp::Ordering; use std::iter::FromIterator; @@ -121,7 +111,7 @@ impl SortedMap { /// Iterate over elements, sorted by key #[inline] - pub fn iter(&self) -> ::std::slice::Iter<(K, V)> { + pub fn iter(&self) -> ::std::slice::Iter<'_, (K, V)> { self.data.iter() } diff --git a/src/librustc_data_structures/stable_hasher.rs b/src/librustc_data_structures/stable_hasher.rs index c85d771a18106..19343a9250df3 100644 --- a/src/librustc_data_structures/stable_hasher.rs +++ b/src/librustc_data_structures/stable_hasher.rs @@ -1,17 +1,9 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::hash::{Hash, Hasher, BuildHasher}; use std::marker::PhantomData; use std::mem; -use sip128::SipHasher128; +use crate::sip128::SipHasher128; +use crate::indexed_vec; +use crate::bit_set; /// When hashing something that ends up affecting properties like symbol names, /// we want these symbol names to be calculated independently of other factors @@ -27,7 +19,7 @@ pub struct StableHasher { } impl ::std::fmt::Debug for StableHasher { - fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { write!(f, "{:?}", self.state) } } @@ -443,7 +435,7 @@ impl HashStable for ::std::mem::Discriminant { } } -impl HashStable for ::indexed_vec::IndexVec +impl HashStable for indexed_vec::IndexVec where T: HashStable, { fn hash_stable(&self, @@ -457,7 +449,7 @@ impl HashStable for ::indexed_vec::IndexVec< } -impl HashStable for ::bit_set::BitSet +impl HashStable for bit_set::BitSet { fn hash_stable(&self, ctx: &mut CTX, diff --git a/src/librustc_data_structures/svh.rs b/src/librustc_data_structures/svh.rs index 3d17824608cc8..df4f61768375e 100644 --- a/src/librustc_data_structures/svh.rs +++ b/src/librustc_data_structures/svh.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Calculation and management of a Strict Version Hash for crates //! //! 
The SVH is used for incremental compilation to track when HIR @@ -19,7 +9,7 @@ use std::fmt; use std::hash::{Hash, Hasher}; use serialize::{Encodable, Decodable, Encoder, Decoder}; -use stable_hasher; +use crate::stable_hasher; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Svh { @@ -27,7 +17,7 @@ pub struct Svh { } impl Svh { - /// Create a new `Svh` given the hash. If you actually want to + /// Creates a new `Svh` given the hash. If you actually want to /// compute the SVH from some HIR, you want the `calculate_svh` /// function found in `librustc_incremental`. pub fn new(hash: u64) -> Svh { @@ -50,7 +40,7 @@ impl Hash for Svh { } impl fmt::Display for Svh { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.pad(&self.to_string()) } } diff --git a/src/librustc_data_structures/sync.rs b/src/librustc_data_structures/sync.rs index 6a4012c81984d..73247c1469efd 100644 --- a/src/librustc_data_structures/sync.rs +++ b/src/librustc_data_structures/sync.rs @@ -1,45 +1,27 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module defines types which are thread safe if cfg!(parallel_queries) is true. +//! This module defines types which are thread safe if cfg!(parallel_compiler) is true. //! //! `Lrc` is an alias of either Rc or Arc. //! //! `Lock` is a mutex. -//! It internally uses `parking_lot::Mutex` if cfg!(parallel_queries) is true, +//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true, //! `RefCell` otherwise. //! //! `RwLock` is a read-write lock. -//! It internally uses `parking_lot::RwLock` if cfg!(parallel_queries) is true, +//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true, //! `RefCell` otherwise. //! -//! `LockCell` is a thread safe version of `Cell`, with `set` and `get` operations. -//! It can never deadlock. It uses `Cell` when -//! cfg!(parallel_queries) is false, otherwise it is a `Lock`. +//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false. //! -//! `MTLock` is a mutex which disappears if cfg!(parallel_queries) is false. -//! -//! `MTRef` is a immutable reference if cfg!(parallel_queries), and an mutable reference otherwise. +//! `MTRef` is a immutable reference if cfg!(parallel_compiler), and an mutable reference otherwise. //! //! `rustc_erase_owner!` erases a OwningRef owner into Erased or Erased + Send + Sync -//! depending on the value of cfg!(parallel_queries). +//! depending on the value of cfg!(parallel_compiler). use std::collections::HashMap; use std::hash::{Hash, BuildHasher}; -use std::cmp::Ordering; use std::marker::PhantomData; -use std::fmt::Debug; -use std::fmt::Formatter; -use std::fmt; use std::ops::{Deref, DerefMut}; -use owning_ref::{Erased, OwningRef}; +use crate::owning_ref::{Erased, OwningRef}; pub fn serial_join(oper_a: A, oper_b: B) -> (RA, RB) where A: FnOnce() -> RA, @@ -64,8 +46,11 @@ pub fn serial_scope(f: F) -> R f(&SerialScope) } +pub use std::sync::atomic::Ordering::SeqCst; +pub use std::sync::atomic::Ordering; + cfg_if! { - if #[cfg(not(parallel_queries))] { + if #[cfg(not(parallel_compiler))] { pub auto trait Send {} pub auto trait Sync {} @@ -79,15 +64,117 @@ cfg_if! 
{ } } + use std::ops::Add; + use std::panic::{resume_unwind, catch_unwind, AssertUnwindSafe}; + + #[derive(Debug)] + pub struct Atomic(Cell); + + impl Atomic { + #[inline] + pub fn new(v: T) -> Self { + Atomic(Cell::new(v)) + } + } + + impl Atomic { + pub fn into_inner(self) -> T { + self.0.into_inner() + } + + #[inline] + pub fn load(&self, _: Ordering) -> T { + self.0.get() + } + + #[inline] + pub fn store(&self, val: T, _: Ordering) { + self.0.set(val) + } + + pub fn swap(&self, val: T, _: Ordering) -> T { + self.0.replace(val) + } + + pub fn compare_exchange(&self, + current: T, + new: T, + _: Ordering, + _: Ordering) + -> Result { + let read = self.0.get(); + if read == current { + self.0.set(new); + Ok(read) + } else { + Err(read) + } + } + } + + impl + Copy> Atomic { + pub fn fetch_add(&self, val: T, _: Ordering) -> T { + let old = self.0.get(); + self.0.set(old + val); + old + } + } + + pub type AtomicUsize = Atomic; + pub type AtomicBool = Atomic; + pub type AtomicU32 = Atomic; + pub type AtomicU64 = Atomic; + pub use self::serial_join as join; pub use self::serial_scope as scope; + #[macro_export] + macro_rules! parallel { + ($($blocks:tt),*) => { + // We catch panics here ensuring that all the blocks execute. + // This makes behavior consistent with the parallel compiler. + let mut panic = None; + $( + if let Err(p) = ::std::panic::catch_unwind( + ::std::panic::AssertUnwindSafe(|| $blocks) + ) { + if panic.is_none() { + panic = Some(p); + } + } + )* + if let Some(panic) = panic { + ::std::panic::resume_unwind(panic); + } + } + } + pub use std::iter::Iterator as ParallelIterator; pub fn par_iter(t: T) -> T::IntoIter { t.into_iter() } + pub fn par_for_each_in( + t: T, + for_each: + impl Fn(<::IntoIter as Iterator>::Item) + Sync + Send + ) { + // We catch panics here ensuring that all the loop iterations execute. + // This makes behavior consistent with the parallel compiler. + let mut panic = None; + t.into_iter().for_each(|i| { + if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) { + if panic.is_none() { + panic = Some(p); + } + } + }); + if let Some(panic) = panic { + resume_unwind(panic); + } + } + pub type MetadataRef = OwningRef, [u8]>; pub use std::rc::Rc as Lrc; @@ -170,47 +257,6 @@ cfg_if! { MTLock(self.0.clone()) } } - - pub struct LockCell(Cell); - - impl LockCell { - #[inline(always)] - pub fn new(inner: T) -> Self { - LockCell(Cell::new(inner)) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline(always)] - pub fn set(&self, new_inner: T) { - self.0.set(new_inner); - } - - #[inline(always)] - pub fn get(&self) -> T where T: Copy { - self.0.get() - } - - #[inline(always)] - pub fn set_mut(&mut self, new_inner: T) { - self.0.set(new_inner); - } - - #[inline(always)] - pub fn get_mut(&mut self) -> T where T: Copy { - self.0.get() - } - } - - impl LockCell> { - #[inline(always)] - pub fn take(&self) -> Option { - unsafe { (*self.0.as_ptr()).take() } - } - } } else { pub use std::marker::Send as Send; pub use std::marker::Sync as Sync; @@ -223,6 +269,8 @@ cfg_if! { pub use parking_lot::MutexGuard as LockGuard; pub use parking_lot::MappedMutexGuard as MappedLockGuard; + pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64}; + pub use std::sync::Arc as Lrc; pub use std::sync::Weak as Weak; @@ -248,12 +296,12 @@ cfg_if! 
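Among the additions above is a single-threaded `Atomic` shim backed by `Cell`, whose `Ordering` arguments are accepted and ignored so that `AtomicUsize`, `AtomicBool`, and friends can be plain type aliases when the compiler is not built for parallelism. A minimal sketch of that shim with its type parameters spelled out; the bounds shown are illustrative simplifications.

```rust
use std::cell::Cell;
use std::sync::atomic::Ordering;

#[derive(Debug)]
pub struct Atomic<T: Copy>(Cell<T>);

impl<T: Copy> Atomic<T> {
    pub fn new(v: T) -> Self { Atomic(Cell::new(v)) }
    // The `Ordering` is irrelevant on a single thread, so it is simply ignored.
    pub fn load(&self, _: Ordering) -> T { self.0.get() }
    pub fn store(&self, val: T, _: Ordering) { self.0.set(val) }
    pub fn swap(&self, val: T, _: Ordering) -> T { self.0.replace(val) }
}

impl<T: Copy + PartialEq> Atomic<T> {
    // Succeeds (and writes `new`) only when the current value equals `current`.
    pub fn compare_exchange(&self, current: T, new: T, _: Ordering, _: Ordering)
        -> Result<T, T>
    {
        let read = self.0.get();
        if read == current { self.0.set(new); Ok(read) } else { Err(read) }
    }
}

impl<T: std::ops::Add<Output = T> + Copy> Atomic<T> {
    pub fn fetch_add(&self, val: T, _: Ordering) -> T {
        let old = self.0.get();
        self.0.set(old + val);
        old
    }
}

pub type AtomicUsize = Atomic<usize>;

fn main() {
    let counter = AtomicUsize::new(0);
    counter.fetch_add(2, Ordering::SeqCst);
    assert_eq!(counter.compare_exchange(2, 5, Ordering::SeqCst, Ordering::SeqCst), Ok(2));
    println!("value = {}", counter.load(Ordering::SeqCst));
}
```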
{ } #[inline(always)] - pub fn lock(&self) -> LockGuard { + pub fn lock(&self) -> LockGuard<'_, T> { self.0.lock() } #[inline(always)] - pub fn lock_mut(&self) -> LockGuard { + pub fn lock_mut(&self) -> LockGuard<'_, T> { self.lock() } } @@ -265,6 +313,29 @@ cfg_if! { use std::thread; pub use rayon::{join, scope}; + /// Runs a list of blocks in parallel. The first block is executed immediately on + /// the current thread. Use that for the longest running block. + #[macro_export] + macro_rules! parallel { + (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => { + parallel!(impl $fblock [$block, $($c,)*] [$($rest),*]) + }; + (impl $fblock:tt [$($blocks:tt,)*] []) => { + ::rustc_data_structures::sync::scope(|s| { + $( + s.spawn(|_| $blocks); + )* + $fblock; + }) + }; + ($fblock:tt, $($blocks:tt),*) => { + // Reverse the order of the later blocks since Rayon executes them in reverse order + // when using a single thread. This ensures the execution order matches that + // of a single threaded rustc + parallel!(impl $fblock [] [$($blocks),*]); + }; + } + pub use rayon_core::WorkerLocal; pub use rayon::iter::ParallelIterator; @@ -274,6 +345,15 @@ cfg_if! { t.into_par_iter() } + pub fn par_for_each_in( + t: T, + for_each: impl Fn( + <::Iter as ParallelIterator>::Item + ) + Sync + Send + ) { + t.into_par_iter().for_each(for_each) + } + pub type MetadataRef = OwningRef, [u8]>; /// This makes locks panic if they are already held. @@ -288,51 +368,11 @@ cfg_if! { v.erase_send_sync_owner() }} } - - pub struct LockCell(Lock); - - impl LockCell { - #[inline(always)] - pub fn new(inner: T) -> Self { - LockCell(Lock::new(inner)) - } - - #[inline(always)] - pub fn into_inner(self) -> T { - self.0.into_inner() - } - - #[inline(always)] - pub fn set(&self, new_inner: T) { - *self.0.lock() = new_inner; - } - - #[inline(always)] - pub fn get(&self) -> T where T: Copy { - *self.0.lock() - } - - #[inline(always)] - pub fn set_mut(&mut self, new_inner: T) { - *self.0.get_mut() = new_inner; - } - - #[inline(always)] - pub fn get_mut(&mut self) -> T where T: Copy { - *self.0.get_mut() - } - } - - impl LockCell> { - #[inline(always)] - pub fn take(&self) -> Option { - self.0.lock().take() - } - } } } pub fn assert_sync() {} +pub fn assert_send() {} pub fn assert_send_val(_t: &T) {} pub fn assert_send_sync_val(_t: &T) {} @@ -476,65 +516,6 @@ impl Once { } } -impl Debug for LockCell { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - f.debug_struct("LockCell") - .field("value", &self.get()) - .finish() - } -} - -impl Default for LockCell { - /// Creates a `LockCell`, with the `Default` value for T. 
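The parallel-compiler `parallel!` macro above runs the first block on the current thread and spawns the remaining blocks into a Rayon scope, reversing their order so that a single-threaded Rayon pool matches the serial execution order. A minimal sketch of the resulting behavior, assuming the `rayon` crate is available; the printed strings are placeholders for the macro's blocks.

```rust
fn main() {
    rayon::scope(|s| {
        // Later blocks are handed to the scope and may run on other workers.
        s.spawn(|_| println!("second block, possibly on another thread"));
        s.spawn(|_| println!("third block, possibly on another thread"));
        // The first (typically longest-running) block executes right here,
        // on the thread that entered the scope.
        println!("first block, on the current thread");
    });
}
```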
- #[inline] - fn default() -> LockCell { - LockCell::new(Default::default()) - } -} - -impl PartialEq for LockCell { - #[inline] - fn eq(&self, other: &LockCell) -> bool { - self.get() == other.get() - } -} - -impl Eq for LockCell {} - -impl PartialOrd for LockCell { - #[inline] - fn partial_cmp(&self, other: &LockCell) -> Option { - self.get().partial_cmp(&other.get()) - } - - #[inline] - fn lt(&self, other: &LockCell) -> bool { - self.get() < other.get() - } - - #[inline] - fn le(&self, other: &LockCell) -> bool { - self.get() <= other.get() - } - - #[inline] - fn gt(&self, other: &LockCell) -> bool { - self.get() > other.get() - } - - #[inline] - fn ge(&self, other: &LockCell) -> bool { - self.get() >= other.get() - } -} - -impl Ord for LockCell { - #[inline] - fn cmp(&self, other: &LockCell) -> Ordering { - self.get().cmp(&other.get()) - } -} - #[derive(Debug)] pub struct Lock(InnerLock); @@ -554,21 +535,21 @@ impl Lock { self.0.get_mut() } - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] #[inline(always)] - pub fn try_lock(&self) -> Option> { + pub fn try_lock(&self) -> Option> { self.0.try_lock() } - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] #[inline(always)] - pub fn try_lock(&self) -> Option> { + pub fn try_lock(&self) -> Option> { self.0.try_borrow_mut().ok() } - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] #[inline(always)] - pub fn lock(&self) -> LockGuard { + pub fn lock(&self) -> LockGuard<'_, T> { if ERROR_CHECKING { self.0.try_lock().expect("lock was already held") } else { @@ -576,9 +557,9 @@ impl Lock { } } - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] #[inline(always)] - pub fn lock(&self) -> LockGuard { + pub fn lock(&self) -> LockGuard<'_, T> { self.0.borrow_mut() } @@ -588,12 +569,12 @@ impl Lock { } #[inline(always)] - pub fn borrow(&self) -> LockGuard { + pub fn borrow(&self) -> LockGuard<'_, T> { self.lock() } #[inline(always)] - pub fn borrow_mut(&self) -> LockGuard { + pub fn borrow_mut(&self) -> LockGuard<'_, T> { self.lock() } } @@ -632,15 +613,15 @@ impl RwLock { self.0.get_mut() } - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] #[inline(always)] - pub fn read(&self) -> ReadGuard { + pub fn read(&self) -> ReadGuard<'_, T> { self.0.borrow() } - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] #[inline(always)] - pub fn read(&self) -> ReadGuard { + pub fn read(&self) -> ReadGuard<'_, T> { if ERROR_CHECKING { self.0.try_read().expect("lock was already held") } else { @@ -653,27 +634,27 @@ impl RwLock { f(&*self.read()) } - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] #[inline(always)] - pub fn try_write(&self) -> Result, ()> { + pub fn try_write(&self) -> Result, ()> { self.0.try_borrow_mut().map_err(|_| ()) } - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] #[inline(always)] - pub fn try_write(&self) -> Result, ()> { + pub fn try_write(&self) -> Result, ()> { self.0.try_write().ok_or(()) } - #[cfg(not(parallel_queries))] + #[cfg(not(parallel_compiler))] #[inline(always)] - pub fn write(&self) -> WriteGuard { + pub fn write(&self) -> WriteGuard<'_, T> { self.0.borrow_mut() } - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] #[inline(always)] - pub fn write(&self) -> WriteGuard { + pub fn write(&self) -> WriteGuard<'_, T> { if ERROR_CHECKING { self.0.try_write().expect("lock was already held") } else { @@ -687,12 +668,12 @@ impl RwLock { } #[inline(always)] - pub fn borrow(&self) -> ReadGuard { + pub fn borrow(&self) -> ReadGuard<'_, T> { self.read() } 
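`Lock<T>` above keeps one public API while switching its internals on `cfg(parallel_compiler)`: a `parking_lot::Mutex` in parallel builds, a `RefCell` otherwise. A minimal sketch of that duplication pattern, using `std::sync::Mutex` instead of `parking_lot` to stay dependency-free; `with_lock` is an illustrative method name, not the crate's API.

```rust
use std::cell::RefCell;

pub struct Lock<T> {
    #[cfg(parallel_compiler)]
    inner: std::sync::Mutex<T>,
    #[cfg(not(parallel_compiler))]
    inner: RefCell<T>,
}

impl<T> Lock<T> {
    #[cfg(parallel_compiler)]
    pub fn new(value: T) -> Self {
        Lock { inner: std::sync::Mutex::new(value) }
    }

    #[cfg(not(parallel_compiler))]
    pub fn new(value: T) -> Self {
        Lock { inner: RefCell::new(value) }
    }

    // In the parallel build this really blocks on a mutex; in the serial
    // build it is just a dynamically checked borrow.
    #[cfg(parallel_compiler)]
    pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        f(&mut *self.inner.lock().unwrap())
    }

    #[cfg(not(parallel_compiler))]
    pub fn with_lock<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        f(&mut *self.inner.borrow_mut())
    }
}

fn main() {
    let counter = Lock::new(0u32);
    counter.with_lock(|c| *c += 1);
    counter.with_lock(|c| println!("count = {}", c));
}
```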
#[inline(always)] - pub fn borrow_mut(&self) -> WriteGuard { + pub fn borrow_mut(&self) -> WriteGuard<'_, T> { self.write() } } @@ -709,27 +690,27 @@ impl Clone for RwLock { /// It will panic if it is used on multiple threads. #[derive(Copy, Clone, Hash, Debug, Eq, PartialEq)] pub struct OneThread { - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] thread: thread::ThreadId, inner: T, } -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] unsafe impl std::marker::Sync for OneThread {} -#[cfg(parallel_queries)] +#[cfg(parallel_compiler)] unsafe impl std::marker::Send for OneThread {} impl OneThread { #[inline(always)] fn check(&self) { - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] assert_eq!(thread::current().id(), self.thread); } #[inline(always)] pub fn new(inner: T) -> Self { OneThread { - #[cfg(parallel_queries)] + #[cfg(parallel_compiler)] thread: thread::current().id(), inner, } diff --git a/src/librustc_data_structures/thin_vec.rs b/src/librustc_data_structures/thin_vec.rs index 5b7ea161b2897..52f23f4893eeb 100644 --- a/src/librustc_data_structures/thin_vec.rs +++ b/src/librustc_data_structures/thin_vec.rs @@ -1,12 +1,4 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +use crate::stable_hasher::{StableHasher, StableHasherResult, HashStable}; /// A vector type optimized for cases where this size is usually 0 (cf. `SmallVector`). /// The `Option>` wrapping allows us to represent a zero sized vector with `None`, @@ -49,6 +41,15 @@ impl ::std::ops::Deref for ThinVec { } } +impl ::std::ops::DerefMut for ThinVec { + fn deref_mut(&mut self) -> &mut [T] { + match *self { + ThinVec(None) => &mut [], + ThinVec(Some(ref mut vec)) => vec, + } + } +} + impl Extend for ThinVec { fn extend>(&mut self, iter: I) { match *self { @@ -57,3 +58,11 @@ impl Extend for ThinVec { } } } + +impl, CTX> HashStable for ThinVec { + fn hash_stable(&self, + hcx: &mut CTX, + hasher: &mut StableHasher) { + (**self).hash_stable(hcx, hasher) + } +} diff --git a/src/librustc_data_structures/tiny_list.rs b/src/librustc_data_structures/tiny_list.rs index 9dbf0ea9f438c..3d74516d9c326 100644 --- a/src/librustc_data_structures/tiny_list.rs +++ b/src/librustc_data_structures/tiny_list.rs @@ -1,14 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - //! A singly-linked list. //! //! Using this data structure only makes sense under very specific @@ -134,7 +123,7 @@ impl Element { mod test { use super::*; extern crate test; - use self::test::Bencher; + use test::Bencher; #[test] fn test_contains_and_insert() { diff --git a/src/librustc_data_structures/transitive_relation.rs b/src/librustc_data_structures/transitive_relation.rs index 250dad8136e14..0974607fabea8 100644 --- a/src/librustc_data_structures/transitive_relation.rs +++ b/src/librustc_data_structures/transitive_relation.rs @@ -1,18 +1,8 @@ -// Copyright 2015 The Rust Project Developers. 
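The `thin_vec.rs` hunk above adds a `DerefMut` impl so `ThinVec` (an `Option<Box<Vec<T>>>` that is one pointer wide when empty) can hand out mutable slices. A minimal self-contained sketch of that layout and the new impl; `push` is added here only to make the example runnable.

```rust
pub struct ThinVec<T>(Option<Box<Vec<T>>>);

impl<T> ThinVec<T> {
    pub fn new() -> Self { ThinVec(None) }

    pub fn push(&mut self, value: T) {
        match self.0 {
            Some(ref mut vec) => vec.push(value),
            None => self.0 = Some(Box::new(vec![value])),
        }
    }
}

impl<T> std::ops::Deref for ThinVec<T> {
    type Target = [T];
    fn deref(&self) -> &[T] {
        match self.0 {
            None => &[],
            Some(ref vec) => vec,
        }
    }
}

impl<T> std::ops::DerefMut for ThinVec<T> {
    // The new impl from the patch: mutable access through the slice API.
    fn deref_mut(&mut self) -> &mut [T] {
        match self.0 {
            None => &mut [],
            Some(ref mut vec) => vec,
        }
    }
}

fn main() {
    let mut v: ThinVec<u32> = ThinVec::new();
    v.push(1);
    v[0] += 41; // goes through DerefMut to the slice
    println!("len = {}, first = {}", v.len(), v[0]);
    println!("empty ThinVec is {} bytes", std::mem::size_of::<ThinVec<u32>>());
}
```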
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use bit_set::BitMatrix; -use fx::FxHashMap; -use sync::Lock; +use crate::bit_set::BitMatrix; +use crate::fx::FxHashMap; +use crate::stable_hasher::{HashStable, StableHasher, StableHasherResult}; +use crate::sync::Lock; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; -use stable_hasher::{HashStable, StableHasher, StableHasherResult}; use std::fmt::Debug; use std::hash::Hash; use std::mem; @@ -92,7 +82,7 @@ impl TransitiveRelation { } /// Applies the (partial) function to each edge and returns a new - /// relation. If `f` returns `None` for any end-point, returns + /// relation. If `f` returns `None` for any end-point, returns /// `None`. pub fn maybe_map(&self, mut f: F) -> Option> where F: FnMut(&T) -> Option, @@ -121,7 +111,7 @@ impl TransitiveRelation { } } - /// Check whether `a < target` (transitively) + /// Checks whether `a < target` (transitively) pub fn contains(&self, a: &T, b: &T) -> bool { match (self.index(a), self.index(b)) { (Some(a), Some(b)) => self.with_closure(|closure| closure.contains(a.0, b.0)), @@ -132,7 +122,7 @@ impl TransitiveRelation { /// Thinking of `x R y` as an edge `x -> y` in a graph, this /// returns all things reachable from `a`. /// - /// Really this probably ought to be `impl Iterator`, but + /// Really this probably ought to be `impl Iterator`, but /// I'm too lazy to make that work, and -- given the caching /// strategy -- it'd be a touch tricky anyhow. pub fn reachable_from(&self, a: &T) -> Vec<&T> { @@ -162,20 +152,20 @@ impl TransitiveRelation { /// the query is `postdom_upper_bound(a, b)`: /// /// ```text - /// // returns Some(x), which is also LUB + /// // Returns Some(x), which is also LUB. /// a -> a1 -> x /// ^ /// | /// b -> b1 ---+ /// - /// // returns Some(x), which is not LUB (there is none) - /// // diagonal edges run left-to-right + /// // Returns `Some(x)`, which is not LUB (there is none) + /// // diagonal edges run left-to-right. /// a -> a1 -> x /// \/ ^ /// /\ | /// b -> b1 ---+ /// - /// // returns None + /// // Returns `None`. /// a -> a1 /// b -> b1 /// ``` diff --git a/src/librustc_data_structures/vec_linked_list.rs b/src/librustc_data_structures/vec_linked_list.rs index 390dca6b9055f..c00c707a43542 100644 --- a/src/librustc_data_structures/vec_linked_list.rs +++ b/src/librustc_data_structures/vec_linked_list.rs @@ -1,14 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use indexed_vec::{Idx, IndexVec}; +use crate::indexed_vec::{Idx, IndexVec}; pub fn iter( first: Option, diff --git a/src/librustc_data_structures/work_queue.rs b/src/librustc_data_structures/work_queue.rs index eff80a98c9de9..193025aafad20 100644 --- a/src/librustc_data_structures/work_queue.rs +++ b/src/librustc_data_structures/work_queue.rs @@ -1,15 +1,5 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
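The `transitive_relation.rs` doc fixes above concern `contains(a, b)`, which asks whether `a < b` holds after taking the transitive closure of the recorded edges. A small self-contained illustration of that semantics using Warshall's algorithm over a boolean matrix; the real type instead caches a `BitMatrix` closure.

```rust
// Compute reachability over all pairs: reach[i][j] becomes true whenever
// there is some path i -> ... -> j through the recorded edges.
fn transitive_closure(mut reach: Vec<Vec<bool>>) -> Vec<Vec<bool>> {
    let n = reach.len();
    for k in 0..n {
        for i in 0..n {
            for j in 0..n {
                if reach[i][k] && reach[k][j] {
                    reach[i][j] = true;
                }
            }
        }
    }
    reach
}

fn main() {
    // Edges: 0 < 1 and 1 < 2.
    let mut edges = vec![vec![false; 3]; 3];
    edges[0][1] = true;
    edges[1][2] = true;
    let closure = transitive_closure(edges);
    // `contains(0, 2)` holds only transitively.
    assert!(closure[0][2]);
    println!("0 < 2 transitively: {}", closure[0][2]);
}
```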
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use bit_set::BitSet; -use indexed_vec::Idx; +use crate::bit_set::BitSet; +use crate::indexed_vec::Idx; use std::collections::VecDeque; /// A work queue is a handy data structure for tracking work left to @@ -24,7 +14,7 @@ pub struct WorkQueue { } impl WorkQueue { - /// Create a new work queue with all the elements from (0..len). + /// Creates a new work queue with all the elements from (0..len). #[inline] pub fn with_all(len: usize) -> Self { WorkQueue { @@ -33,7 +23,7 @@ impl WorkQueue { } } - /// Create a new work queue that starts empty, where elements range from (0..len). + /// Creates a new work queue that starts empty, where elements range from (0..len). #[inline] pub fn with_none(len: usize) -> Self { WorkQueue { @@ -64,7 +54,7 @@ impl WorkQueue { } } - /// True if nothing is enqueued. + /// Returns `true` if nothing is enqueued. #[inline] pub fn is_empty(&self) -> bool { self.deque.is_empty() diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml index 0356cc9bd545f..5432f80a1712c 100644 --- a/src/librustc_driver/Cargo.toml +++ b/src/librustc_driver/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_driver" version = "0.0.0" +edition = "2018" [lib] name = "rustc_driver" @@ -13,14 +14,14 @@ arena = { path = "../libarena" } graphviz = { path = "../libgraphviz" } log = "0.4" env_logger = { version = "0.5", default-features = false } -rustc-rayon = "0.1.1" -scoped-tls = { version = "0.1.1", features = ["nightly"] } +rayon = { version = "0.1.2", package = "rustc-rayon" } +scoped-tls = "1.0" rustc = { path = "../librustc" } rustc_allocator = { path = "../librustc_allocator" } rustc_target = { path = "../librustc_target" } rustc_borrowck = { path = "../librustc_borrowck" } rustc_data_structures = { path = "../librustc_data_structures" } -rustc_errors = { path = "../librustc_errors" } +errors = { path = "../librustc_errors", package = "rustc_errors" } rustc_incremental = { path = "../librustc_incremental" } rustc_lint = { path = "../librustc_lint" } rustc_metadata = { path = "../librustc_metadata" } @@ -33,13 +34,9 @@ rustc_save_analysis = { path = "../librustc_save_analysis" } rustc_traits = { path = "../librustc_traits" } rustc_codegen_utils = { path = "../librustc_codegen_utils" } rustc_typeck = { path = "../librustc_typeck" } +rustc_interface = { path = "../librustc_interface" } serialize = { path = "../libserialize" } syntax = { path = "../libsyntax" } smallvec = { version = "0.6.7", features = ["union", "may_dangle"] } syntax_ext = { path = "../libsyntax_ext" } syntax_pos = { path = "../libsyntax_pos" } - -[dependencies.jemalloc-sys] -version = '0.1.8' -optional = true -features = ['unprefixed_malloc_on_supported_platforms'] diff --git a/src/librustc_driver/build.rs b/src/librustc_driver/build.rs index 9844f3b557a1f..414d13445f01e 100644 --- a/src/librustc_driver/build.rs +++ b/src/librustc_driver/build.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
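The `work_queue.rs` hunk above documents a deduplicating FIFO over indices in `0..len` (`with_all`, `with_none`, `is_empty`). A minimal sketch of that structure; the real `WorkQueue` is generic over an `Idx` type and uses a `BitSet` for membership, and the `insert`/`pop` bodies here are written from the doc comments rather than copied from the crate.

```rust
use std::collections::VecDeque;

struct WorkQueue {
    deque: VecDeque<usize>,
    set: Vec<bool>, // stand-in for the crate's BitSet membership test
}

impl WorkQueue {
    /// Creates a new work queue with all the elements from (0..len).
    fn with_all(len: usize) -> Self {
        WorkQueue { deque: (0..len).collect(), set: vec![true; len] }
    }

    /// Creates a new work queue that starts empty, where elements range from (0..len).
    fn with_none(len: usize) -> Self {
        WorkQueue { deque: VecDeque::new(), set: vec![false; len] }
    }

    /// Enqueues `element` unless it is already queued; returns whether it was added.
    fn insert(&mut self, element: usize) -> bool {
        if !self.set[element] {
            self.set[element] = true;
            self.deque.push_back(element);
            true
        } else {
            false
        }
    }

    /// Dequeues the next element, if any.
    fn pop(&mut self) -> Option<usize> {
        let element = self.deque.pop_front()?;
        self.set[element] = false;
        Some(element)
    }

    /// Returns `true` if nothing is enqueued.
    fn is_empty(&self) -> bool {
        self.deque.is_empty()
    }
}

fn main() {
    let mut queue = WorkQueue::with_none(4);
    queue.insert(2);
    queue.insert(2); // deduplicated: already queued
    while let Some(item) = queue.pop() {
        println!("processing {}", item);
    }
    assert!(queue.is_empty());
    let _ = WorkQueue::with_all(4); // starts with 0, 1, 2, 3 enqueued
}
```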
- fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-env-changed=CFG_RELEASE"); diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs deleted file mode 100644 index b26d81b9c6729..0000000000000 --- a/src/librustc_driver/driver.rs +++ /dev/null @@ -1,1708 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::dep_graph::DepGraph; -use rustc::hir; -use rustc::hir::lowering::lower_crate; -use rustc::hir::map as hir_map; -use rustc::lint; -use rustc::middle::{self, reachable, resolve_lifetime, stability}; -use rustc::middle::privacy::AccessLevels; -use rustc::ty::{self, AllArenas, Resolutions, TyCtxt}; -use rustc::traits; -use rustc::util::common::{install_panic_hook, time, ErrorReported}; -use rustc::util::profiling::ProfileCategory; -use rustc::session::{CompileResult, CrateDisambiguator, Session}; -use rustc::session::CompileIncomplete; -use rustc::session::config::{self, Input, OutputFilenames, OutputType}; -use rustc::session::search_paths::PathKind; -use rustc_allocator as allocator; -use rustc_borrowck as borrowck; -use rustc_codegen_utils::codegen_backend::CodegenBackend; -use rustc_data_structures::fingerprint::Fingerprint; -use rustc_data_structures::stable_hasher::StableHasher; -use rustc_data_structures::sync::{self, Lrc, Lock}; -use rustc_incremental; -use rustc_metadata::creader::CrateLoader; -use rustc_metadata::cstore::{self, CStore}; -use rustc_mir as mir; -use rustc_passes::{self, ast_validation, hir_stats, loops, rvalue_promotion}; -use rustc_plugin as plugin; -use rustc_plugin::registry::Registry; -use rustc_privacy; -use rustc_resolve::{MakeGlobMap, Resolver, ResolverArenas}; -use rustc_traits; -use rustc_typeck as typeck; -use syntax::{self, ast, attr, diagnostics, visit}; -use syntax::early_buffered_lints::BufferedEarlyLint; -use syntax::ext::base::ExtCtxt; -use syntax::fold::Folder; -use syntax::parse::{self, PResult}; -use syntax::util::node_count::NodeCounter; -use syntax::util::lev_distance::find_best_match_for_name; -use syntax::symbol::Symbol; -use syntax_pos::{FileName, hygiene}; -use syntax_ext; - -use serialize::json; - -use std::any::Any; -use std::env; -use std::ffi::OsString; -use std::fs; -use std::io::{self, Write}; -use std::iter; -use std::path::{Path, PathBuf}; -use std::sync::mpsc; - -use pretty::ReplaceBodyWithLoop; -use proc_macro_decls; -use profile; -use super::Compilation; - -#[cfg(not(parallel_queries))] -pub fn spawn_thread_pool R + sync::Send, R: sync::Send>( - opts: config::Options, - f: F -) -> R { - ty::tls::GCX_PTR.set(&Lock::new(0), || { - f(opts) - }) -} - -#[cfg(parallel_queries)] -pub fn spawn_thread_pool R + sync::Send, R: sync::Send>( - opts: config::Options, - f: F -) -> R { - use syntax; - use syntax_pos; - use rayon::{ThreadPoolBuilder, ThreadPool}; - - let gcx_ptr = &Lock::new(0); - - let config = ThreadPoolBuilder::new() - .num_threads(Session::query_threads_from_opts(&opts)) - .deadlock_handler(|| unsafe { ty::query::handle_deadlock() }) - .stack_size(::STACK_SIZE); - - let with_pool = move |pool: &ThreadPool| { - pool.install(move || f(opts)) - }; - - syntax::GLOBALS.with(|syntax_globals| { - syntax_pos::GLOBALS.with(|syntax_pos_globals| { - // The main 
handler run for each Rayon worker thread and sets up - // the thread local rustc uses. syntax_globals and syntax_pos_globals are - // captured and set on the new threads. ty::tls::with_thread_locals sets up - // thread local callbacks from libsyntax - let main_handler = move |worker: &mut dyn FnMut()| { - syntax::GLOBALS.set(syntax_globals, || { - syntax_pos::GLOBALS.set(syntax_pos_globals, || { - ty::tls::with_thread_locals(|| { - ty::tls::GCX_PTR.set(gcx_ptr, || { - worker() - }) - }) - }) - }) - }; - - ThreadPool::scoped_pool(config, main_handler, with_pool).unwrap() - }) - }) -} - -pub fn compile_input( - codegen_backend: Box, - sess: &Session, - cstore: &CStore, - input_path: &Option, - input: &Input, - outdir: &Option, - output: &Option, - addl_plugins: Option>, - control: &CompileController, -) -> CompileResult { - macro_rules! controller_entry_point { - ($point: ident, $tsess: expr, $make_state: expr, $phase_result: expr) => {{ - let state = &mut $make_state; - let phase_result: &CompileResult = &$phase_result; - if phase_result.is_ok() || control.$point.run_callback_on_error { - (control.$point.callback)(state); - } - - if control.$point.stop == Compilation::Stop { - // FIXME: shouldn't this return Err(CompileIncomplete::Stopped) - // if there are no errors? - return $tsess.compile_status(); - } - }} - } - - if sess.profile_queries() { - profile::begin(sess); - } - - // We need nested scopes here, because the intermediate results can keep - // large chunks of memory alive and we want to free them as soon as - // possible to keep the peak memory usage low - let (outputs, ongoing_codegen, dep_graph) = { - let krate = match phase_1_parse_input(control, sess, input) { - Ok(krate) => krate, - Err(mut parse_error) => { - parse_error.emit(); - return Err(CompileIncomplete::Errored(ErrorReported)); - } - }; - - let (krate, registry) = { - let mut compile_state = - CompileState::state_after_parse(input, sess, outdir, output, krate, &cstore); - controller_entry_point!(after_parse, sess, compile_state, Ok(())); - - (compile_state.krate.unwrap(), compile_state.registry) - }; - - let outputs = build_output_filenames(input, outdir, output, &krate.attrs, sess); - let crate_name = - ::rustc_codegen_utils::link::find_crate_name(Some(sess), &krate.attrs, input); - install_panic_hook(); - - let ExpansionResult { - expanded_crate, - defs, - analysis, - resolutions, - mut hir_forest, - } = { - phase_2_configure_and_expand( - sess, - &cstore, - krate, - registry, - &crate_name, - addl_plugins, - control.make_glob_map, - |expanded_crate| { - let mut state = CompileState::state_after_expand( - input, - sess, - outdir, - output, - &cstore, - expanded_crate, - &crate_name, - ); - controller_entry_point!(after_expand, sess, state, Ok(())); - Ok(()) - }, - )? - }; - - let output_paths = generated_output_paths(sess, &outputs, output.is_some(), &crate_name); - - // Ensure the source file isn't accidentally overwritten during compilation. 
- if let Some(ref input_path) = *input_path { - if sess.opts.will_create_output_file() { - if output_contains_path(&output_paths, input_path) { - sess.err(&format!( - "the input file \"{}\" would be overwritten by the generated \ - executable", - input_path.display() - )); - return Err(CompileIncomplete::Stopped); - } - if let Some(dir_path) = output_conflicts_with_dir(&output_paths) { - sess.err(&format!( - "the generated executable for the input file \"{}\" conflicts with the \ - existing directory \"{}\"", - input_path.display(), - dir_path.display() - )); - return Err(CompileIncomplete::Stopped); - } - } - } - - write_out_deps(sess, &outputs, &output_paths); - if sess.opts.output_types.contains_key(&OutputType::DepInfo) - && sess.opts.output_types.len() == 1 - { - return Ok(()); - } - - if let &Some(ref dir) = outdir { - if fs::create_dir_all(dir).is_err() { - sess.err("failed to find or create the directory specified by --out-dir"); - return Err(CompileIncomplete::Stopped); - } - } - - let arenas = AllArenas::new(); - - // Construct the HIR map - let hir_map = time(sess, "indexing hir", || { - hir_map::map_crate(sess, cstore, &mut hir_forest, &defs) - }); - - { - hir_map.dep_graph.assert_ignored(); - controller_entry_point!( - after_hir_lowering, - sess, - CompileState::state_after_hir_lowering( - input, - sess, - outdir, - output, - &arenas, - &cstore, - &hir_map, - &analysis, - &resolutions, - &expanded_crate, - &hir_map.krate(), - &outputs, - &crate_name - ), - Ok(()) - ); - } - - let opt_crate = if control.keep_ast { - Some(&expanded_crate) - } else { - drop(expanded_crate); - None - }; - - phase_3_run_analysis_passes( - &*codegen_backend, - control, - sess, - cstore, - hir_map, - analysis, - resolutions, - &arenas, - &crate_name, - &outputs, - |tcx, analysis, rx, result| { - { - // Eventually, we will want to track plugins. - tcx.dep_graph.with_ignore(|| { - let mut state = CompileState::state_after_analysis( - input, - sess, - outdir, - output, - opt_crate, - tcx.hir().krate(), - &analysis, - tcx, - &crate_name, - ); - (control.after_analysis.callback)(&mut state); - }); - - if control.after_analysis.stop == Compilation::Stop { - return result.and_then(|_| Err(CompileIncomplete::Stopped)); - } - } - - result?; - - if log_enabled!(::log::Level::Info) { - println!("Pre-codegen"); - tcx.print_debug_stats(); - } - - let ongoing_codegen = phase_4_codegen(&*codegen_backend, tcx, rx); - - if log_enabled!(::log::Level::Info) { - println!("Post-codegen"); - tcx.print_debug_stats(); - } - - if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) { - if let Err(e) = mir::transform::dump_mir::emit_mir(tcx, &outputs) { - sess.err(&format!("could not emit MIR: {}", e)); - sess.abort_if_errors(); - } - } - - Ok((outputs.clone(), ongoing_codegen, tcx.dep_graph.clone())) - }, - )?? 
- }; - - if sess.opts.debugging_opts.print_type_sizes { - sess.code_stats.borrow().print_type_sizes(); - } - - codegen_backend.join_codegen_and_link(ongoing_codegen, sess, &dep_graph, &outputs)?; - - if sess.opts.debugging_opts.perf_stats { - sess.print_perf_stats(); - } - - if sess.opts.debugging_opts.self_profile { - sess.print_profiler_results(); - } - - if sess.opts.debugging_opts.profile_json { - sess.save_json_results(); - } - - controller_entry_point!( - compilation_done, - sess, - CompileState::state_when_compilation_done(input, sess, outdir, output), - Ok(()) - ); - - Ok(()) -} - -pub fn source_name(input: &Input) -> FileName { - match *input { - Input::File(ref ifile) => ifile.clone().into(), - Input::Str { ref name, .. } => name.clone(), - } -} - -/// CompileController is used to customize compilation, it allows compilation to -/// be stopped and/or to call arbitrary code at various points in compilation. -/// It also allows for various flags to be set to influence what information gets -/// collected during compilation. -/// -/// This is a somewhat higher level controller than a Session - the Session -/// controls what happens in each phase, whereas the CompileController controls -/// whether a phase is run at all and whether other code (from outside the -/// compiler) is run between phases. -/// -/// Note that if compilation is set to stop and a callback is provided for a -/// given entry point, the callback is called before compilation is stopped. -/// -/// Expect more entry points to be added in the future. -pub struct CompileController<'a> { - pub after_parse: PhaseController<'a>, - pub after_expand: PhaseController<'a>, - pub after_hir_lowering: PhaseController<'a>, - pub after_analysis: PhaseController<'a>, - pub compilation_done: PhaseController<'a>, - - // FIXME we probably want to group the below options together and offer a - // better API, rather than this ad-hoc approach. - pub make_glob_map: MakeGlobMap, - // Whether the compiler should keep the ast beyond parsing. - pub keep_ast: bool, - // -Zcontinue-parse-after-error - pub continue_parse_after_error: bool, - - /// Allows overriding default rustc query providers, - /// after `default_provide` has installed them. - pub provide: Box, - /// Same as `provide`, but only for non-local crates, - /// applied after `default_provide_extern`. - pub provide_extern: Box, -} - -impl<'a> CompileController<'a> { - pub fn basic() -> CompileController<'a> { - CompileController { - after_parse: PhaseController::basic(), - after_expand: PhaseController::basic(), - after_hir_lowering: PhaseController::basic(), - after_analysis: PhaseController::basic(), - compilation_done: PhaseController::basic(), - make_glob_map: MakeGlobMap::No, - keep_ast: false, - continue_parse_after_error: false, - provide: box |_| {}, - provide_extern: box |_| {}, - } - } -} - -/// This implementation makes it easier to create a custom driver when you only want to hook -/// into callbacks from `CompileController`. 
-/// -/// # Example -/// -/// ```no_run -/// # extern crate rustc_driver; -/// # use rustc_driver::driver::CompileController; -/// let mut controller = CompileController::basic(); -/// controller.after_analysis.callback = Box::new(move |_state| {}); -/// rustc_driver::run_compiler(&[], Box::new(controller), None, None); -/// ``` -impl<'a> ::CompilerCalls<'a> for CompileController<'a> { - fn early_callback( - &mut self, - matches: &::getopts::Matches, - sopts: &config::Options, - cfg: &ast::CrateConfig, - descriptions: &::errors::registry::Registry, - output: ::ErrorOutputType, - ) -> Compilation { - ::RustcDefaultCalls.early_callback( - matches, - sopts, - cfg, - descriptions, - output, - ) - } - fn no_input( - &mut self, - matches: &::getopts::Matches, - sopts: &config::Options, - cfg: &ast::CrateConfig, - odir: &Option, - ofile: &Option, - descriptions: &::errors::registry::Registry, - ) -> Option<(Input, Option)> { - ::RustcDefaultCalls.no_input( - matches, - sopts, - cfg, - odir, - ofile, - descriptions, - ) - } - fn late_callback( - &mut self, - codegen_backend: &dyn (::CodegenBackend), - matches: &::getopts::Matches, - sess: &Session, - cstore: &CStore, - input: &Input, - odir: &Option, - ofile: &Option, - ) -> Compilation { - ::RustcDefaultCalls - .late_callback(codegen_backend, matches, sess, cstore, input, odir, ofile) - } - fn build_controller( - self: Box, - _: &Session, - _: &::getopts::Matches - ) -> CompileController<'a> { - *self - } -} - -pub struct PhaseController<'a> { - pub stop: Compilation, - // If true then the compiler will try to run the callback even if the phase - // ends with an error. Note that this is not always possible. - pub run_callback_on_error: bool, - pub callback: Box, -} - -impl<'a> PhaseController<'a> { - pub fn basic() -> PhaseController<'a> { - PhaseController { - stop: Compilation::Continue, - run_callback_on_error: false, - callback: box |_| {}, - } - } -} - -/// State that is passed to a callback. What state is available depends on when -/// during compilation the callback is made. See the various constructor methods -/// (`state_*`) in the impl to see which data is provided for any given entry point. 
-pub struct CompileState<'a, 'tcx: 'a> { - pub input: &'a Input, - pub session: &'tcx Session, - pub krate: Option, - pub registry: Option>, - pub cstore: Option<&'tcx CStore>, - pub crate_name: Option<&'a str>, - pub output_filenames: Option<&'a OutputFilenames>, - pub out_dir: Option<&'a Path>, - pub out_file: Option<&'a Path>, - pub arenas: Option<&'tcx AllArenas<'tcx>>, - pub expanded_crate: Option<&'a ast::Crate>, - pub hir_crate: Option<&'a hir::Crate>, - pub hir_map: Option<&'a hir_map::Map<'tcx>>, - pub resolutions: Option<&'a Resolutions>, - pub analysis: Option<&'a ty::CrateAnalysis>, - pub tcx: Option>, -} - -impl<'a, 'tcx> CompileState<'a, 'tcx> { - fn empty(input: &'a Input, session: &'tcx Session, out_dir: &'a Option) -> Self { - CompileState { - input, - session, - out_dir: out_dir.as_ref().map(|s| &**s), - out_file: None, - arenas: None, - krate: None, - registry: None, - cstore: None, - crate_name: None, - output_filenames: None, - expanded_crate: None, - hir_crate: None, - hir_map: None, - resolutions: None, - analysis: None, - tcx: None, - } - } - - fn state_after_parse( - input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - krate: ast::Crate, - cstore: &'tcx CStore, - ) -> Self { - CompileState { - // Initialize the registry before moving `krate` - registry: Some(Registry::new(&session, krate.span)), - krate: Some(krate), - cstore: Some(cstore), - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, out_dir) - } - } - - fn state_after_expand( - input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - cstore: &'tcx CStore, - expanded_crate: &'a ast::Crate, - crate_name: &'a str, - ) -> Self { - CompileState { - crate_name: Some(crate_name), - cstore: Some(cstore), - expanded_crate: Some(expanded_crate), - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, out_dir) - } - } - - fn state_after_hir_lowering( - input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - arenas: &'tcx AllArenas<'tcx>, - cstore: &'tcx CStore, - hir_map: &'a hir_map::Map<'tcx>, - analysis: &'a ty::CrateAnalysis, - resolutions: &'a Resolutions, - krate: &'a ast::Crate, - hir_crate: &'a hir::Crate, - output_filenames: &'a OutputFilenames, - crate_name: &'a str, - ) -> Self { - CompileState { - crate_name: Some(crate_name), - arenas: Some(arenas), - cstore: Some(cstore), - hir_map: Some(hir_map), - analysis: Some(analysis), - resolutions: Some(resolutions), - expanded_crate: Some(krate), - hir_crate: Some(hir_crate), - output_filenames: Some(output_filenames), - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, out_dir) - } - } - - fn state_after_analysis( - input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - krate: Option<&'a ast::Crate>, - hir_crate: &'a hir::Crate, - analysis: &'a ty::CrateAnalysis, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - crate_name: &'a str, - ) -> Self { - CompileState { - analysis: Some(analysis), - tcx: Some(tcx), - expanded_crate: krate, - hir_crate: Some(hir_crate), - crate_name: Some(crate_name), - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, out_dir) - } - } - - fn state_when_compilation_done( - input: &'a Input, - session: &'tcx Session, - out_dir: &'a Option, - out_file: &'a Option, - ) -> Self { - CompileState { - out_file: out_file.as_ref().map(|s| &**s), - ..CompileState::empty(input, session, 
out_dir) - } - } -} - -pub fn phase_1_parse_input<'a>( - control: &CompileController, - sess: &'a Session, - input: &Input, -) -> PResult<'a, ast::Crate> { - sess.diagnostic() - .set_continue_after_error(control.continue_parse_after_error); - hygiene::set_default_edition(sess.edition()); - - if sess.profile_queries() { - profile::begin(sess); - } - - sess.profiler(|p| p.start_activity(ProfileCategory::Parsing)); - let krate = time(sess, "parsing", || match *input { - Input::File(ref file) => parse::parse_crate_from_file(file, &sess.parse_sess), - Input::Str { - ref input, - ref name, - } => parse::parse_crate_from_source_str(name.clone(), input.clone(), &sess.parse_sess), - })?; - sess.profiler(|p| p.end_activity(ProfileCategory::Parsing)); - - sess.diagnostic().set_continue_after_error(true); - - if sess.opts.debugging_opts.ast_json_noexpand { - println!("{}", json::as_json(&krate)); - } - - if sess.opts.debugging_opts.input_stats { - println!( - "Lines of code: {}", - sess.source_map().count_lines() - ); - println!("Pre-expansion node count: {}", count_nodes(&krate)); - } - - if let Some(ref s) = sess.opts.debugging_opts.show_span { - syntax::show_span::run(sess.diagnostic(), s, &krate); - } - - if sess.opts.debugging_opts.hir_stats { - hir_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS"); - } - - Ok(krate) -} - -fn count_nodes(krate: &ast::Crate) -> usize { - let mut counter = NodeCounter::new(); - visit::walk_crate(&mut counter, krate); - counter.count -} - -// For continuing compilation after a parsed crate has been -// modified - -pub struct ExpansionResult { - pub expanded_crate: ast::Crate, - pub defs: hir_map::Definitions, - pub analysis: ty::CrateAnalysis, - pub resolutions: Resolutions, - pub hir_forest: hir_map::Forest, -} - -pub struct InnerExpansionResult<'a, 'b: 'a> { - pub expanded_crate: ast::Crate, - pub resolver: Resolver<'a, 'b>, - pub hir_forest: hir_map::Forest, -} - -/// Run the "early phases" of the compiler: initial `cfg` processing, -/// loading compiler plugins (including those from `addl_plugins`), -/// syntax expansion, secondary `cfg` expansion, synthesis of a test -/// harness if one is to be provided, injection of a dependency on the -/// standard library and prelude, and name resolution. -/// -/// Returns `None` if we're aborting after handling -W help. -pub fn phase_2_configure_and_expand( - sess: &Session, - cstore: &CStore, - krate: ast::Crate, - registry: Option, - crate_name: &str, - addl_plugins: Option>, - make_glob_map: MakeGlobMap, - after_expand: F, -) -> Result -where - F: FnOnce(&ast::Crate) -> CompileResult, -{ - // Currently, we ignore the name resolution data structures for the purposes of dependency - // tracking. Instead we will run name resolution and include its output in the hash of each - // item, much like we do for macro expansion. In other words, the hash reflects not just - // its contents but the results of name resolution on those contents. Hopefully we'll push - // this back at some point. 
- let mut crate_loader = CrateLoader::new(sess, &cstore, &crate_name); - let resolver_arenas = Resolver::arenas(); - let result = phase_2_configure_and_expand_inner( - sess, - cstore, - krate, - registry, - crate_name, - addl_plugins, - make_glob_map, - &resolver_arenas, - &mut crate_loader, - after_expand, - ); - match result { - Ok(InnerExpansionResult { - expanded_crate, - resolver, - hir_forest, - }) => Ok(ExpansionResult { - expanded_crate, - defs: resolver.definitions, - hir_forest, - resolutions: Resolutions { - freevars: resolver.freevars, - export_map: resolver.export_map, - trait_map: resolver.trait_map, - maybe_unused_trait_imports: resolver.maybe_unused_trait_imports, - maybe_unused_extern_crates: resolver.maybe_unused_extern_crates, - extern_prelude: resolver.extern_prelude.iter().map(|(ident, entry)| { - (ident.name, entry.introduced_by_item) - }).collect(), - }, - - analysis: ty::CrateAnalysis { - access_levels: Lrc::new(AccessLevels::default()), - name: crate_name.to_string(), - glob_map: if resolver.make_glob_map { - Some(resolver.glob_map) - } else { - None - }, - }, - }), - Err(x) => Err(x), - } -} - -/// Same as phase_2_configure_and_expand, but doesn't let you keep the resolver -/// around -pub fn phase_2_configure_and_expand_inner<'a, 'b: 'a, F>( - sess: &'a Session, - cstore: &'a CStore, - mut krate: ast::Crate, - registry: Option, - crate_name: &str, - addl_plugins: Option>, - make_glob_map: MakeGlobMap, - resolver_arenas: &'a ResolverArenas<'a>, - crate_loader: &'a mut CrateLoader<'b>, - after_expand: F, -) -> Result, CompileIncomplete> -where - F: FnOnce(&ast::Crate) -> CompileResult, -{ - krate = time(sess, "attributes injection", || { - syntax::attr::inject(krate, &sess.parse_sess, &sess.opts.debugging_opts.crate_attr) - }); - - let (mut krate, features) = syntax::config::features( - krate, - &sess.parse_sess, - sess.edition(), - ); - // these need to be set "early" so that expansion sees `quote` if enabled. - sess.init_features(features); - - let crate_types = collect_crate_types(sess, &krate.attrs); - sess.crate_types.set(crate_types); - - let disambiguator = compute_crate_disambiguator(sess); - sess.crate_disambiguator.set(disambiguator); - rustc_incremental::prepare_session_directory(sess, &crate_name, disambiguator); - - if sess.opts.incremental.is_some() { - time(sess, "garbage collect incremental cache directory", || { - if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) { - warn!( - "Error while trying to garbage collect incremental \ - compilation cache directory: {}", - e - ); - } - }); - } - - // If necessary, compute the dependency graph (in the background). 
- let future_dep_graph = if sess.opts.build_dep_graph() { - Some(rustc_incremental::load_dep_graph(sess)) - } else { - None - }; - - time(sess, "recursion limit", || { - middle::recursion_limit::update_limits(sess, &krate); - }); - - krate = time(sess, "crate injection", || { - let alt_std_name = sess.opts.alt_std_name.as_ref().map(|s| &**s); - syntax::std_inject::maybe_inject_crates_ref(krate, alt_std_name, sess.edition()) - }); - - let mut addl_plugins = Some(addl_plugins); - let registrars = time(sess, "plugin loading", || { - plugin::load::load_plugins( - sess, - &cstore, - &krate, - crate_name, - addl_plugins.take().unwrap(), - ) - }); - - let mut registry = registry.unwrap_or_else(|| Registry::new(sess, krate.span)); - - time(sess, "plugin registration", || { - if sess.features_untracked().rustc_diagnostic_macros { - registry.register_macro( - "__diagnostic_used", - diagnostics::plugin::expand_diagnostic_used, - ); - registry.register_macro( - "__register_diagnostic", - diagnostics::plugin::expand_register_diagnostic, - ); - registry.register_macro( - "__build_diagnostic_array", - diagnostics::plugin::expand_build_diagnostic_array, - ); - } - - for registrar in registrars { - registry.args_hidden = Some(registrar.args); - (registrar.fun)(&mut registry); - } - }); - - let Registry { - syntax_exts, - early_lint_passes, - late_lint_passes, - lint_groups, - llvm_passes, - attributes, - .. - } = registry; - - sess.track_errors(|| { - let mut ls = sess.lint_store.borrow_mut(); - for pass in early_lint_passes { - ls.register_early_pass(Some(sess), true, pass); - } - for pass in late_lint_passes { - ls.register_late_pass(Some(sess), true, pass); - } - - for (name, (to, deprecated_name)) in lint_groups { - ls.register_group(Some(sess), true, name, deprecated_name, to); - } - - *sess.plugin_llvm_passes.borrow_mut() = llvm_passes; - *sess.plugin_attributes.borrow_mut() = attributes.clone(); - })?; - - // Lint plugins are registered; now we can process command line flags. - if sess.opts.describe_lints { - super::describe_lints(&sess, &sess.lint_store.borrow(), true); - return Err(CompileIncomplete::Stopped); - } - - time(sess, "pre ast expansion lint checks", || { - lint::check_ast_crate(sess, &krate, true) - }); - - let mut resolver = Resolver::new( - sess, - cstore, - &krate, - crate_name, - make_glob_map, - crate_loader, - &resolver_arenas, - ); - syntax_ext::register_builtins(&mut resolver, syntax_exts, sess.features_untracked().quote); - - // Expand all macros - sess.profiler(|p| p.start_activity(ProfileCategory::Expansion)); - krate = time(sess, "expansion", || { - // Windows dlls do not have rpaths, so they don't know how to find their - // dependencies. It's up to us to tell the system where to find all the - // dependent dlls. Note that this uses cfg!(windows) as opposed to - // targ_cfg because syntax extensions are always loaded for the host - // compiler, not for the target. - // - // This is somewhat of an inherently racy operation, however, as - // multiple threads calling this function could possibly continue - // extending PATH far beyond what it should. To solve this for now we - // just don't add any new elements to PATH which are already there - // within PATH. This is basically a targeted fix at #17360 for rustdoc - // which runs rustc in parallel but has been seen (#33844) to cause - // problems with PATH becoming too long. 
- let mut old_path = OsString::new(); - if cfg!(windows) { - old_path = env::var_os("PATH").unwrap_or(old_path); - let mut new_path = sess.host_filesearch(PathKind::All).search_path_dirs(); - for path in env::split_paths(&old_path) { - if !new_path.contains(&path) { - new_path.push(path); - } - } - env::set_var( - "PATH", - &env::join_paths( - new_path - .iter() - .filter(|p| env::join_paths(iter::once(p)).is_ok()), - ).unwrap(), - ); - } - - // Create the config for macro expansion - let features = sess.features_untracked(); - let cfg = syntax::ext::expand::ExpansionConfig { - features: Some(&features), - recursion_limit: *sess.recursion_limit.get(), - trace_mac: sess.opts.debugging_opts.trace_macros, - should_test: sess.opts.test, - ..syntax::ext::expand::ExpansionConfig::default(crate_name.to_string()) - }; - - let mut ecx = ExtCtxt::new(&sess.parse_sess, cfg, &mut resolver); - let err_count = ecx.parse_sess.span_diagnostic.err_count(); - - // Expand macros now! - let krate = time(sess, "expand crate", || { - ecx.monotonic_expander().expand_crate(krate) - }); - - // The rest is error reporting - - time(sess, "check unused macros", || { - ecx.check_unused_macros(); - }); - - let mut missing_fragment_specifiers: Vec<_> = ecx.parse_sess - .missing_fragment_specifiers - .borrow() - .iter() - .cloned() - .collect(); - missing_fragment_specifiers.sort(); - - for span in missing_fragment_specifiers { - let lint = lint::builtin::MISSING_FRAGMENT_SPECIFIER; - let msg = "missing fragment specifier"; - sess.buffer_lint(lint, ast::CRATE_NODE_ID, span, msg); - } - if ecx.parse_sess.span_diagnostic.err_count() - ecx.resolve_err_count > err_count { - ecx.parse_sess.span_diagnostic.abort_if_errors(); - } - if cfg!(windows) { - env::set_var("PATH", &old_path); - } - krate - }); - sess.profiler(|p| p.end_activity(ProfileCategory::Expansion)); - - krate = time(sess, "maybe building test harness", || { - syntax::test::modify_for_testing( - &sess.parse_sess, - &mut resolver, - sess.opts.test, - krate, - sess.diagnostic(), - &sess.features_untracked(), - ) - }); - - // If we're actually rustdoc then there's no need to actually compile - // anything, so switch everything to just looping - if sess.opts.actually_rustdoc { - krate = ReplaceBodyWithLoop::new(sess).fold_crate(krate); - } - - // If we're in rustdoc we're always compiling as an rlib, but that'll trip a - // bunch of checks in the `modify` function below. For now just skip this - // step entirely if we're rustdoc as it's not too useful anyway. - if !sess.opts.actually_rustdoc { - krate = time(sess, "maybe creating a macro crate", || { - let crate_types = sess.crate_types.borrow(); - let num_crate_types = crate_types.len(); - let is_proc_macro_crate = crate_types.contains(&config::CrateType::ProcMacro); - let is_test_crate = sess.opts.test; - syntax_ext::proc_macro_decls::modify( - &sess.parse_sess, - &mut resolver, - krate, - is_proc_macro_crate, - is_test_crate, - num_crate_types, - sess.diagnostic(), - ) - }); - } - - // Expand global allocators, which are treated as an in-tree proc macro - krate = time(sess, "creating allocators", || { - allocator::expand::modify( - &sess.parse_sess, - &mut resolver, - krate, - crate_name.to_string(), - sess.diagnostic(), - ) - }); - - // Add all buffered lints from the `ParseSess` to the `Session`. - sess.parse_sess.buffered_lints.with_lock(|buffered_lints| { - info!("{} parse sess buffered_lints", buffered_lints.len()); - for BufferedEarlyLint{id, span, msg, lint_id} in buffered_lints.drain(..) 
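The removed expansion code above works around Windows DLL loading by prepending the compiler's host search directories to `PATH`, skipping entries already present so repeated invocations do not grow `PATH` without bound (#17360, #33844), and dropping entries that cannot be re-joined. A small sketch of that dedup-and-join step; the directory value and the function name `extend_path` are placeholders.

```rust
use std::env;
use std::path::PathBuf;

fn extend_path(search_dirs: Vec<PathBuf>) -> std::ffi::OsString {
    let old_path = env::var_os("PATH").unwrap_or_default();
    let mut new_path = search_dirs;
    // Keep existing PATH entries, but only those not already in the new list.
    for path in env::split_paths(&old_path) {
        if !new_path.contains(&path) {
            new_path.push(path);
        }
    }
    // Drop entries that cannot be joined back into a PATH string.
    env::join_paths(
        new_path.iter().filter(|p| env::join_paths(std::iter::once(p)).is_ok()),
    )
    .unwrap()
}

fn main() {
    let joined = extend_path(vec![PathBuf::from("/tmp/rustc-host-libs")]);
    println!("PATH would become: {:?}", joined);
}
```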
{ - let lint = lint::Lint::from_parser_lint_id(lint_id); - sess.buffer_lint(lint, id, span, &msg); - } - }); - - // Done with macro expansion! - - after_expand(&krate)?; - - if sess.opts.debugging_opts.input_stats { - println!("Post-expansion node count: {}", count_nodes(&krate)); - } - - if sess.opts.debugging_opts.hir_stats { - hir_stats::print_ast_stats(&krate, "POST EXPANSION AST STATS"); - } - - if sess.opts.debugging_opts.ast_json { - println!("{}", json::as_json(&krate)); - } - - time(sess, "AST validation", || { - ast_validation::check_crate(sess, &krate) - }); - - time(sess, "name resolution", || -> CompileResult { - resolver.resolve_crate(&krate); - Ok(()) - })?; - - // Needs to go *after* expansion to be able to check the results of macro expansion. - time(sess, "complete gated feature checking", || { - sess.track_errors(|| { - syntax::feature_gate::check_crate( - &krate, - &sess.parse_sess, - &sess.features_untracked(), - &attributes, - sess.opts.unstable_features, - ); - }) - })?; - - // Unresolved macros might be due to mistyped `#[macro_use]`, - // so abort after checking for unknown attributes. (#49074) - if resolver.found_unresolved_macro { - sess.diagnostic().abort_if_errors(); - } - - // Lower ast -> hir. - // First, we need to collect the dep_graph. - let dep_graph = match future_dep_graph { - None => DepGraph::new_disabled(), - Some(future) => { - let (prev_graph, prev_work_products) = - time(sess, "blocked while dep-graph loading finishes", || { - future - .open() - .unwrap_or_else(|e| rustc_incremental::LoadResult::Error { - message: format!("could not decode incremental cache: {:?}", e), - }) - .open(sess) - }); - DepGraph::new(prev_graph, prev_work_products) - } - }; - let hir_forest = time(sess, "lowering ast -> hir", || { - let hir_crate = lower_crate(sess, cstore, &dep_graph, &krate, &mut resolver); - - if sess.opts.debugging_opts.hir_stats { - hir_stats::print_hir_stats(&hir_crate); - } - - hir_map::Forest::new(hir_crate, &dep_graph) - }); - - time(sess, "early lint checks", || { - lint::check_ast_crate(sess, &krate, false) - }); - - // Discard hygiene data, which isn't required after lowering to HIR. - if !sess.opts.debugging_opts.keep_hygiene_data { - syntax::ext::hygiene::clear_markings(); - } - - Ok(InnerExpansionResult { - expanded_crate: krate, - resolver, - hir_forest, - }) -} - -pub fn default_provide(providers: &mut ty::query::Providers) { - hir::provide(providers); - borrowck::provide(providers); - mir::provide(providers); - reachable::provide(providers); - resolve_lifetime::provide(providers); - rustc_privacy::provide(providers); - typeck::provide(providers); - ty::provide(providers); - traits::provide(providers); - reachable::provide(providers); - rustc_passes::provide(providers); - rustc_traits::provide(providers); - middle::region::provide(providers); - cstore::provide(providers); - lint::provide(providers); -} - -pub fn default_provide_extern(providers: &mut ty::query::Providers) { - cstore::provide_extern(providers); -} - -/// Run the resolution, typechecking, region checking and other -/// miscellaneous analysis passes on the crate. Return various -/// structures carrying the results of the analysis. 
-pub fn phase_3_run_analysis_passes<'tcx, F, R>( - codegen_backend: &dyn CodegenBackend, - control: &CompileController, - sess: &'tcx Session, - cstore: &'tcx CStore, - hir_map: hir_map::Map<'tcx>, - mut analysis: ty::CrateAnalysis, - resolutions: Resolutions, - arenas: &'tcx AllArenas<'tcx>, - name: &str, - output_filenames: &OutputFilenames, - f: F, -) -> Result -where - F: for<'a> FnOnce( - TyCtxt<'a, 'tcx, 'tcx>, - ty::CrateAnalysis, - mpsc::Receiver>, - CompileResult, - ) -> R, -{ - let query_result_on_disk_cache = time(sess, "load query result cache", || { - rustc_incremental::load_query_result_cache(sess) - }); - - time(sess, "looking for entry point", || { - middle::entry::find_entry_point(sess, &hir_map, name) - }); - - sess.plugin_registrar_fn - .set(time(sess, "looking for plugin registrar", || { - plugin::build::find_plugin_registrar(sess.diagnostic(), &hir_map) - })); - sess.proc_macro_decls_static - .set(proc_macro_decls::find(&hir_map)); - - time(sess, "loop checking", || loops::check_crate(sess, &hir_map)); - - let mut local_providers = ty::query::Providers::default(); - default_provide(&mut local_providers); - codegen_backend.provide(&mut local_providers); - (control.provide)(&mut local_providers); - - let mut extern_providers = local_providers; - default_provide_extern(&mut extern_providers); - codegen_backend.provide_extern(&mut extern_providers); - (control.provide_extern)(&mut extern_providers); - - let (tx, rx) = mpsc::channel(); - - TyCtxt::create_and_enter( - sess, - cstore, - local_providers, - extern_providers, - arenas, - resolutions, - hir_map, - query_result_on_disk_cache, - name, - tx, - output_filenames, - |tcx| { - // Do some initialization of the DepGraph that can only be done with the - // tcx available. - rustc_incremental::dep_graph_tcx_init(tcx); - - time(sess, "attribute checking", || { - hir::check_attr::check_crate(tcx) - }); - - time(sess, "stability checking", || { - stability::check_unstable_api_usage(tcx) - }); - - // passes are timed inside typeck - match typeck::check_crate(tcx) { - Ok(x) => x, - Err(x) => { - f(tcx, analysis, rx, Err(x)); - return Err(x); - } - } - - time(sess, "rvalue promotion", || { - rvalue_promotion::check_crate(tcx) - }); - - analysis.access_levels = - time(sess, "privacy checking", || rustc_privacy::check_crate(tcx)); - - time(sess, "intrinsic checking", || { - middle::intrinsicck::check_crate(tcx) - }); - - time(sess, "match checking", || mir::matchck_crate(tcx)); - - // this must run before MIR dump, because - // "not all control paths return a value" is reported here. - // - // maybe move the check to a MIR pass? - time(sess, "liveness checking", || { - middle::liveness::check_crate(tcx) - }); - - time(sess, "borrow checking", || { - if tcx.use_ast_borrowck() { - borrowck::check_crate(tcx); - } - }); - - time(sess, - "MIR borrow checking", - || tcx.par_body_owners(|def_id| { tcx.mir_borrowck(def_id); })); - - time(sess, "dumping chalk-like clauses", || { - rustc_traits::lowering::dump_program_clauses(tcx); - }); - - time(sess, "MIR effect checking", || { - for def_id in tcx.body_owners() { - mir::transform::check_unsafety::check_unsafety(tcx, def_id) - } - }); - // Avoid overwhelming user with errors if type checking failed. - // I'm not sure how helpful this is, to be honest, but it avoids - // a - // lot of annoying errors in the compile-fail tests (basically, - // lint warnings and so on -- kindck used to do this abort, but - // kindck is gone now). 
-nmatsakis - if sess.err_count() > 0 { - return Ok(f(tcx, analysis, rx, sess.compile_status())); - } - - time(sess, "death checking", || middle::dead::check_crate(tcx)); - - time(sess, "unused lib feature checking", || { - stability::check_unused_or_stable_features(tcx) - }); - - time(sess, "lint checking", || lint::check_crate(tcx)); - - return Ok(f(tcx, analysis, rx, tcx.sess.compile_status())); - }, - ) -} - -/// Run the codegen backend, after which the AST and analysis can -/// be discarded. -pub fn phase_4_codegen<'a, 'tcx>( - codegen_backend: &dyn CodegenBackend, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>, -) -> Box { - time(tcx.sess, "resolving dependency formats", || { - ::rustc::middle::dependency_format::calculate(tcx) - }); - - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); - let codegen = time(tcx.sess, "codegen", move || codegen_backend.codegen_crate(tcx, rx)); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); - if tcx.sess.profile_queries() { - profile::dump(&tcx.sess, "profile_queries".to_string()) - } - - codegen -} - -fn escape_dep_filename(filename: &FileName) -> String { - // Apparently clang and gcc *only* escape spaces: - // http://llvm.org/klaus/clang/commit/9d50634cfc268ecc9a7250226dd5ca0e945240d4 - filename.to_string().replace(" ", "\\ ") -} - -// Returns all the paths that correspond to generated files. -fn generated_output_paths( - sess: &Session, - outputs: &OutputFilenames, - exact_name: bool, - crate_name: &str, -) -> Vec { - let mut out_filenames = Vec::new(); - for output_type in sess.opts.output_types.keys() { - let file = outputs.path(*output_type); - match *output_type { - // If the filename has been overridden using `-o`, it will not be modified - // by appending `.rlib`, `.exe`, etc., so we can skip this transformation. - OutputType::Exe if !exact_name => for crate_type in sess.crate_types.borrow().iter() { - let p = ::rustc_codegen_utils::link::filename_for_input( - sess, - *crate_type, - crate_name, - outputs, - ); - out_filenames.push(p); - }, - OutputType::DepInfo if sess.opts.debugging_opts.dep_info_omit_d_target => { - // Don't add the dep-info output when omitting it from dep-info targets - } - _ => { - out_filenames.push(file); - } - } - } - out_filenames -} - -// Runs `f` on every output file path and returns the first non-None result, or None if `f` -// returns None for every file path. 
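As an aside on the dep-info handling in the hunk above: a minimal, standalone sketch of the escaping rule used by `escape_dep_filename` (the helper and test below are illustrative only and not part of this patch):

// Illustrative only: mirrors the note above that, like clang and gcc,
// file names written into Makefile-style `.d` rules only have spaces escaped.
fn escape_for_dep_info(filename: &str) -> String {
    filename.replace(' ', "\\ ")
}

#[cfg(test)]
mod dep_escape_tests {
    use super::escape_for_dep_info;

    #[test]
    fn only_spaces_are_escaped() {
        assert_eq!(escape_for_dep_info("my crate.rs"), "my\\ crate.rs");
        assert_eq!(escape_for_dep_info("weird$name.rs"), "weird$name.rs");
    }
}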
-fn check_output(output_paths: &[PathBuf], f: F) -> Option -where - F: Fn(&PathBuf) -> Option, -{ - for output_path in output_paths { - if let Some(result) = f(output_path) { - return Some(result); - } - } - None -} - -pub fn output_contains_path(output_paths: &[PathBuf], input_path: &PathBuf) -> bool { - let input_path = input_path.canonicalize().ok(); - if input_path.is_none() { - return false; - } - let check = |output_path: &PathBuf| { - if output_path.canonicalize().ok() == input_path { - Some(()) - } else { - None - } - }; - check_output(output_paths, check).is_some() -} - -pub fn output_conflicts_with_dir(output_paths: &[PathBuf]) -> Option { - let check = |output_path: &PathBuf| { - if output_path.is_dir() { - Some(output_path.clone()) - } else { - None - } - }; - check_output(output_paths, check) -} - -fn write_out_deps(sess: &Session, outputs: &OutputFilenames, out_filenames: &[PathBuf]) { - // Write out dependency rules to the dep-info file if requested - if !sess.opts.output_types.contains_key(&OutputType::DepInfo) { - return; - } - let deps_filename = outputs.path(OutputType::DepInfo); - - let result = (|| -> io::Result<()> { - // Build a list of files used to compile the output and - // write Makefile-compatible dependency rules - let files: Vec = sess.source_map() - .files() - .iter() - .filter(|fmap| fmap.is_real_file()) - .filter(|fmap| !fmap.is_imported()) - .map(|fmap| escape_dep_filename(&fmap.name)) - .collect(); - let mut file = fs::File::create(&deps_filename)?; - for path in out_filenames { - writeln!(file, "{}: {}\n", path.display(), files.join(" "))?; - } - - // Emit a fake target for each input file to the compilation. This - // prevents `make` from spitting out an error if a file is later - // deleted. For more info see #28735 - for path in files { - writeln!(file, "{}:", path)?; - } - Ok(()) - })(); - - if let Err(e) = result { - sess.fatal(&format!( - "error writing dependencies to `{}`: {}", - deps_filename.display(), - e - )); - } -} - -pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec { - // Unconditionally collect crate types from attributes to make them used - let attr_types: Vec = attrs - .iter() - .filter_map(|a| { - if a.check_name("crate_type") { - match a.value_str() { - Some(ref n) if *n == "rlib" => Some(config::CrateType::Rlib), - Some(ref n) if *n == "dylib" => Some(config::CrateType::Dylib), - Some(ref n) if *n == "cdylib" => Some(config::CrateType::Cdylib), - Some(ref n) if *n == "lib" => Some(config::default_lib_output()), - Some(ref n) if *n == "staticlib" => Some(config::CrateType::Staticlib), - Some(ref n) if *n == "proc-macro" => Some(config::CrateType::ProcMacro), - Some(ref n) if *n == "bin" => Some(config::CrateType::Executable), - Some(ref n) => { - let crate_types = vec![ - Symbol::intern("rlib"), - Symbol::intern("dylib"), - Symbol::intern("cdylib"), - Symbol::intern("lib"), - Symbol::intern("staticlib"), - Symbol::intern("proc-macro"), - Symbol::intern("bin") - ]; - - if let ast::MetaItemKind::NameValue(spanned) = a.meta().unwrap().node { - let span = spanned.span; - let lev_candidate = find_best_match_for_name( - crate_types.iter(), - &n.as_str(), - None - ); - if let Some(candidate) = lev_candidate { - session.buffer_lint_with_diagnostic( - lint::builtin::UNKNOWN_CRATE_TYPES, - ast::CRATE_NODE_ID, - span, - "invalid `crate_type` value", - lint::builtin::BuiltinLintDiagnostics:: - UnknownCrateTypes( - span, - "did you mean".to_string(), - format!("\"{}\"", candidate) - ) - ); - } else { - 
session.buffer_lint( - lint::builtin::UNKNOWN_CRATE_TYPES, - ast::CRATE_NODE_ID, - span, - "invalid `crate_type` value" - ); - } - } - None - } - None => { - session - .struct_span_err(a.span, "`crate_type` requires a value") - .note("for example: `#![crate_type=\"lib\"]`") - .emit(); - None - } - } - } else { - None - } - }) - .collect(); - - // If we're generating a test executable, then ignore all other output - // styles at all other locations - if session.opts.test { - return vec![config::CrateType::Executable]; - } - - // Only check command line flags if present. If no types are specified by - // command line, then reuse the empty `base` Vec to hold the types that - // will be found in crate attributes. - let mut base = session.opts.crate_types.clone(); - if base.is_empty() { - base.extend(attr_types); - if base.is_empty() { - base.push(::rustc_codegen_utils::link::default_output_for_target( - session, - )); - } else { - base.sort(); - base.dedup(); - } - } - - base.retain(|crate_type| { - let res = !::rustc_codegen_utils::link::invalid_output_for_target(session, *crate_type); - - if !res { - session.warn(&format!( - "dropping unsupported crate type `{}` for target `{}`", - *crate_type, session.opts.target_triple - )); - } - - res - }); - - base -} - -pub fn compute_crate_disambiguator(session: &Session) -> CrateDisambiguator { - use std::hash::Hasher; - - // The crate_disambiguator is a 128 bit hash. The disambiguator is fed - // into various other hashes quite a bit (symbol hashes, incr. comp. hashes, - // debuginfo type IDs, etc), so we don't want it to be too wide. 128 bits - // should still be safe enough to avoid collisions in practice. - let mut hasher = StableHasher::::new(); - - let mut metadata = session.opts.cg.metadata.clone(); - // We don't want the crate_disambiguator to dependent on the order - // -C metadata arguments, so sort them: - metadata.sort(); - // Every distinct -C metadata value is only incorporated once: - metadata.dedup(); - - hasher.write(b"metadata"); - for s in &metadata { - // Also incorporate the length of a metadata string, so that we generate - // different values for `-Cmetadata=ab -Cmetadata=c` and - // `-Cmetadata=a -Cmetadata=bc` - hasher.write_usize(s.len()); - hasher.write(s.as_bytes()); - } - - // Also incorporate crate type, so that we don't get symbol conflicts when - // linking against a library of the same name, if this is an executable. - let is_exe = session - .crate_types - .borrow() - .contains(&config::CrateType::Executable); - hasher.write(if is_exe { b"exe" } else { b"lib" }); - - CrateDisambiguator::from(hasher.finish()) -} - -pub fn build_output_filenames( - input: &Input, - odir: &Option, - ofile: &Option, - attrs: &[ast::Attribute], - sess: &Session, -) -> OutputFilenames { - match *ofile { - None => { - // "-" as input file will cause the parser to read from stdin so we - // have to make up a name - // We want to toss everything after the final '.' 
- let dirpath = (*odir).as_ref().cloned().unwrap_or_default(); - - // If a crate name is present, we use it as the link name - let stem = sess.opts - .crate_name - .clone() - .or_else(|| attr::find_crate_name(attrs).map(|n| n.to_string())) - .unwrap_or_else(|| input.filestem().to_owned()); - - OutputFilenames { - out_directory: dirpath, - out_filestem: stem, - single_output_file: None, - extra: sess.opts.cg.extra_filename.clone(), - outputs: sess.opts.output_types.clone(), - } - } - - Some(ref out_file) => { - let unnamed_output_types = sess.opts - .output_types - .values() - .filter(|a| a.is_none()) - .count(); - let ofile = if unnamed_output_types > 1 { - sess.warn( - "due to multiple output types requested, the explicitly specified \ - output file name will be adapted for each output type", - ); - None - } else { - Some(out_file.clone()) - }; - if *odir != None { - sess.warn("ignoring --out-dir flag due to -o flag"); - } - if !sess.opts.cg.extra_filename.is_empty() { - sess.warn("ignoring -C extra-filename flag due to -o flag"); - } - - OutputFilenames { - out_directory: out_file.parent().unwrap_or_else(|| Path::new("")).to_path_buf(), - out_filestem: out_file - .file_stem() - .unwrap_or_default() - .to_str() - .unwrap() - .to_string(), - single_output_file: ofile, - extra: sess.opts.cg.extra_filename.clone(), - outputs: sess.opts.output_types.clone(), - } - } - } -} diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs index 41c9b22afe06f..66c7c9d0eae7e 100644 --- a/src/librustc_driver/lib.rs +++ b/src/librustc_driver/lib.rs @@ -1,163 +1,77 @@ -// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The Rust compiler. //! //! # Note //! //! This API is completely unstable and subject to change. 
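One detail worth illustrating from the `compute_crate_disambiguator` hunk above is the length prefix written before each `-C metadata` string. A simplified, self-contained sketch of that idea, using `DefaultHasher` instead of the compiler's `StableHasher`, so it is illustrative only:

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// Hash a sorted, deduplicated list of `-C metadata` values. Writing each
// string's length before its bytes keeps value boundaries distinct, so
// ["ab", "c"] and ["a", "bc"] do not feed identical bytes to the hasher.
fn hash_metadata(values: &[&str]) -> u64 {
    let mut hasher = DefaultHasher::new();
    hasher.write(b"metadata");
    for s in values {
        hasher.write_usize(s.len());
        hasher.write(s.as_bytes());
    }
    hasher.finish()
}

fn main() {
    assert_ne!(hash_metadata(&["ab", "c"]), hash_metadata(&["a", "bc"]));
}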
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(box_syntax)] #![cfg_attr(unix, feature(libc))] #![feature(nll)] -#![feature(quote)] #![feature(rustc_diagnostic_macros)] -#![feature(slice_sort_by_cached_key)] #![feature(set_stdio)] -#![feature(rustc_stack_internals)] #![feature(no_debug)] +#![feature(integer_atomics)] #![recursion_limit="256"] -extern crate arena; +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] + pub extern crate getopts; -extern crate graphviz; -extern crate env_logger; #[cfg(unix)] extern crate libc; -extern crate rustc_rayon as rayon; -extern crate rustc; -extern crate rustc_allocator; -extern crate rustc_target; -extern crate rustc_borrowck; -extern crate rustc_data_structures; -extern crate rustc_errors as errors; -extern crate rustc_passes; -extern crate rustc_lint; -extern crate rustc_plugin; -extern crate rustc_privacy; -extern crate rustc_incremental; -extern crate rustc_metadata; -extern crate rustc_mir; -extern crate rustc_resolve; -extern crate rustc_save_analysis; -extern crate rustc_traits; -extern crate rustc_codegen_utils; -extern crate rustc_typeck; -extern crate scoped_tls; -extern crate serialize; -extern crate smallvec; #[macro_use] extern crate log; -extern crate syntax; -extern crate syntax_ext; -extern crate syntax_pos; - -// Note that the linkage here should be all that we need, on Linux we're not -// prefixing the symbols here so this should naturally override our default -// allocator. On OSX it should override via the zone allocator. We shouldn't -// enable this by default on other platforms, so other platforms aren't handled -// here yet. 
-#[cfg(feature = "jemalloc-sys")] -extern crate jemalloc_sys; - -use driver::CompileController; + use pretty::{PpMode, UserIdentifiedItem}; -use rustc_resolve as resolve; +//use rustc_resolve as resolve; use rustc_save_analysis as save; use rustc_save_analysis::DumpHandler; -use rustc_data_structures::sync::{self, Lrc}; -use rustc_data_structures::OnDrop; -use rustc::session::{self, config, Session, build_session, CompileResult}; -use rustc::session::CompileIncomplete; -use rustc::session::config::{Input, PrintRequest, ErrorOutputType}; +use rustc::session::{config, Session, DiagnosticOutput}; +use rustc::session::config::{Input, PrintRequest, ErrorOutputType, OutputType}; use rustc::session::config::nightly_options; -use rustc::session::filesearch; use rustc::session::{early_error, early_warn}; use rustc::lint::Lint; use rustc::lint; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::util::common::{time, ErrorReported, install_panic_hook}; use rustc_metadata::locator; use rustc_metadata::cstore::CStore; -use rustc_metadata::dynamic_lib::DynamicLibrary; -use rustc::util::common::{time, ErrorReported}; use rustc_codegen_utils::codegen_backend::CodegenBackend; +use rustc_interface::interface; +use rustc_interface::util::get_codegen_sysroot; +use rustc_data_structures::sync::SeqCst; use serialize::json::ToJson; -use std::any::Any; use std::borrow::Cow; use std::cmp::max; use std::default::Default; -use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; use std::env; -use std::error::Error; use std::ffi::OsString; -use std::fmt::{self, Display}; use std::io::{self, Read, Write}; -use std::mem; -use std::panic; -use std::path::{PathBuf, Path}; +use std::panic::{self, catch_unwind}; +use std::path::PathBuf; use std::process::{self, Command, Stdio}; use std::str; -use std::sync::atomic::{AtomicBool, ATOMIC_BOOL_INIT, Ordering}; -use std::sync::{Once, ONCE_INIT}; -use std::thread; +use std::mem; use syntax::ast; -use syntax::source_map::{SourceMap, FileLoader, RealFileLoader}; +use syntax::source_map::FileLoader; use syntax::feature_gate::{GatedCfg, UnstableFeatures}; use syntax::parse::{self, PResult}; use syntax_pos::{DUMMY_SP, MultiSpan, FileName}; -#[cfg(test)] -mod test; - -pub mod profile; -pub mod driver; pub mod pretty; -mod proc_macro_decls; - -pub mod target_features { - use syntax::ast; - use syntax::symbol::Symbol; - use rustc::session::Session; - use rustc_codegen_utils::codegen_backend::CodegenBackend; - - /// Add `target_feature = "..."` cfgs for a variety of platform - /// specific features (SSE, NEON etc.). - /// - /// This is performed by checking whether a whitelisted set of - /// features is available on the target machine, by querying LLVM. - pub fn add_configuration(cfg: &mut ast::CrateConfig, - sess: &Session, - codegen_backend: &dyn CodegenBackend) { - let tf = Symbol::intern("target_feature"); - - cfg.extend(codegen_backend.target_features(sess).into_iter().map(|feat| (tf, Some(feat)))); - - if sess.crt_static_feature() { - cfg.insert((tf, Some(Symbol::intern("crt-static")))); - } - } -} /// Exit status code used for successful compilation and help output. -pub const EXIT_SUCCESS: isize = 0; +pub const EXIT_SUCCESS: i32 = 0; /// Exit status code used for compilation failures and invalid flags. 
-pub const EXIT_FAILURE: isize = 1; +pub const EXIT_FAILURE: i32 = 1; const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust/blob/master/CONTRIBUTING.\ md#bug-reports"; @@ -168,399 +82,295 @@ const ICE_REPORT_COMPILER_FLAGS_EXCLUDE: &[&str] = &["metadata", "extra-filename const ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE: &[&str] = &["incremental"]; -pub fn abort_on_err(result: Result, sess: &Session) -> T { +pub fn source_name(input: &Input) -> FileName { + match *input { + Input::File(ref ifile) => ifile.clone().into(), + Input::Str { ref name, .. } => name.clone(), + } +} + +pub fn abort_on_err(result: Result, sess: &Session) -> T { match result { - Err(CompileIncomplete::Errored(ErrorReported)) => { + Err(..) => { sess.abort_if_errors(); panic!("error reported but abort_if_errors didn't abort???"); } - Err(CompileIncomplete::Stopped) => { - sess.fatal("compilation terminated"); - } Ok(x) => x, } } -pub fn run(run_compiler: F) -> isize - where F: FnOnce() -> (CompileResult, Option) + Send + 'static -{ - let result = monitor(move || { - syntax::with_globals(|| { - let (result, session) = run_compiler(); - if let Err(CompileIncomplete::Errored(_)) = result { - match session { - Some(sess) => { - sess.abort_if_errors(); - panic!("error reported but abort_if_errors didn't abort???"); - } - None => { - let emitter = - errors::emitter::EmitterWriter::stderr( - errors::ColorConfig::Auto, - None, - true, - false - ); - let handler = errors::Handler::with_emitter(true, false, Box::new(emitter)); - handler.emit(&MultiSpan::new(), - "aborting due to previous error(s)", - errors::Level::Fatal); - panic::resume_unwind(Box::new(errors::FatalErrorMarker)); - } - } - } - }); - }); - - match result { - Ok(()) => EXIT_SUCCESS, - Err(_) => EXIT_FAILURE, +pub trait Callbacks { + /// Called before creating the compiler instance + fn config(&mut self, _config: &mut interface::Config) {} + /// Called after parsing and returns true to continue execution + fn after_parsing(&mut self, _compiler: &interface::Compiler) -> bool { + true } -} - -fn load_backend_from_dylib(path: &Path) -> fn() -> Box { - // Note that we're specifically using `open_global_now` here rather than - // `open`, namely we want the behavior on Unix of RTLD_GLOBAL and RTLD_NOW, - // where NOW means "bind everything right now" because we don't want - // surprises later on and RTLD_GLOBAL allows the symbols to be made - // available for future dynamic libraries opened. This is currently used by - // loading LLVM and then making its symbols available for other dynamic - // libraries. 
- let lib = DynamicLibrary::open_global_now(path).unwrap_or_else(|err| { - let err = format!("couldn't load codegen backend {:?}: {:?}", path, err); - early_error(ErrorOutputType::default(), &err); - }); - unsafe { - match lib.symbol("__rustc_codegen_backend") { - Ok(f) => { - mem::forget(lib); - mem::transmute::<*mut u8, _>(f) - } - Err(e) => { - let err = format!("couldn't load codegen backend as it \ - doesn't export the `__rustc_codegen_backend` \ - symbol: {:?}", e); - early_error(ErrorOutputType::default(), &err); - } - } + /// Called after analysis and returns true to continue execution + fn after_analysis(&mut self, _compiler: &interface::Compiler) -> bool { + true } } -pub fn get_codegen_backend(sess: &Session) -> Box { - static INIT: Once = ONCE_INIT; - - #[allow(deprecated)] - #[no_debug] - static mut LOAD: fn() -> Box = || unreachable!(); - - INIT.call_once(|| { - let codegen_name = sess.opts.debugging_opts.codegen_backend.as_ref() - .unwrap_or(&sess.target.target.options.codegen_backend); - let backend = match &codegen_name[..] { - "metadata_only" => { - rustc_codegen_utils::codegen_backend::MetadataOnlyCodegenBackend::boxed - } - filename if filename.contains(".") => { - load_backend_from_dylib(filename.as_ref()) - } - codegen_name => get_codegen_sysroot(codegen_name), - }; - - unsafe { - LOAD = backend; - } - }); - let backend = unsafe { LOAD() }; - backend.init(sess); - backend -} +pub struct DefaultCallbacks; -fn get_codegen_sysroot(backend_name: &str) -> fn() -> Box { - // For now we only allow this function to be called once as it'll dlopen a - // few things, which seems to work best if we only do that once. In - // general this assertion never trips due to the once guard in `get_codegen_backend`, - // but there's a few manual calls to this function in this file we protect - // against. - static LOADED: AtomicBool = ATOMIC_BOOL_INIT; - assert!(!LOADED.fetch_or(true, Ordering::SeqCst), - "cannot load the default codegen backend twice"); - - // When we're compiling this library with `--test` it'll run as a binary but - // not actually exercise much functionality. As a result most of the logic - // here is defunkt (it assumes we're a dynamic library in a sysroot) so - // let's just return a dummy creation function which won't be used in - // general anyway. - if cfg!(test) { - return rustc_codegen_utils::codegen_backend::MetadataOnlyCodegenBackend::boxed - } +impl Callbacks for DefaultCallbacks {} - let target = session::config::host_triple(); - let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()]; - let path = current_dll_path() - .and_then(|s| s.canonicalize().ok()); - if let Some(dll) = path { - // use `parent` twice to chop off the file name and then also the - // directory containing the dll which should be either `lib` or `bin`. - if let Some(path) = dll.parent().and_then(|p| p.parent()) { - // The original `path` pointed at the `rustc_driver` crate's dll. - // Now that dll should only be in one of two locations. The first is - // in the compiler's libdir, for example `$sysroot/lib/*.dll`. The - // other is the target's libdir, for example - // `$sysroot/lib/rustlib/$target/lib/*.dll`. - // - // We don't know which, so let's assume that if our `path` above - // ends in `$target` we *could* be in the target libdir, and always - // assume that we may be in the main libdir. 
- sysroot_candidates.push(path.to_owned()); - - if path.ends_with(target) { - sysroot_candidates.extend(path.parent() // chop off `$target` - .and_then(|p| p.parent()) // chop off `rustlib` - .and_then(|p| p.parent()) // chop off `lib` - .map(|s| s.to_owned())); - } - } - } +// Parse args and run the compiler. This is the primary entry point for rustc. +// See comments on CompilerCalls below for details about the callbacks argument. +// The FileLoader provides a way to load files from sources other than the file system. +pub fn run_compiler( + args: &[String], + callbacks: &mut (dyn Callbacks + Send), + file_loader: Option>, + emitter: Option> +) -> interface::Result<()> { + let diagnostic_output = emitter.map(|emitter| DiagnosticOutput::Raw(emitter)) + .unwrap_or(DiagnosticOutput::Default); + let matches = match handle_options(args) { + Some(matches) => matches, + None => return Ok(()), + }; - let sysroot = sysroot_candidates.iter() - .map(|sysroot| { - let libdir = filesearch::relative_target_lib_path(&sysroot, &target); - sysroot.join(libdir).with_file_name( - option_env!("CFG_CODEGEN_BACKENDS_DIR").unwrap_or("codegen-backends")) - }) - .filter(|f| { - info!("codegen backend candidate: {}", f.display()); - f.exists() - }) - .next(); - let sysroot = sysroot.unwrap_or_else(|| { - let candidates = sysroot_candidates.iter() - .map(|p| p.display().to_string()) - .collect::>() - .join("\n* "); - let err = format!("failed to find a `codegen-backends` folder \ - in the sysroot candidates:\n* {}", candidates); - early_error(ErrorOutputType::default(), &err); - }); - info!("probing {} for a codegen backend", sysroot.display()); + install_panic_hook(); - let d = sysroot.read_dir().unwrap_or_else(|e| { - let err = format!("failed to load default codegen backend, couldn't \ - read `{}`: {}", sysroot.display(), e); - early_error(ErrorOutputType::default(), &err); - }); - - let mut file: Option = None; + let (sopts, cfg) = config::build_session_options_and_crate_config(&matches); - let expected_name = format!("rustc_codegen_llvm-{}", backend_name); - for entry in d.filter_map(|e| e.ok()) { - let path = entry.path(); - let filename = match path.file_name().and_then(|s| s.to_str()) { - Some(s) => s, - None => continue, + let mut dummy_config = |sopts, cfg, diagnostic_output| { + let mut config = interface::Config { + opts: sopts, + crate_cfg: cfg, + input: Input::File(PathBuf::new()), + input_path: None, + output_file: None, + output_dir: None, + file_loader: None, + diagnostic_output, + stderr: None, + crate_name: None, + lint_caps: Default::default(), }; - if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) { - continue - } - let name = &filename[DLL_PREFIX.len() .. 
filename.len() - DLL_SUFFIX.len()]; - if name != expected_name { - continue - } - if let Some(ref prev) = file { - let err = format!("duplicate codegen backends found\n\ - first: {}\n\ - second: {}\n\ - ", prev.display(), path.display()); - early_error(ErrorOutputType::default(), &err); - } - file = Some(path.clone()); + callbacks.config(&mut config); + config + }; + + if let Some(ref code) = matches.opt_str("explain") { + handle_explain(code, sopts.error_format); + return Ok(()); } - match file { - Some(ref s) => return load_backend_from_dylib(s), + let (odir, ofile) = make_output(&matches); + let (input, input_file_path, input_err) = match make_input(&matches.free) { + Some(v) => v, None => { - let err = format!("failed to load default codegen backend for `{}`, \ - no appropriate codegen dylib found in `{}`", - backend_name, sysroot.display()); - early_error(ErrorOutputType::default(), &err); - } - } + match matches.free.len() { + 0 => { + let config = dummy_config(sopts, cfg, diagnostic_output); + interface::run_compiler(config, |compiler| { + let sopts = &compiler.session().opts; + if sopts.describe_lints { + describe_lints( + compiler.session(), + &*compiler.session().lint_store.borrow(), + false + ); + return; + } + let should_stop = RustcDefaultCalls::print_crate_info( + &***compiler.codegen_backend(), + compiler.session(), + None, + &odir, + &ofile + ); - #[cfg(unix)] - fn current_dll_path() -> Option { - use std::ffi::{OsStr, CStr}; - use std::os::unix::prelude::*; - - unsafe { - let addr = current_dll_path as usize as *mut _; - let mut info = mem::zeroed(); - if libc::dladdr(addr, &mut info) == 0 { - info!("dladdr failed"); - return None - } - if info.dli_fname.is_null() { - info!("dladdr returned null pointer"); - return None + if should_stop == Compilation::Stop { + return; + } + early_error(sopts.error_format, "no input filename given") + }); + return Ok(()); + } + 1 => panic!("make_input should have provided valid inputs"), + _ => early_error(sopts.error_format, &format!( + "multiple input filenames provided (first two filenames are `{}` and `{}`)", + matches.free[0], + matches.free[1], + )), } - let bytes = CStr::from_ptr(info.dli_fname).to_bytes(); - let os = OsStr::from_bytes(bytes); - Some(PathBuf::from(os)) } + }; + + if let Some(err) = input_err { + // Immediately stop compilation if there was an issue reading + // the input (for example if the input stream is not UTF-8). 
+ interface::run_compiler(dummy_config(sopts, cfg, diagnostic_output), |compiler| { + compiler.session().err(&err.to_string()); + }); + return Err(ErrorReported); } - #[cfg(windows)] - fn current_dll_path() -> Option { - use std::ffi::OsString; - use std::os::windows::prelude::*; - - extern "system" { - fn GetModuleHandleExW(dwFlags: u32, - lpModuleName: usize, - phModule: *mut usize) -> i32; - fn GetModuleFileNameW(hModule: usize, - lpFilename: *mut u16, - nSize: u32) -> u32; + let mut config = interface::Config { + opts: sopts, + crate_cfg: cfg, + input, + input_path: input_file_path, + output_file: ofile, + output_dir: odir, + file_loader, + diagnostic_output, + stderr: None, + crate_name: None, + lint_caps: Default::default(), + }; + + callbacks.config(&mut config); + + interface::run_compiler(config, |compiler| { + let sess = compiler.session(); + let should_stop = RustcDefaultCalls::print_crate_info( + &***compiler.codegen_backend(), + sess, + Some(compiler.input()), + compiler.output_dir(), + compiler.output_file(), + ).and_then(|| RustcDefaultCalls::list_metadata( + sess, + compiler.cstore(), + &matches, + compiler.input() + )); + + if should_stop == Compilation::Stop { + return sess.compile_status(); } - const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x00000004; + let pretty_info = parse_pretty(sess, &matches); - unsafe { - let mut module = 0; - let r = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, - current_dll_path as usize, - &mut module); - if r == 0 { - info!("GetModuleHandleExW failed: {}", io::Error::last_os_error()); - return None - } - let mut space = Vec::with_capacity(1024); - let r = GetModuleFileNameW(module, - space.as_mut_ptr(), - space.capacity() as u32); - if r == 0 { - info!("GetModuleFileNameW failed: {}", io::Error::last_os_error()); - return None - } - let r = r as usize; - if r >= space.capacity() { - info!("our buffer was too small? {}", - io::Error::last_os_error()); - return None + compiler.parse()?; + + if let Some((ppm, opt_uii)) = pretty_info { + if ppm.needs_ast_map(&opt_uii) { + pretty::visit_crate(sess, &mut compiler.parse()?.peek_mut(), ppm); + compiler.global_ctxt()?.peek_mut().enter(|tcx| { + let expanded_crate = compiler.expansion()?.take().0; + pretty::print_after_hir_lowering( + tcx, + compiler.input(), + &expanded_crate, + ppm, + opt_uii.clone(), + compiler.output_file().as_ref().map(|p| &**p), + ); + Ok(()) + })?; + return sess.compile_status(); + } else { + let mut krate = compiler.parse()?.take(); + pretty::visit_crate(sess, &mut krate, ppm); + pretty::print_after_parsing( + sess, + &compiler.input(), + &krate, + ppm, + compiler.output_file().as_ref().map(|p| &**p), + ); + return sess.compile_status(); } - space.set_len(r); - let os = OsString::from_wide(&space); - Some(PathBuf::from(os)) } - } -} -// Parse args and run the compiler. This is the primary entry point for rustc. -// See comments on CompilerCalls below for details about the callbacks argument. -// The FileLoader provides a way to load files from sources other than the file system. 
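For comparison with the removed `CompilerCalls`-based entry point below, here is a minimal sketch of how an external driver might use the new `Callbacks`/`run_compiler` API introduced earlier in this file. The `StopAfterParsing` type and `run_parse_only` helper are hypothetical names, not part of this patch:

// Hypothetical example driver: stop the compilation pipeline after parsing,
// much like `-Z parse-only` is handled inside the new `run_compiler` above.
struct StopAfterParsing;

impl Callbacks for StopAfterParsing {
    fn after_parsing(&mut self, _compiler: &interface::Compiler) -> bool {
        // Returning `false` asks the driver not to continue past parsing.
        false
    }
}

fn run_parse_only(args: &[String]) -> interface::Result<()> {
    // `None` for both the file loader and the emitter keeps the defaults.
    run_compiler(args, &mut StopAfterParsing, None, None)
}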
-pub fn run_compiler<'a>(args: &[String], - callbacks: Box + sync::Send + 'a>, - file_loader: Option>, - emitter_dest: Option>) - -> (CompileResult, Option) -{ - let matches = match handle_options(args) { - Some(matches) => matches, - None => return (Ok(()), None), - }; + if !callbacks.after_parsing(compiler) { + return sess.compile_status(); + } - let (sopts, cfg) = config::build_session_options_and_crate_config(&matches); + if sess.opts.debugging_opts.parse_only || + sess.opts.debugging_opts.show_span.is_some() || + sess.opts.debugging_opts.ast_json_noexpand { + return sess.compile_status(); + } - driver::spawn_thread_pool(sopts, |sopts| { - run_compiler_with_pool(matches, sopts, cfg, callbacks, file_loader, emitter_dest) - }) -} + compiler.register_plugins()?; -fn run_compiler_with_pool<'a>( - matches: getopts::Matches, - sopts: config::Options, - cfg: ast::CrateConfig, - mut callbacks: Box + sync::Send + 'a>, - file_loader: Option>, - emitter_dest: Option> -) -> (CompileResult, Option) { - macro_rules! do_or_return {($expr: expr, $sess: expr) => { - match $expr { - Compilation::Stop => return (Ok(()), $sess), - Compilation::Continue => {} + // Lint plugins are registered; now we can process command line flags. + if sess.opts.describe_lints { + describe_lints(&sess, &sess.lint_store.borrow(), true); + return sess.compile_status(); } - }} - let descriptions = diagnostics_registry(); + compiler.prepare_outputs()?; - do_or_return!(callbacks.early_callback(&matches, - &sopts, - &cfg, - &descriptions, - sopts.error_format), - None); + if sess.opts.output_types.contains_key(&OutputType::DepInfo) + && sess.opts.output_types.len() == 1 + { + return sess.compile_status(); + } - let (odir, ofile) = make_output(&matches); - let (input, input_file_path, input_err) = match make_input(&matches.free) { - Some((input, input_file_path, input_err)) => { - let (input, input_file_path) = callbacks.some_input(input, input_file_path); - (input, input_file_path, input_err) - }, - None => match callbacks.no_input(&matches, &sopts, &cfg, &odir, &ofile, &descriptions) { - Some((input, input_file_path)) => (input, input_file_path, None), - None => return (Ok(()), None), - }, - }; + compiler.global_ctxt()?; - let loader = file_loader.unwrap_or(box RealFileLoader); - let source_map = Lrc::new(SourceMap::with_file_loader(loader, sopts.file_path_mapping())); - let mut sess = session::build_session_with_source_map( - sopts, input_file_path.clone(), descriptions, source_map, emitter_dest, - ); + if sess.opts.debugging_opts.no_analysis || + sess.opts.debugging_opts.ast_json { + return sess.compile_status(); + } - if let Some(err) = input_err { - // Immediately stop compilation if there was an issue reading - // the input (for example if the input stream is not UTF-8). 
- sess.err(&err.to_string()); - return (Err(CompileIncomplete::Stopped), Some(sess)); - } + if sess.opts.debugging_opts.save_analysis { + let expanded_crate = &compiler.expansion()?.peek().0; + let crate_name = compiler.crate_name()?.peek().clone(); + compiler.global_ctxt()?.peek_mut().enter(|tcx| { + let result = tcx.analysis(LOCAL_CRATE); + + time(sess, "save analysis", || { + save::process_crate( + tcx, + &expanded_crate, + &crate_name, + &compiler.input(), + None, + DumpHandler::new(compiler.output_dir().as_ref().map(|p| &**p), &crate_name) + ) + }); + + result + // AST will be dropped *after* the `after_analysis` callback + // (needed by the RLS) + })?; + } else { + // Drop AST after creating GlobalCtxt to free memory + mem::drop(compiler.expansion()?.take()); + } - let codegen_backend = get_codegen_backend(&sess); + compiler.global_ctxt()?.peek_mut().enter(|tcx| tcx.analysis(LOCAL_CRATE))?; - rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); + if !callbacks.after_analysis(compiler) { + return sess.compile_status(); + } - let mut cfg = config::build_configuration(&sess, cfg); - target_features::add_configuration(&mut cfg, &sess, &*codegen_backend); - sess.parse_sess.config = cfg; + if sess.opts.debugging_opts.save_analysis { + mem::drop(compiler.expansion()?.take()); + } - let result = { - let plugins = sess.opts.debugging_opts.extra_plugins.clone(); + compiler.ongoing_codegen()?; - let cstore = CStore::new(codegen_backend.metadata_loader()); + // Drop GlobalCtxt after starting codegen to free memory + mem::drop(compiler.global_ctxt()?.take()); - do_or_return!(callbacks.late_callback(&*codegen_backend, - &matches, - &sess, - &cstore, - &input, - &odir, - &ofile), Some(sess)); + if sess.opts.debugging_opts.print_type_sizes { + sess.code_stats.borrow().print_type_sizes(); + } - let _sess_abort_error = OnDrop(|| sess.diagnostic().print_error_count()); + compiler.link()?; - let control = callbacks.build_controller(&sess, &matches); + if sess.opts.debugging_opts.perf_stats { + sess.print_perf_stats(); + } - driver::compile_input(codegen_backend, - &sess, - &cstore, - &input_file_path, - &input, - &odir, - &ofile, - Some(plugins), - &control) - }; + if sess.print_fuel_crate.is_some() { + eprintln!("Fuel used by {}: {}", + sess.print_fuel_crate.as_ref().unwrap(), + sess.print_fuel.load(SeqCst)); + } - (result, Some(sess)) + Ok(()) + }) } #[cfg(unix)] @@ -643,72 +453,6 @@ impl Compilation { } } -/// A trait for customizing the compilation process. Offers a number of hooks for -/// executing custom code or customizing input. -pub trait CompilerCalls<'a> { - /// Hook for a callback early in the process of handling arguments. This will - /// be called straight after options have been parsed but before anything - /// else (e.g., selecting input and output). - fn early_callback(&mut self, - _: &getopts::Matches, - _: &config::Options, - _: &ast::CrateConfig, - _: &errors::registry::Registry, - _: ErrorOutputType) - -> Compilation { - Compilation::Continue - } - - /// Hook for a callback late in the process of handling arguments. This will - /// be called just before actual compilation starts (and before build_controller - /// is called), after all arguments etc. have been completely handled. - fn late_callback(&mut self, - _: &dyn CodegenBackend, - _: &getopts::Matches, - _: &Session, - _: &CStore, - _: &Input, - _: &Option, - _: &Option) - -> Compilation { - Compilation::Continue - } - - /// Called after we extract the input from the arguments. 
Gives the implementer - /// an opportunity to change the inputs or to add some custom input handling. - /// The default behaviour is to simply pass through the inputs. - fn some_input(&mut self, - input: Input, - input_path: Option) - -> (Input, Option) { - (input, input_path) - } - - /// Called after we extract the input from the arguments if there is no valid - /// input. Gives the implementer an opportunity to supply alternate input (by - /// returning a Some value) or to add custom behaviour for this error such as - /// emitting error messages. Returning None will cause compilation to stop - /// at this point. - fn no_input(&mut self, - _: &getopts::Matches, - _: &config::Options, - _: &ast::CrateConfig, - _: &Option, - _: &Option, - _: &errors::registry::Registry) - -> Option<(Input, Option)> { - None - } - - // Create a CompilController struct for controlling the behaviour of - // compilation. - fn build_controller( - self: Box, - _: &Session, - _: &getopts::Matches - ) -> CompileController<'a>; -} - /// CompilerCalls instance for a regular rustc build. #[derive(Copy, Clone)] pub struct RustcDefaultCalls; @@ -739,8 +483,8 @@ fn stdout_isatty() -> bool { } fn handle_explain(code: &str, - descriptions: &errors::registry::Registry, output: ErrorOutputType) { + let descriptions = rustc_interface::util::diagnostics_registry(); let normalised = if code.starts_with("E") { code.to_string() } else { @@ -812,173 +556,6 @@ fn show_content_with_pager(content: &String) { } } -impl<'a> CompilerCalls<'a> for RustcDefaultCalls { - fn early_callback(&mut self, - matches: &getopts::Matches, - _: &config::Options, - _: &ast::CrateConfig, - descriptions: &errors::registry::Registry, - output: ErrorOutputType) - -> Compilation { - if let Some(ref code) = matches.opt_str("explain") { - handle_explain(code, descriptions, output); - return Compilation::Stop; - } - - Compilation::Continue - } - - fn no_input(&mut self, - matches: &getopts::Matches, - sopts: &config::Options, - cfg: &ast::CrateConfig, - odir: &Option, - ofile: &Option, - descriptions: &errors::registry::Registry) - -> Option<(Input, Option)> { - match matches.free.len() { - 0 => { - let mut sess = build_session(sopts.clone(), - None, - descriptions.clone()); - if sopts.describe_lints { - let mut ls = lint::LintStore::new(); - rustc_lint::register_builtins(&mut ls, Some(&sess)); - describe_lints(&sess, &ls, false); - return None; - } - rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let mut cfg = config::build_configuration(&sess, cfg.clone()); - let codegen_backend = get_codegen_backend(&sess); - target_features::add_configuration(&mut cfg, &sess, &*codegen_backend); - sess.parse_sess.config = cfg; - let should_stop = RustcDefaultCalls::print_crate_info( - &*codegen_backend, - &sess, - None, - odir, - ofile - ); - - if should_stop == Compilation::Stop { - return None; - } - early_error(sopts.error_format, "no input filename given"); - } - 1 => panic!("make_input should have provided valid inputs"), - _ => early_error(sopts.error_format, "multiple input filenames provided"), - } - } - - fn late_callback(&mut self, - codegen_backend: &dyn CodegenBackend, - matches: &getopts::Matches, - sess: &Session, - cstore: &CStore, - input: &Input, - odir: &Option, - ofile: &Option) - -> Compilation { - RustcDefaultCalls::print_crate_info(codegen_backend, sess, Some(input), odir, ofile) - .and_then(|| RustcDefaultCalls::list_metadata(sess, cstore, matches, input)) - } - - fn build_controller(self: Box, - sess: &Session, - 
matches: &getopts::Matches) - -> CompileController<'a> { - let mut control = CompileController::basic(); - - control.keep_ast = sess.opts.debugging_opts.keep_ast; - control.continue_parse_after_error = sess.opts.debugging_opts.continue_parse_after_error; - - if let Some((ppm, opt_uii)) = parse_pretty(sess, matches) { - if ppm.needs_ast_map(&opt_uii) { - control.after_hir_lowering.stop = Compilation::Stop; - - control.after_parse.callback = box move |state| { - state.krate = Some(pretty::fold_crate(state.session, - state.krate.take().unwrap(), - ppm)); - }; - control.after_hir_lowering.callback = box move |state| { - pretty::print_after_hir_lowering(state.session, - state.cstore.unwrap(), - state.hir_map.unwrap(), - state.analysis.unwrap(), - state.resolutions.unwrap(), - state.input, - &state.expanded_crate.take().unwrap(), - state.crate_name.unwrap(), - ppm, - state.arenas.unwrap(), - state.output_filenames.unwrap(), - opt_uii.clone(), - state.out_file); - }; - } else { - control.after_parse.stop = Compilation::Stop; - - control.after_parse.callback = box move |state| { - let krate = pretty::fold_crate(state.session, state.krate.take().unwrap(), ppm); - pretty::print_after_parsing(state.session, - state.input, - &krate, - ppm, - state.out_file); - }; - } - - return control; - } - - if sess.opts.debugging_opts.parse_only || - sess.opts.debugging_opts.show_span.is_some() || - sess.opts.debugging_opts.ast_json_noexpand { - control.after_parse.stop = Compilation::Stop; - } - - if sess.opts.debugging_opts.no_analysis || - sess.opts.debugging_opts.ast_json { - control.after_hir_lowering.stop = Compilation::Stop; - } - - if sess.opts.debugging_opts.save_analysis { - enable_save_analysis(&mut control); - } - - if sess.print_fuel_crate.is_some() { - let old_callback = control.compilation_done.callback; - control.compilation_done.callback = box move |state| { - old_callback(state); - let sess = state.session; - eprintln!("Fuel used by {}: {}", - sess.print_fuel_crate.as_ref().unwrap(), - sess.print_fuel.get()); - } - } - control - } -} - -pub fn enable_save_analysis(control: &mut CompileController) { - control.keep_ast = true; - control.after_analysis.callback = box |state| { - time(state.session, "save analysis", || { - save::process_crate(state.tcx.unwrap(), - state.expanded_crate.unwrap(), - state.analysis.unwrap(), - state.crate_name.unwrap(), - state.input, - None, - DumpHandler::new(state.out_dir, - state.crate_name.unwrap())) - }); - }; - control.after_analysis.run_callback_on_error = true; - control.make_glob_map = resolve::MakeGlobMap::Yes; -} - impl RustcDefaultCalls { pub fn list_metadata(sess: &Session, cstore: &CStore, @@ -1048,13 +625,19 @@ impl RustcDefaultCalls { let input = input.unwrap_or_else(|| early_error(ErrorOutputType::default(), "no input file provided")); let attrs = attrs.as_ref().unwrap(); - let t_outputs = driver::build_output_filenames(input, odir, ofile, attrs, sess); + let t_outputs = rustc_interface::util::build_output_filenames( + input, + odir, + ofile, + attrs, + sess + ); let id = rustc_codegen_utils::link::find_crate_name(Some(sess), attrs, input); if *req == PrintRequest::CrateName { println!("{}", id); continue; } - let crate_types = driver::collect_crate_types(sess, attrs); + let crate_types = rustc_interface::util::collect_crate_types(sess, attrs); for &style in &crate_types { let fname = rustc_codegen_utils::link::filename_for_input( sess, @@ -1071,7 +654,7 @@ impl RustcDefaultCalls { let mut cfgs = sess.parse_sess.config.iter().filter_map(|&(name, ref 
value)| { let gated_cfg = GatedCfg::gate(&ast::MetaItem { - ident: ast::Path::from_ident(ast::Ident::with_empty_ctxt(name)), + path: ast::Path::from_ident(ast::Ident::with_empty_ctxt(name)), node: ast::MetaItemKind::Word, span: DUMMY_SP, }); @@ -1161,7 +744,7 @@ fn usage(verbose: bool, include_unstable_options: bool) { } let message = "Usage: rustc [OPTIONS] INPUT"; let nightly_help = if nightly_options::is_nightly_build() { - "\n -Z help Print internal options for debugging rustc" + "\n -Z help Print unstable compiler options" } else { "" }; @@ -1309,7 +892,7 @@ Available lint options: } fn describe_debug_flags() { - println!("\nAvailable debug options:\n"); + println!("\nAvailable options:\n"); print_flag_list("-Z", config::DB_OPTIONS); } @@ -1347,7 +930,7 @@ fn print_flag_list(cmdline_opt: &str, /// Process command line options. Emits messages as appropriate. If compilation /// should continue, returns a getopts::Matches object parsed from args, -/// otherwise returns None. +/// otherwise returns `None`. /// /// The compiler's handling of options is a little complicated as it ties into /// our stability story, and it's even *more* complicated by historical @@ -1468,106 +1051,7 @@ fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec(name: String, f: F) -> Result> - where F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - #[cfg(all(unix, not(target_os = "haiku")))] - let spawn_thread = unsafe { - // Fetch the current resource limits - let mut rlim = libc::rlimit { - rlim_cur: 0, - rlim_max: 0, - }; - if libc::getrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - error!("in_rustc_thread: error calling getrlimit: {}", err); - true - } else if rlim.rlim_max < STACK_SIZE as libc::rlim_t { - true - } else if rlim.rlim_cur < STACK_SIZE as libc::rlim_t { - std::rt::deinit_stack_guard(); - rlim.rlim_cur = STACK_SIZE as libc::rlim_t; - if libc::setrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - error!("in_rustc_thread: error calling setrlimit: {}", err); - std::rt::update_stack_guard(); - true - } else { - std::rt::update_stack_guard(); - false - } - } else { - false - } - }; - - // We set the stack size at link time. See src/rustc/rustc.rs. - #[cfg(windows)] - let spawn_thread = false; - - #[cfg(target_os = "haiku")] - let spawn_thread = unsafe { - // Haiku does not have setrlimit implemented for the stack size. - // By default it does have the 16 MB stack limit, but we check this in - // case the minimum STACK_SIZE changes or Haiku's defaults change. - let mut rlim = libc::rlimit { - rlim_cur: 0, - rlim_max: 0, - }; - if libc::getrlimit(libc::RLIMIT_STACK, &mut rlim) != 0 { - let err = io::Error::last_os_error(); - error!("in_rustc_thread: error calling getrlimit: {}", err); - true - } else if rlim.rlim_cur >= STACK_SIZE { - false - } else { - true - } - }; - - #[cfg(not(any(windows, unix)))] - let spawn_thread = true; - - // The or condition is added from backward compatibility. - if spawn_thread || env::var_os("RUST_MIN_STACK").is_some() { - let mut cfg = thread::Builder::new().name(name); - - // FIXME: Hacks on hacks. If the env is trying to override the stack size - // then *don't* set it explicitly. 
- if env::var_os("RUST_MIN_STACK").is_none() { - cfg = cfg.stack_size(STACK_SIZE); - } - - let thread = cfg.spawn(f); - thread.unwrap().join() - } else { - let f = panic::AssertUnwindSafe(f); - panic::catch_unwind(f) - } -} - -/// Runs `f` in a suitable thread for running `rustc`; returns a -/// `Result` with either the return value of `f` or -- if a panic -/// occurs -- the panic value. -pub fn in_rustc_thread(f: F) -> Result> - where F: FnOnce() -> R + Send + 'static, - R: Send + 'static, -{ - in_named_rustc_thread("rustc".to_string(), f) -} - -/// Get a list of extra command-line flags provided by the user, as strings. +/// Gets a list of extra command-line flags provided by the user, as strings. /// /// This function is used during ICEs to show more information useful for /// debugging, since some ICEs only happens with non-default compiler flags @@ -1621,28 +1105,15 @@ fn extra_compiler_flags() -> Option<(Vec, bool)> { } } -#[derive(Debug)] -pub struct CompilationFailure; - -impl Error for CompilationFailure {} - -impl Display for CompilationFailure { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "compilation had errors") - } -} - -/// Run a procedure which will detect panics in the compiler and print nicer +/// Runs a procedure which will detect panics in the compiler and print nicer /// error messages rather than just failing the test. /// /// The diagnostic emitter yielded to the procedure should be used for reporting /// errors of the compiler. -pub fn monitor(f: F) -> Result<(), CompilationFailure> { - in_rustc_thread(move || { - f() - }).map_err(|value| { +pub fn report_ices_to_stderr_if_any R, R>(f: F) -> Result { + catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| { if value.is::() { - CompilationFailure + ErrorReported } else { // Thread panicked without emitting a fatal diagnostic eprintln!(""); @@ -1652,7 +1123,7 @@ pub fn monitor(f: F) -> Result<(), CompilationFail None, false, false)); - let handler = errors::Handler::with_emitter(true, false, emitter); + let handler = errors::Handler::with_emitter(true, None, emitter); // a .span_bug or .bug call has already printed what // it wants to print. 
@@ -1689,25 +1160,6 @@ pub fn monitor(f: F) -> Result<(), CompilationFail }) } -pub fn diagnostics_registry() -> errors::registry::Registry { - use errors::registry::Registry; - - let mut all_errors = Vec::new(); - all_errors.extend_from_slice(&rustc::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_typeck::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); - // FIXME: need to figure out a way to get these back in here - // all_errors.extend_from_slice(get_codegen_backend(sess).diagnostics()); - all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); - all_errors.extend_from_slice(&rustc_mir::DIAGNOSTICS); - all_errors.extend_from_slice(&syntax::DIAGNOSTICS); - - Registry::new(&all_errors) -} - /// This allows tools to enable rust logging without having to magically match rustc's /// log crate version pub fn init_rustc_env_logger() { @@ -1716,17 +1168,17 @@ pub fn init_rustc_env_logger() { pub fn main() { init_rustc_env_logger(); - let result = run(|| { + let result = report_ices_to_stderr_if_any(|| { let args = env::args_os().enumerate() .map(|(i, arg)| arg.into_string().unwrap_or_else(|arg| { early_error(ErrorOutputType::default(), &format!("Argument {} is not valid Unicode: {:?}", i, arg)) })) .collect::>(); - run_compiler(&args, - Box::new(RustcDefaultCalls), - None, - None) + run_compiler(&args, &mut DefaultCallbacks, None, None) + }).and_then(|result| result); + process::exit(match result { + Ok(_) => EXIT_SUCCESS, + Err(_) => EXIT_FAILURE, }); - process::exit(result as i32); } diff --git a/src/librustc_driver/pretty.rs b/src/librustc_driver/pretty.rs index b41b0d081ced9..5cefc35607db0 100644 --- a/src/librustc_driver/pretty.rs +++ b/src/librustc_driver/pretty.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The various pretty-printing routines. 
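To make the control flow of the new `main` above easier to follow, a hedged sketch of how a rustc-like binary built on this driver would turn the nested result from `report_ices_to_stderr_if_any` into a process exit code (`my_tool_main` is a hypothetical name, and the non-Unicode argument check done in the real `main` is omitted for brevity):

pub fn my_tool_main() {
    init_rustc_env_logger();
    let result = report_ices_to_stderr_if_any(|| {
        let args: Vec<String> = std::env::args().collect();
        run_compiler(&args, &mut DefaultCallbacks, None, None)
    })
    .and_then(|result| result); // flatten the Result<Result<(), _>, _>
    std::process::exit(match result {
        Ok(_) => EXIT_SUCCESS,
        Err(_) => EXIT_FAILURE,
    });
}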
use rustc::cfg; @@ -16,24 +6,23 @@ use rustc::hir; use rustc::hir::map as hir_map; use rustc::hir::map::blocks; use rustc::hir::print as pprust_hir; +use rustc::hir::def_id::LOCAL_CRATE; use rustc::session::Session; -use rustc::session::config::{Input, OutputFilenames}; -use rustc::ty::{self, TyCtxt, Resolutions, AllArenas}; +use rustc::session::config::Input; +use rustc::ty::{self, TyCtxt}; +use rustc::util::common::ErrorReported; +use rustc_interface::util::ReplaceBodyWithLoop; use rustc_borrowck as borrowck; use rustc_borrowck::graphviz as borrowck_dot; -use rustc_data_structures::thin_vec::ThinVec; -use rustc_metadata::cstore::CStore; use rustc_mir::util::{write_mir_pretty, write_mir_graphviz}; -use syntax::ast::{self, BlockCheckMode}; -use syntax::fold::{self, Folder}; +use syntax::ast; +use syntax::mut_visit::MutVisitor; use syntax::print::{pprust}; use syntax::print::pprust::PrintState; -use syntax::ptr::P; -use syntax_pos::{self, FileName}; +use syntax_pos::FileName; use graphviz as dot; -use smallvec::SmallVec; use std::cell::Cell; use std::fs::File; @@ -41,13 +30,14 @@ use std::io::{self, Write}; use std::option; use std::path::Path; use std::str::FromStr; -use std::mem; pub use self::UserIdentifiedItem::*; pub use self::PpSourceMode::*; pub use self::PpMode::*; use self::NodesMatchingUII::*; -use {abort_on_err, driver}; +use crate::abort_on_err; + +use crate::source_name; #[derive(Copy, Clone, PartialEq, Debug)] pub enum PpSourceMode { @@ -133,7 +123,8 @@ pub fn parse_pretty(sess: &Session, sess.fatal(&format!("argument to `unpretty` must be one of `normal`, \ `expanded`, `flowgraph[,unlabelled]=`, \ `identified`, `expanded,identified`, `everybody_loops`, \ - `hir`, `hir,identified`, `hir,typed`, or `mir`; got {}", + `hir`, `hir,identified`, `hir,typed`, `hir-tree`, \ + `mir` or `mir-cfg`; got {}", name)); } else { sess.fatal(&format!("argument to `pretty` must be one of `normal`, `expanded`, \ @@ -165,7 +156,7 @@ impl PpSourceMode { /// Constructs a `PrinterSupport` object and passes it to `f`. 
fn call_with_pp_support<'tcx, A, F>(&self, sess: &'tcx Session, - hir_map: Option<&hir_map::Map<'tcx>>, + tcx: Option>, f: F) -> A where F: FnOnce(&dyn PrinterSupport) -> A @@ -174,7 +165,7 @@ impl PpSourceMode { PpmNormal | PpmEveryBodyLoops | PpmExpanded => { let annotation = NoAnn { sess, - hir_map: hir_map.map(|m| m.clone()), + tcx, }; f(&annotation) } @@ -182,7 +173,7 @@ impl PpSourceMode { PpmIdentified | PpmExpandedIdentified => { let annotation = IdentifiedAnnotation { sess, - hir_map: hir_map.map(|m| m.clone()), + tcx, }; f(&annotation) } @@ -197,58 +188,38 @@ impl PpSourceMode { } fn call_with_pp_support_hir<'tcx, A, F>( &self, - sess: &'tcx Session, - cstore: &'tcx CStore, - hir_map: &hir_map::Map<'tcx>, - analysis: &ty::CrateAnalysis, - resolutions: &Resolutions, - arenas: &'tcx AllArenas<'tcx>, - output_filenames: &OutputFilenames, - id: &str, + tcx: TyCtxt<'tcx, 'tcx, 'tcx>, f: F ) -> A - where F: FnOnce(&dyn HirPrinterSupport, &hir::Crate) -> A + where F: FnOnce(&dyn HirPrinterSupport<'_>, &hir::Crate) -> A { match *self { PpmNormal => { let annotation = NoAnn { - sess, - hir_map: Some(hir_map.clone()), + sess: tcx.sess, + tcx: Some(tcx), }; - f(&annotation, hir_map.forest.krate()) + f(&annotation, tcx.hir().forest.krate()) } PpmIdentified => { let annotation = IdentifiedAnnotation { - sess, - hir_map: Some(hir_map.clone()), + sess: tcx.sess, + tcx: Some(tcx), }; - f(&annotation, hir_map.forest.krate()) + f(&annotation, tcx.hir().forest.krate()) } PpmTyped => { - let control = &driver::CompileController::basic(); - let codegen_backend = ::get_codegen_backend(sess); - abort_on_err(driver::phase_3_run_analysis_passes(&*codegen_backend, - control, - sess, - cstore, - hir_map.clone(), - analysis.clone(), - resolutions.clone(), - arenas, - id, - output_filenames, - |tcx, _, _, _| { - let empty_tables = ty::TypeckTables::empty(None); - let annotation = TypedAnnotation { - tcx, - tables: Cell::new(&empty_tables) - }; - tcx.dep_graph.with_ignore(|| { - f(&annotation, hir_map.forest.krate()) - }) - }), - sess) + abort_on_err(tcx.analysis(LOCAL_CRATE), tcx.sess); + + let empty_tables = ty::TypeckTables::empty(None); + let annotation = TypedAnnotation { + tcx, + tables: Cell::new(&empty_tables) + }; + tcx.dep_graph.with_ignore(|| { + f(&annotation, tcx.hir().forest.krate()) + }) } _ => panic!("Should use call_with_pp_support"), } @@ -296,7 +267,7 @@ trait HirPrinterSupport<'hir>: pprust_hir::PpAnn { struct NoAnn<'hir> { sess: &'hir Session, - hir_map: Option>, + tcx: Option>, } impl<'hir> PrinterSupport for NoAnn<'hir> { @@ -315,7 +286,7 @@ impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> { } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { - self.hir_map.as_ref() + self.tcx.map(|tcx| tcx.hir()) } fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn { @@ -325,10 +296,10 @@ impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> { impl<'hir> pprust::PpAnn for NoAnn<'hir> {} impl<'hir> pprust_hir::PpAnn for NoAnn<'hir> { - fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) + fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) -> io::Result<()> { - if let Some(ref map) = self.hir_map { - pprust_hir::PpAnn::nested(map, state, nested) + if let Some(tcx) = self.tcx { + pprust_hir::PpAnn::nested(tcx.hir(), state, nested) } else { Ok(()) } @@ -337,7 +308,7 @@ impl<'hir> pprust_hir::PpAnn for NoAnn<'hir> { struct IdentifiedAnnotation<'hir> { sess: &'hir Session, - hir_map: Option>, + tcx: Option>, } impl<'hir> PrinterSupport for 
IdentifiedAnnotation<'hir> { @@ -351,13 +322,13 @@ impl<'hir> PrinterSupport for IdentifiedAnnotation<'hir> { } impl<'hir> pprust::PpAnn for IdentifiedAnnotation<'hir> { - fn pre(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { + fn pre(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) -> io::Result<()> { match node { pprust::AnnNode::Expr(_) => s.popen(), _ => Ok(()), } } - fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { + fn post(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) -> io::Result<()> { match node { pprust::AnnNode::Ident(_) | pprust::AnnNode::Name(_) => Ok(()), @@ -393,7 +364,7 @@ impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> { } fn hir_map<'a>(&'a self) -> Option<&'a hir_map::Map<'hir>> { - self.hir_map.as_ref() + self.tcx.map(|tcx| tcx.hir()) } fn pp_ann<'a>(&'a self) -> &'a dyn pprust_hir::PpAnn { @@ -402,27 +373,27 @@ impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> { } impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { - fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) + fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) -> io::Result<()> { - if let Some(ref map) = self.hir_map { - pprust_hir::PpAnn::nested(map, state, nested) + if let Some(ref tcx) = self.tcx { + pprust_hir::PpAnn::nested(tcx.hir(), state, nested) } else { Ok(()) } } - fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { + fn pre(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) -> io::Result<()> { match node { pprust_hir::AnnNode::Expr(_) => s.popen(), _ => Ok(()), } } - fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { + fn post(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) -> io::Result<()> { match node { pprust_hir::AnnNode::Name(_) => Ok(()), pprust_hir::AnnNode::Item(item) => { s.s.space()?; - s.synth_comment(format!("node_id: {} hir local_id: {}", - item.id, item.hir_id.local_id.as_u32())) + s.synth_comment(format!("hir_id: {} hir local_id: {}", + item.hir_id, item.hir_id.local_id.as_u32())) } pprust_hir::AnnNode::SubItem(id) => { s.s.space()?; @@ -430,19 +401,19 @@ impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> { } pprust_hir::AnnNode::Block(blk) => { s.s.space()?; - s.synth_comment(format!("block node_id: {} hir local_id: {}", - blk.id, blk.hir_id.local_id.as_u32())) + s.synth_comment(format!("block hir_id: {} hir local_id: {}", + blk.hir_id, blk.hir_id.local_id.as_u32())) } pprust_hir::AnnNode::Expr(expr) => { s.s.space()?; - s.synth_comment(format!("node_id: {} hir local_id: {}", - expr.id, expr.hir_id.local_id.as_u32()))?; + s.synth_comment(format!("expr hir_id: {} hir local_id: {}", + expr.hir_id, expr.hir_id.local_id.as_u32()))?; s.pclose() } pprust_hir::AnnNode::Pat(pat) => { s.s.space()?; - s.synth_comment(format!("pat node_id: {} hir local_id: {}", - pat.id, pat.hir_id.local_id.as_u32())) + s.synth_comment(format!("pat hir_id: {} hir local_id: {}", + pat.hir_id, pat.hir_id.local_id.as_u32())) } } } @@ -463,7 +434,7 @@ impl<'a> PrinterSupport for HygieneAnnotation<'a> { } impl<'a> pprust::PpAnn for HygieneAnnotation<'a> { - fn post(&self, s: &mut pprust::State, node: pprust::AnnNode) -> io::Result<()> { + fn post(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) -> io::Result<()> { match node { pprust::AnnNode::Ident(&ast::Ident { name, span }) => { s.s.space()?; @@ 
-500,12 +471,12 @@ impl<'b, 'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'b, 'tcx> { } fn node_path(&self, id: ast::NodeId) -> Option { - Some(self.tcx.node_path_str(id)) + Some(self.tcx.def_path_str(self.tcx.hir().local_def_id(id))) } } impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { - fn nested(&self, state: &mut pprust_hir::State, nested: pprust_hir::Nested) + fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) -> io::Result<()> { let old_tables = self.tables.get(); if let pprust_hir::Nested::Body(id) = nested { @@ -515,13 +486,13 @@ impl<'a, 'tcx> pprust_hir::PpAnn for TypedAnnotation<'a, 'tcx> { self.tables.set(old_tables); Ok(()) } - fn pre(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { + fn pre(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) -> io::Result<()> { match node { pprust_hir::AnnNode::Expr(_) => s.popen(), _ => Ok(()), } } - fn post(&self, s: &mut pprust_hir::State, node: pprust_hir::AnnNode) -> io::Result<()> { + fn post(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) -> io::Result<()> { match node { pprust_hir::AnnNode::Expr(expr) => { s.s.space()?; @@ -609,7 +580,11 @@ impl UserIdentifiedItem { } } - fn to_one_node_id(self, user_option: &str, sess: &Session, map: &hir_map::Map) -> ast::NodeId { + fn to_one_node_id(self, + user_option: &str, + sess: &Session, + map: &hir_map::Map<'_>) + -> ast::NodeId { let fail_because = |is_wrong_because| -> ast::NodeId { let message = format!("{} needs NodeId (int) or unique path suffix (b::c::d); got \ {}, which {}", @@ -637,206 +612,6 @@ impl UserIdentifiedItem { } } -// Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere. -// -// FIXME: Currently the `everybody_loops` transformation is not applied to: -// * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are -// waiting for miri to fix that. -// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging. -// Solving this may require `!` to implement every trait, which relies on the an even more -// ambitious form of the closed RFC #1637. See also [#34511]. -// -// [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401 -pub struct ReplaceBodyWithLoop<'a> { - within_static_or_const: bool, - nested_blocks: Option>, - sess: &'a Session, -} - -impl<'a> ReplaceBodyWithLoop<'a> { - pub fn new(sess: &'a Session) -> ReplaceBodyWithLoop<'a> { - ReplaceBodyWithLoop { - within_static_or_const: false, - nested_blocks: None, - sess - } - } - - fn run R>(&mut self, is_const: bool, action: F) -> R { - let old_const = mem::replace(&mut self.within_static_or_const, is_const); - let old_blocks = self.nested_blocks.take(); - let ret = action(self); - self.within_static_or_const = old_const; - self.nested_blocks = old_blocks; - ret - } - - fn should_ignore_fn(ret_ty: &ast::FnDecl) -> bool { - if let ast::FunctionRetTy::Ty(ref ty) = ret_ty.output { - fn involves_impl_trait(ty: &ast::Ty) -> bool { - match ty.node { - ast::TyKind::ImplTrait(..) => true, - ast::TyKind::Slice(ref subty) | - ast::TyKind::Array(ref subty, _) | - ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) | - ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. 
}) | - ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), - ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), - ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { - match seg.args.as_ref().map(|generic_arg| &**generic_arg) { - None => false, - Some(&ast::GenericArgs::AngleBracketed(ref data)) => { - let types = data.args.iter().filter_map(|arg| match arg { - ast::GenericArg::Type(ty) => Some(ty), - _ => None, - }); - any_involves_impl_trait(types.into_iter()) || - any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)) - }, - Some(&ast::GenericArgs::Parenthesized(ref data)) => { - any_involves_impl_trait(data.inputs.iter()) || - any_involves_impl_trait(data.output.iter()) - } - } - }), - _ => false, - } - } - - fn any_involves_impl_trait<'a, I: Iterator>>(mut it: I) -> bool { - it.any(|subty| involves_impl_trait(subty)) - } - - involves_impl_trait(ty) - } else { - false - } - } -} - -impl<'a> fold::Folder for ReplaceBodyWithLoop<'a> { - fn fold_item_kind(&mut self, i: ast::ItemKind) -> ast::ItemKind { - let is_const = match i { - ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true, - ast::ItemKind::Fn(ref decl, ref header, _, _) => - header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), - _ => false, - }; - self.run(is_const, |s| fold::noop_fold_item_kind(i, s)) - } - - fn fold_trait_item(&mut self, i: ast::TraitItem) -> SmallVec<[ast::TraitItem; 1]> { - let is_const = match i.node { - ast::TraitItemKind::Const(..) => true, - ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref header, .. }, _) => - header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), - _ => false, - }; - self.run(is_const, |s| fold::noop_fold_trait_item(i, s)) - } - - fn fold_impl_item(&mut self, i: ast::ImplItem) -> SmallVec<[ast::ImplItem; 1]> { - let is_const = match i.node { - ast::ImplItemKind::Const(..) => true, - ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref header, .. 
}, _) => - header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), - _ => false, - }; - self.run(is_const, |s| fold::noop_fold_impl_item(i, s)) - } - - fn fold_anon_const(&mut self, c: ast::AnonConst) -> ast::AnonConst { - self.run(true, |s| fold::noop_fold_anon_const(c, s)) - } - - fn fold_block(&mut self, b: P) -> P { - fn stmt_to_block(rules: ast::BlockCheckMode, - recovered: bool, - s: Option, - sess: &Session) -> ast::Block { - ast::Block { - stmts: s.into_iter().collect(), - rules, - id: sess.next_node_id(), - span: syntax_pos::DUMMY_SP, - recovered, - } - } - - fn block_to_stmt(b: ast::Block, sess: &Session) -> ast::Stmt { - let expr = P(ast::Expr { - id: sess.next_node_id(), - node: ast::ExprKind::Block(P(b), None), - span: syntax_pos::DUMMY_SP, - attrs: ThinVec::new(), - }); - - ast::Stmt { - id: sess.next_node_id(), - node: ast::StmtKind::Expr(expr), - span: syntax_pos::DUMMY_SP, - } - } - - let empty_block = stmt_to_block(BlockCheckMode::Default, false, None, self.sess); - let loop_expr = P(ast::Expr { - node: ast::ExprKind::Loop(P(empty_block), None), - id: self.sess.next_node_id(), - span: syntax_pos::DUMMY_SP, - attrs: ThinVec::new(), - }); - - let loop_stmt = ast::Stmt { - id: self.sess.next_node_id(), - span: syntax_pos::DUMMY_SP, - node: ast::StmtKind::Expr(loop_expr), - }; - - if self.within_static_or_const { - fold::noop_fold_block(b, self) - } else { - b.map(|b| { - let mut stmts = vec![]; - for s in b.stmts { - let old_blocks = self.nested_blocks.replace(vec![]); - - stmts.extend(self.fold_stmt(s).into_iter().filter(|s| s.is_item())); - - // we put a Some in there earlier with that replace(), so this is valid - let new_blocks = self.nested_blocks.take().unwrap(); - self.nested_blocks = old_blocks; - stmts.extend(new_blocks.into_iter().map(|b| block_to_stmt(b, &self.sess))); - } - - let mut new_block = ast::Block { - stmts, - ..b - }; - - if let Some(old_blocks) = self.nested_blocks.as_mut() { - //push our fresh block onto the cache and yield an empty block with `loop {}` - if !new_block.stmts.is_empty() { - old_blocks.push(new_block); - } - - stmt_to_block(b.rules, b.recovered, Some(loop_stmt), self.sess) - } else { - //push `loop {}` onto the end of our fresh block and yield that - new_block.stmts.push(loop_stmt); - - new_block - } - }) - } - } - - // in general the pretty printer processes unexpanded code, so - // we override the default `fold_mac` method which panics. - fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac { - fold::noop_fold_mac(mac, self) - } -} - fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, tcx: TyCtxt<'a, 'tcx, 'tcx>, code: blocks::Code<'tcx>, @@ -846,15 +621,15 @@ fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, let body_id = match code { blocks::Code::Expr(expr) => { // Find the function this expression is from. 
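The `ReplaceBodyWithLoop` pass deleted above now lives in `rustc_interface::util` (see the new `use rustc_interface::util::ReplaceBodyWithLoop;` import) and is driven through `visit_crate` further down. As a minimal sketch of what the `-Z unpretty=everybody_loops` rewrite does to an ordinary function (illustrative input, not part of this patch; `const fn` bodies and functions returning `impl Trait` are skipped, per the FIXME notes above):

```rust
// Input (illustrative): an ordinary, non-const function.
fn double(x: u32) -> u32 {
    x * 2
}

// What the rewrite leaves behind: the signature is preserved and the body
// becomes a diverging `loop {}`, so the item still type-checks without its
// real body. (The real pass keeps the original parameter names; `_x` is used
// here only to keep this sketch warning-free.)
fn double_rewritten(_x: u32) -> u32 {
    loop {}
}
```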
- let mut node_id = expr.id; + let mut hir_id = expr.hir_id; loop { - let node = tcx.hir().get(node_id); + let node = tcx.hir().get_by_hir_id(hir_id); if let Some(n) = hir::map::blocks::FnLikeNode::from_node(node) { break n.body(); } - let parent = tcx.hir().get_parent_node(node_id); - assert_ne!(node_id, parent); - node_id = parent; + let parent = tcx.hir().get_parent_node_by_hir_id(hir_id); + assert_ne!(hir_id, parent); + hir_id = parent; } } blocks::Code::FnLike(fn_like) => fn_like.body(), @@ -862,10 +637,20 @@ fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, let body = tcx.hir().body(body_id); let cfg = cfg::CFG::new(tcx, &body); let labelled_edges = mode != PpFlowGraphMode::UnlabelledEdges; + let hir_id = code.id(); + // We have to disassemble the hir_id because name must be ASCII + // alphanumeric. This does not appear in the rendered graph, so it does not + // have to be user friendly. + let name = format!( + "hir_id_{}_{}_{}", + hir_id.owner.address_space().index(), + hir_id.owner.as_array_index(), + hir_id.local_id.index(), + ); let lcfg = LabelledCFG { tcx, cfg: &cfg, - name: format!("node_{}", code.id()), + name, labelled_edges, }; @@ -902,17 +687,14 @@ fn print_flowgraph<'a, 'tcx, W: Write>(variants: Vec, } } -pub fn fold_crate(sess: &Session, krate: ast::Crate, ppm: PpMode) -> ast::Crate { +pub fn visit_crate(sess: &Session, krate: &mut ast::Crate, ppm: PpMode) { if let PpmSource(PpmEveryBodyLoops) = ppm { - let mut fold = ReplaceBodyWithLoop::new(sess); - fold.fold_crate(krate) - } else { - krate + ReplaceBodyWithLoop::new(sess).visit_crate(krate); } } fn get_source(input: &Input, sess: &Session) -> (Vec, FileName) { - let src_name = driver::source_name(input); + let src_name = source_name(input); let src = sess.source_map() .get_source_file(&src_name) .unwrap() @@ -968,35 +750,24 @@ pub fn print_after_parsing(sess: &Session, write_output(out, ofile); } -pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, - cstore: &'tcx CStore, - hir_map: &hir_map::Map<'tcx>, - analysis: &ty::CrateAnalysis, - resolutions: &Resolutions, - input: &Input, - krate: &ast::Crate, - crate_name: &str, - ppm: PpMode, - arenas: &'tcx AllArenas<'tcx>, - output_filenames: &OutputFilenames, - opt_uii: Option, - ofile: Option<&Path>) { +pub fn print_after_hir_lowering<'tcx>( + tcx: TyCtxt<'tcx, 'tcx, 'tcx>, + input: &Input, + krate: &ast::Crate, + ppm: PpMode, + opt_uii: Option, + ofile: Option<&Path>) { if ppm.needs_analysis() { - print_with_analysis(sess, - cstore, - hir_map, - analysis, - resolutions, - crate_name, - arenas, - output_filenames, - ppm, - opt_uii, - ofile); + abort_on_err(print_with_analysis( + tcx, + ppm, + opt_uii, + ofile + ), tcx.sess); return; } - let (src, src_name) = get_source(input, sess); + let (src, src_name) = get_source(input, tcx.sess); let mut rdr = &src[..]; let mut out = Vec::new(); @@ -1005,7 +776,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmSource(s), _) => { // Silently ignores an identified node. 
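For the graphviz node name introduced in the flowgraph hunk above, a tiny standalone illustration of the `hir_id_{}_{}_{}` shape (the index values below are made up; only the ASCII-alphanumeric/underscore form matters, since the name never appears in the rendered graph):

```rust
fn main() {
    // Stand-in values for the owner's address space, the owner's array index,
    // and the local id of a disassembled `HirId`.
    let (address_space, array_index, local_id) = (0, 17, 3);
    let name = format!("hir_id_{}_{}_{}", address_space, array_index, local_id);
    assert_eq!(name, "hir_id_0_17_3");
}
```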
let out: &mut dyn Write = &mut out; - s.call_with_pp_support(sess, Some(hir_map), move |annotation| { + s.call_with_pp_support(tcx.sess, Some(tcx), move |annotation| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust::print_crate(sess.source_map(), @@ -1021,15 +792,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHir(s), None) => { let out: &mut dyn Write = &mut out; - s.call_with_pp_support_hir(sess, - cstore, - hir_map, - analysis, - resolutions, - arenas, - output_filenames, - crate_name, - move |annotation, krate| { + s.call_with_pp_support_hir(tcx, move |annotation, krate| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); pprust_hir::print_crate(sess.source_map(), @@ -1045,15 +808,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHirTree(s), None) => { let out: &mut dyn Write = &mut out; - s.call_with_pp_support_hir(sess, - cstore, - hir_map, - analysis, - resolutions, - arenas, - output_filenames, - crate_name, - move |_annotation, krate| { + s.call_with_pp_support_hir(tcx, move |_annotation, krate| { debug!("pretty printing source code {:?}", s); write!(out, "{:#?}", krate) }) @@ -1061,15 +816,7 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHir(s), Some(uii)) => { let out: &mut dyn Write = &mut out; - s.call_with_pp_support_hir(sess, - cstore, - hir_map, - analysis, - resolutions, - arenas, - output_filenames, - crate_name, - move |annotation, _| { + s.call_with_pp_support_hir(tcx, move |annotation, _| { debug!("pretty printing source code {:?}", s); let sess = annotation.sess(); let hir_map = annotation.hir_map().expect("-Z unpretty missing HIR map"); @@ -1095,18 +842,10 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, (PpmHirTree(s), Some(uii)) => { let out: &mut dyn Write = &mut out; - s.call_with_pp_support_hir(sess, - cstore, - hir_map, - analysis, - resolutions, - arenas, - output_filenames, - crate_name, - move |_annotation, _krate| { + s.call_with_pp_support_hir(tcx, move |_annotation, _krate| { debug!("pretty printing source code {:?}", s); - for node_id in uii.all_matching_node_ids(hir_map) { - let node = hir_map.get(node_id); + for node_id in uii.all_matching_node_ids(tcx.hir()) { + let node = tcx.hir().get(node_id); write!(out, "{:#?}", node)?; } Ok(()) @@ -1121,23 +860,18 @@ pub fn print_after_hir_lowering<'tcx, 'a: 'tcx>(sess: &'a Session, } // In an ideal world, this would be a public function called by the driver after -// analsysis is performed. However, we want to call `phase_3_run_analysis_passes` +// analysis is performed. However, we want to call `phase_3_run_analysis_passes` // with a different callback than the standard driver, so that isn't easy. // Instead, we call that function ourselves. 
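The refactored entry point below can be summarized by this simplified sketch (imports as they appear at the top of this file; the real function additionally selects among the MIR, MIR-CFG, and flowgraph outputs): it runs the analysis query itself and hands any `ErrorReported` back to the caller, which wraps the call in `abort_on_err` as shown earlier.

```rust
use rustc::hir::def_id::LOCAL_CRATE;
use rustc::ty::TyCtxt;
use rustc::util::common::ErrorReported;

// Simplified sketch only, not the actual implementation that follows.
fn print_with_analysis_sketch<'tcx>(
    tcx: TyCtxt<'_, 'tcx, 'tcx>,
) -> Result<(), ErrorReported> {
    // Run the analysis queries; on error, the `?` propagates `ErrorReported`
    // to the caller, which aborts via `abort_on_err`.
    tcx.analysis(LOCAL_CRATE)?;
    // ... produce the requested pretty-printed output here ...
    Ok(())
}
```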
-fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, - cstore: &'a CStore, - hir_map: &hir_map::Map<'tcx>, - analysis: &ty::CrateAnalysis, - resolutions: &Resolutions, - crate_name: &str, - arenas: &'tcx AllArenas<'tcx>, - output_filenames: &OutputFilenames, - ppm: PpMode, - uii: Option, - ofile: Option<&Path>) { +fn print_with_analysis<'tcx>( + tcx: TyCtxt<'_, 'tcx, 'tcx>, + ppm: PpMode, + uii: Option, + ofile: Option<&Path> +) -> Result<(), ErrorReported> { let nodeid = if let Some(uii) = uii { debug!("pretty printing for {:?}", uii); - Some(uii.to_one_node_id("-Z unpretty", sess, &hir_map)) + Some(uii.to_one_node_id("-Z unpretty", tcx.sess, tcx.hir())) } else { debug!("pretty printing for whole crate"); None @@ -1145,67 +879,57 @@ fn print_with_analysis<'tcx, 'a: 'tcx>(sess: &'a Session, let mut out = Vec::new(); - let control = &driver::CompileController::basic(); - let codegen_backend = ::get_codegen_backend(sess); - abort_on_err(driver::phase_3_run_analysis_passes(&*codegen_backend, - control, - sess, - cstore, - hir_map.clone(), - analysis.clone(), - resolutions.clone(), - arenas, - crate_name, - output_filenames, - |tcx, _, _, _| { - match ppm { - PpmMir | PpmMirCFG => { - if let Some(nodeid) = nodeid { - let def_id = tcx.hir().local_def_id(nodeid); - match ppm { - PpmMir => write_mir_pretty(tcx, Some(def_id), &mut out), - PpmMirCFG => write_mir_graphviz(tcx, Some(def_id), &mut out), - _ => unreachable!(), - }?; - } else { - match ppm { - PpmMir => write_mir_pretty(tcx, None, &mut out), - PpmMirCFG => write_mir_graphviz(tcx, None, &mut out), - _ => unreachable!(), - }?; - } - Ok(()) + tcx.analysis(LOCAL_CRATE)?; + + let mut print = || match ppm { + PpmMir | PpmMirCFG => { + if let Some(nodeid) = nodeid { + let def_id = tcx.hir().local_def_id(nodeid); + match ppm { + PpmMir => write_mir_pretty(tcx, Some(def_id), &mut out), + PpmMirCFG => write_mir_graphviz(tcx, Some(def_id), &mut out), + _ => unreachable!(), + }?; + } else { + match ppm { + PpmMir => write_mir_pretty(tcx, None, &mut out), + PpmMirCFG => write_mir_graphviz(tcx, None, &mut out), + _ => unreachable!(), + }?; } - PpmFlowGraph(mode) => { - let nodeid = - nodeid.expect("`pretty flowgraph=..` needs NodeId (int) or unique path \ - suffix (b::c::d)"); - let node = tcx.hir().find(nodeid).unwrap_or_else(|| { - tcx.sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid)) - }); + Ok(()) + } + PpmFlowGraph(mode) => { + let nodeid = + nodeid.expect("`pretty flowgraph=..` needs NodeId (int) or unique path \ + suffix (b::c::d)"); + let node = tcx.hir().find(nodeid).unwrap_or_else(|| { + tcx.sess.fatal(&format!("--pretty flowgraph couldn't find id: {}", nodeid)) + }); - match blocks::Code::from_node(&tcx.hir(), nodeid) { - Some(code) => { - let variants = gather_flowgraph_variants(tcx.sess); + match blocks::Code::from_node(&tcx.hir(), nodeid) { + Some(code) => { + let variants = gather_flowgraph_variants(tcx.sess); - let out: &mut dyn Write = &mut out; + let out: &mut dyn Write = &mut out; - print_flowgraph(variants, tcx, code, mode, out) - } - None => { - let message = format!("--pretty=flowgraph needs block, fn, or method; \ - got {:?}", - node); + print_flowgraph(variants, tcx, code, mode, out) + } + None => { + let message = format!("--pretty=flowgraph needs block, fn, or method; \ + got {:?}", + node); - tcx.sess.span_fatal(tcx.hir().span(nodeid), &message) - } + tcx.sess.span_fatal(tcx.hir().span(nodeid), &message) } } - _ => unreachable!(), } - }), - sess) - .unwrap(); + _ => unreachable!(), + }; + + 
print().unwrap(); write_output(out, ofile); + + Ok(()) } diff --git a/src/librustc_driver/proc_macro_decls.rs b/src/librustc_driver/proc_macro_decls.rs deleted file mode 100644 index 136a27b1ced47..0000000000000 --- a/src/librustc_driver/proc_macro_decls.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::hir::itemlikevisit::ItemLikeVisitor; -use rustc::hir::map::Map; -use rustc::hir; -use syntax::ast; -use syntax::attr; - -pub fn find(hir_map: &Map) -> Option { - let krate = hir_map.krate(); - - let mut finder = Finder { decls: None }; - krate.visit_all_item_likes(&mut finder); - finder.decls -} - -struct Finder { - decls: Option, -} - -impl<'v> ItemLikeVisitor<'v> for Finder { - fn visit_item(&mut self, item: &hir::Item) { - if attr::contains_name(&item.attrs, "rustc_proc_macro_decls") { - self.decls = Some(item.id); - } - } - - fn visit_trait_item(&mut self, _trait_item: &hir::TraitItem) { - } - - fn visit_impl_item(&mut self, _impl_item: &hir::ImplItem) { - } -} - diff --git a/src/librustc_driver/profile/mod.rs b/src/librustc_driver/profile/mod.rs deleted file mode 100644 index d334a9476ce24..0000000000000 --- a/src/librustc_driver/profile/mod.rs +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::session::Session; -use rustc::util::common::{ProfQDumpParams, ProfileQueriesMsg, profq_msg, profq_set_chan}; -use std::sync::mpsc::{Receiver}; -use std::io::{Write}; -use rustc::dep_graph::{DepNode}; -use std::time::{Duration, Instant}; - -pub mod trace; - -/// begin a profile thread, if not already running -pub fn begin(sess: &Session) { - use std::thread; - use std::sync::mpsc::{channel}; - let (tx, rx) = channel(); - if profq_set_chan(sess, tx) { - thread::spawn(move || profile_queries_thread(rx)); - } -} - -/// dump files with profiling information to the given base path, and -/// wait for this dump to complete. -/// -/// wraps the RPC (send/recv channel logic) of requesting a dump. -pub fn dump(sess: &Session, path: String) { - use std::sync::mpsc::{channel}; - let (tx, rx) = channel(); - let params = ProfQDumpParams { - path, - ack: tx, - // FIXME: Add another compiler flag to toggle whether this log - // is written; false for now - dump_profq_msg_log: true, - }; - profq_msg(sess, ProfileQueriesMsg::Dump(params)); - let _ = rx.recv().unwrap(); -} - -// State for parsing recursive trace structure in separate thread, via messages -#[derive(Clone, Eq, PartialEq)] -enum ParseState { - // No (local) parse state; may be parsing a tree, focused on a - // sub-tree that could be anything. 
- Clear, - // Have Query information from the last message - HaveQuery(trace::Query, Instant), - // Have "time-begin" information from the last message (doit flag, and message) - HaveTimeBegin(String, Instant), - // Have "task-begin" information from the last message - HaveTaskBegin(DepNode, Instant), -} -struct StackFrame { - pub parse_st: ParseState, - pub traces: Vec, -} - -fn total_duration(traces: &[trace::Rec]) -> Duration { - let mut sum : Duration = Duration::new(0, 0); - for t in traces.iter() { sum += t.dur_total; } - return sum -} - -// profiling thread; retains state (in local variables) and dump traces, upon request. -fn profile_queries_thread(r: Receiver) { - use self::trace::*; - use std::fs::File; - use std::time::{Instant}; - - let mut profq_msgs: Vec = vec![]; - let mut frame: StackFrame = StackFrame { parse_st: ParseState::Clear, traces: vec![] }; - let mut stack: Vec = vec![]; - loop { - let msg = r.recv(); - if let Err(_recv_err) = msg { - // FIXME: Perhaps do something smarter than simply quitting? - break - }; - let msg = msg.unwrap(); - debug!("profile_queries_thread: {:?}", msg); - - // Meta-level versus _actual_ queries messages - match msg { - ProfileQueriesMsg::Halt => return, - ProfileQueriesMsg::Dump(params) => { - assert!(stack.is_empty()); - assert!(frame.parse_st == ParseState::Clear); - { - // write log of all messages - if params.dump_profq_msg_log { - let mut log_file = - File::create(format!("{}.log.txt", params.path)).unwrap(); - for m in profq_msgs.iter() { - writeln!(&mut log_file, "{:?}", m).unwrap() - }; - } - - // write HTML file, and counts file - let html_path = format!("{}.html", params.path); - let mut html_file = File::create(&html_path).unwrap(); - - let counts_path = format!("{}.counts.txt", params.path); - let mut counts_file = File::create(&counts_path).unwrap(); - - writeln!(html_file, - "\n\n", - "profile_queries.css").unwrap(); - writeln!(html_file, "\n\n").unwrap(); - trace::write_traces(&mut html_file, &mut counts_file, &frame.traces); - writeln!(html_file, "\n").unwrap(); - - let ack_path = format!("{}.ack", params.path); - let ack_file = File::create(&ack_path).unwrap(); - drop(ack_file); - - // Tell main thread that we are done, e.g., so it can exit - params.ack.send(()).unwrap(); - } - continue - } - // Actual query message: - msg => { - // Record msg in our log - profq_msgs.push(msg.clone()); - // Respond to the message, knowing that we've already handled Halt and Dump, above. 
- match (frame.parse_st.clone(), msg) { - (_,ProfileQueriesMsg::Halt) => unreachable!(), - (_,ProfileQueriesMsg::Dump(_)) => unreachable!(), - - // Parse State: Clear - (ParseState::Clear, - ProfileQueriesMsg::QueryBegin(span, querymsg)) => { - let start = Instant::now(); - frame.parse_st = ParseState::HaveQuery - (Query { span, msg: querymsg }, start) - }, - (ParseState::Clear, - ProfileQueriesMsg::CacheHit) => { - panic!("parse error: unexpected CacheHit; expected QueryBegin") - }, - (ParseState::Clear, - ProfileQueriesMsg::ProviderBegin) => { - panic!("parse error: expected QueryBegin before beginning a provider") - }, - (ParseState::Clear, - ProfileQueriesMsg::ProviderEnd) => { - let provider_extent = frame.traces; - match stack.pop() { - None => - panic!("parse error: expected a stack frame; found an empty stack"), - Some(old_frame) => { - match old_frame.parse_st { - ParseState::HaveQuery(q, start) => { - let duration = start.elapsed(); - frame = StackFrame{ - parse_st:ParseState::Clear, - traces:old_frame.traces - }; - let dur_extent = total_duration(&provider_extent); - let trace = Rec { - effect: Effect::QueryBegin(q, CacheCase::Miss), - extent: Box::new(provider_extent), - start: start, - dur_self: duration - dur_extent, - dur_total: duration, - }; - frame.traces.push( trace ); - }, - _ => panic!("internal parse error: malformed parse stack") - } - } - } - }, - - - (ParseState::Clear, - ProfileQueriesMsg::TimeBegin(msg)) => { - let start = Instant::now(); - frame.parse_st = ParseState::HaveTimeBegin(msg, start); - stack.push(frame); - frame = StackFrame{parse_st:ParseState::Clear, traces:vec![]}; - }, - (_, ProfileQueriesMsg::TimeBegin(_)) => - panic!("parse error; did not expect time begin here"), - - (ParseState::Clear, - ProfileQueriesMsg::TimeEnd) => { - let provider_extent = frame.traces; - match stack.pop() { - None => - panic!("parse error: expected a stack frame; found an empty stack"), - Some(old_frame) => { - match old_frame.parse_st { - ParseState::HaveTimeBegin(msg, start) => { - let duration = start.elapsed(); - frame = StackFrame{ - parse_st:ParseState::Clear, - traces:old_frame.traces - }; - let dur_extent = total_duration(&provider_extent); - let trace = Rec { - effect: Effect::TimeBegin(msg), - extent: Box::new(provider_extent), - start: start, - dur_total: duration, - dur_self: duration - dur_extent, - }; - frame.traces.push( trace ); - }, - _ => panic!("internal parse error: malformed parse stack") - } - } - } - }, - (_, ProfileQueriesMsg::TimeEnd) => { panic!("parse error") } - - (ParseState::Clear, - ProfileQueriesMsg::TaskBegin(key)) => { - let start = Instant::now(); - frame.parse_st = ParseState::HaveTaskBegin(key, start); - stack.push(frame); - frame = StackFrame{parse_st:ParseState::Clear, traces:vec![]}; - }, - (_, ProfileQueriesMsg::TaskBegin(_)) => - panic!("parse error; did not expect time begin here"), - - (ParseState::Clear, - ProfileQueriesMsg::TaskEnd) => { - let provider_extent = frame.traces; - match stack.pop() { - None => - panic!("parse error: expected a stack frame; found an empty stack"), - Some(old_frame) => { - match old_frame.parse_st { - ParseState::HaveTaskBegin(key, start) => { - let duration = start.elapsed(); - frame = StackFrame{ - parse_st:ParseState::Clear, - traces:old_frame.traces - }; - let dur_extent = total_duration(&provider_extent); - let trace = Rec { - effect: Effect::TaskBegin(key), - extent: Box::new(provider_extent), - start: start, - dur_total: duration, - dur_self: duration - dur_extent, - }; - frame.traces.push( 
trace ); - }, - _ => panic!("internal parse error: malformed parse stack") - } - } - } - }, - (_, ProfileQueriesMsg::TaskEnd) => { panic!("parse error") } - - // Parse State: HaveQuery - (ParseState::HaveQuery(q,start), - ProfileQueriesMsg::CacheHit) => { - let duration = start.elapsed(); - let trace : Rec = Rec{ - effect: Effect::QueryBegin(q, CacheCase::Hit), - extent: Box::new(vec![]), - start: start, - dur_self: duration, - dur_total: duration, - }; - frame.traces.push( trace ); - frame.parse_st = ParseState::Clear; - }, - (ParseState::HaveQuery(_,_), - ProfileQueriesMsg::ProviderBegin) => { - stack.push(frame); - frame = StackFrame{parse_st:ParseState::Clear, traces:vec![]}; - }, - - // Parse errors: - - (ParseState::HaveQuery(q,_), - ProfileQueriesMsg::ProviderEnd) => { - panic!("parse error: unexpected ProviderEnd; \ - expected something else to follow BeginQuery for {:?}", q) - }, - (ParseState::HaveQuery(q1,_), - ProfileQueriesMsg::QueryBegin(span2,querymsg2)) => { - panic!("parse error: unexpected QueryBegin; \ - earlier query is unfinished: {:?} and now {:?}", - q1, Query{span:span2, msg:querymsg2}) - }, - - (ParseState::HaveTimeBegin(_, _), _) => { - unreachable!() - }, - (ParseState::HaveTaskBegin(_, _), _) => { - unreachable!() - }, - } - } - } - } -} diff --git a/src/librustc_driver/test.rs b/src/librustc_driver/test.rs deleted file mode 100644 index f9d49f03ee044..0000000000000 --- a/src/librustc_driver/test.rs +++ /dev/null @@ -1,724 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
# Standalone Tests for the Inference Module - -use driver; -use errors; -use errors::emitter::Emitter; -use errors::{DiagnosticBuilder, Level}; -use rustc::hir; -use rustc::hir::map as hir_map; -use rustc::infer::outlives::env::OutlivesEnvironment; -use rustc::infer::type_variable::TypeVariableOrigin; -use rustc::infer::{self, InferOk, InferResult, SuppressRegionErrors}; -use rustc::middle::region; -use rustc::session::config::{OutputFilenames, OutputTypes}; -use rustc::session::{self, config}; -use rustc::traits::ObligationCause; -use rustc::ty::query::OnDiskCache; -use rustc::ty::subst::Subst; -use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc_data_structures::sync::{self, Lrc}; -use rustc_lint; -use rustc_metadata::cstore::CStore; -use rustc_resolve::MakeGlobMap; -use rustc_target::spec::abi::Abi; -use syntax; -use syntax::ast; -use syntax::feature_gate::UnstableFeatures; -use syntax::source_map::{FileName, FilePathMapping, SourceMap}; -use syntax::symbol::Symbol; -use syntax_pos::DUMMY_SP; - -use std::path::PathBuf; -use std::sync::mpsc; - -struct Env<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { - infcx: &'a infer::InferCtxt<'a, 'gcx, 'tcx>, - region_scope_tree: &'a mut region::ScopeTree, - param_env: ty::ParamEnv<'tcx>, -} - -struct RH<'a> { - id: hir::ItemLocalId, - sub: &'a [RH<'a>], -} - -const EMPTY_SOURCE_STR: &'static str = "#![feature(no_core)] #![no_core]"; - -struct ExpectErrorEmitter { - messages: Vec, -} - -fn remove_message(e: &mut ExpectErrorEmitter, msg: &str, lvl: Level) { - match lvl { - Level::Bug | Level::Fatal | Level::Error => {} - _ => { - return; - } - } - - debug!("Error: {}", msg); - match e.messages.iter().position(|m| msg.contains(m)) { - Some(i) => { - e.messages.remove(i); - } - None => { - debug!("Unexpected error: {} Expected: {:?}", msg, e.messages); - panic!("Unexpected error: {} Expected: {:?}", msg, e.messages); - } - } -} - -impl Emitter for ExpectErrorEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - remove_message(self, &db.message(), db.level); - for child in &db.children { - remove_message(self, &child.message(), child.level); - } - } -} - -fn errors(msgs: &[&str]) -> (Box, usize) { - let v = msgs.iter().map(|m| m.to_string()).collect(); - ( - box ExpectErrorEmitter { messages: v } as Box, - msgs.len(), - ) -} - -fn test_env(source_string: &str, args: (Box, usize), body: F) -where - F: FnOnce(Env) + sync::Send, -{ - syntax::with_globals(|| { - let mut options = config::Options::default(); - options.debugging_opts.verbose = true; - options.unstable_features = UnstableFeatures::Allow; - - driver::spawn_thread_pool(options, |options| { - test_env_with_pool(options, source_string, args, body) - }) - }); -} - -fn test_env_with_pool( - options: config::Options, - source_string: &str, - (emitter, expected_err_count): (Box, usize), - body: F, -) where - F: FnOnce(Env), -{ - let diagnostic_handler = errors::Handler::with_emitter(true, false, emitter); - let sess = session::build_session_( - options, - None, - diagnostic_handler, - Lrc::new(SourceMap::new(FilePathMapping::empty())), - ); - let cstore = CStore::new(::get_codegen_backend(&sess).metadata_loader()); - rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - let input = config::Input::Str { - name: FileName::anon_source_code(&source_string), - input: source_string.to_string(), - }; - let krate = - driver::phase_1_parse_input(&driver::CompileController::basic(), &sess, &input).unwrap(); - let driver::ExpansionResult { - defs, - resolutions, - mut hir_forest, - .. 
- } = { - driver::phase_2_configure_and_expand( - &sess, - &cstore, - krate, - None, - "test", - None, - MakeGlobMap::No, - |_| Ok(()), - ).expect("phase 2 aborted") - }; - - let arenas = ty::AllArenas::new(); - let hir_map = hir_map::map_crate(&sess, &cstore, &mut hir_forest, &defs); - - // Run just enough stuff to build a tcx. - let (tx, _rx) = mpsc::channel(); - let outputs = OutputFilenames { - out_directory: PathBuf::new(), - out_filestem: String::new(), - single_output_file: None, - extra: String::new(), - outputs: OutputTypes::new(&[]), - }; - TyCtxt::create_and_enter( - &sess, - &cstore, - ty::query::Providers::default(), - ty::query::Providers::default(), - &arenas, - resolutions, - hir_map, - OnDiskCache::new_empty(sess.source_map()), - "test_crate", - tx, - &outputs, - |tcx| { - tcx.infer_ctxt().enter(|infcx| { - let mut region_scope_tree = region::ScopeTree::default(); - let param_env = ty::ParamEnv::empty(); - body(Env { - infcx: &infcx, - region_scope_tree: &mut region_scope_tree, - param_env: param_env, - }); - let outlives_env = OutlivesEnvironment::new(param_env); - let def_id = tcx.hir().local_def_id(ast::CRATE_NODE_ID); - infcx.resolve_regions_and_report_errors( - def_id, - ®ion_scope_tree, - &outlives_env, - SuppressRegionErrors::default(), - ); - assert_eq!(tcx.sess.err_count(), expected_err_count); - }); - }, - ); -} - -fn d1() -> ty::DebruijnIndex { - ty::INNERMOST -} - -fn d2() -> ty::DebruijnIndex { - d1().shifted_in(1) -} - -impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> { - pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { - self.infcx.tcx - } - - pub fn create_region_hierarchy( - &mut self, - rh: &RH, - parent: (region::Scope, region::ScopeDepth), - ) { - let me = region::Scope { - id: rh.id, - data: region::ScopeData::Node, - }; - self.region_scope_tree.record_scope_parent(me, Some(parent)); - for child_rh in rh.sub { - self.create_region_hierarchy(child_rh, (me, parent.1 + 1)); - } - } - - pub fn create_simple_region_hierarchy(&mut self) { - // Creates a region hierarchy where 1 is root, 10 and 11 are - // children of 1, etc. - - let dscope = region::Scope { - id: hir::ItemLocalId::from_u32(1), - data: region::ScopeData::Destruction, - }; - self.region_scope_tree.record_scope_parent(dscope, None); - self.create_region_hierarchy( - &RH { - id: hir::ItemLocalId::from_u32(1), - sub: &[ - RH { - id: hir::ItemLocalId::from_u32(10), - sub: &[], - }, - RH { - id: hir::ItemLocalId::from_u32(11), - sub: &[], - }, - ], - }, - (dscope, 1), - ); - } - - #[allow(dead_code)] // this seems like it could be useful, even if we don't use it now - pub fn lookup_item(&self, names: &[String]) -> ast::NodeId { - return match search_mod(self, &self.infcx.tcx.hir().krate().module, 0, names) { - Some(id) => id, - None => { - panic!("no item found: `{}`", names.join("::")); - } - }; - - fn search_mod( - this: &Env, - m: &hir::Mod, - idx: usize, - names: &[String], - ) -> Option { - assert!(idx < names.len()); - for item in &m.item_ids { - let item = this.infcx.tcx.hir().expect_item(item.id); - if item.name.to_string() == names[idx] { - return search(this, item, idx + 1, names); - } - } - return None; - } - - fn search(this: &Env, it: &hir::Item, idx: usize, names: &[String]) -> Option { - if idx == names.len() { - return Some(it.id); - } - - return match it.node { - hir::ItemKind::Use(..) - | hir::ItemKind::ExternCrate(..) - | hir::ItemKind::Const(..) - | hir::ItemKind::Static(..) - | hir::ItemKind::Fn(..) - | hir::ItemKind::ForeignMod(..) - | hir::ItemKind::GlobalAsm(..) 
- | hir::ItemKind::Existential(..) - | hir::ItemKind::Ty(..) => None, - - hir::ItemKind::Enum(..) - | hir::ItemKind::Struct(..) - | hir::ItemKind::Union(..) - | hir::ItemKind::Trait(..) - | hir::ItemKind::TraitAlias(..) - | hir::ItemKind::Impl(..) => None, - - hir::ItemKind::Mod(ref m) => search_mod(this, m, idx, names), - }; - } - } - - pub fn make_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - match self.infcx - .at(&ObligationCause::dummy(), self.param_env) - .sub(a, b) - { - Ok(_) => true, - Err(ref e) => panic!("Encountered error: {}", e), - } - } - - pub fn is_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool { - self.infcx.can_sub(self.param_env, a, b).is_ok() - } - - pub fn assert_subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) { - if !self.is_subtype(a, b) { - panic!("{} is not a subtype of {}, but it should be", a, b); - } - } - - pub fn assert_eq(&self, a: Ty<'tcx>, b: Ty<'tcx>) { - self.assert_subtype(a, b); - self.assert_subtype(b, a); - } - - pub fn t_fn(&self, input_tys: &[Ty<'tcx>], output_ty: Ty<'tcx>) -> Ty<'tcx> { - self.infcx - .tcx - .mk_fn_ptr(ty::Binder::bind(self.infcx.tcx.mk_fn_sig( - input_tys.iter().cloned(), - output_ty, - false, - hir::Unsafety::Normal, - Abi::Rust, - ))) - } - - pub fn t_nil(&self) -> Ty<'tcx> { - self.infcx.tcx.mk_unit() - } - - pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> { - self.infcx.tcx.intern_tup(&[ty1, ty2]) - } - - pub fn t_param(&self, index: u32) -> Ty<'tcx> { - let name = format!("T{}", index); - self.infcx - .tcx - .mk_ty_param(index, Symbol::intern(&name).as_interned_str()) - } - - pub fn re_early_bound(&self, index: u32, name: &'static str) -> ty::Region<'tcx> { - let name = Symbol::intern(name).as_interned_str(); - self.infcx - .tcx - .mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { - def_id: self.infcx.tcx.hir().local_def_id(ast::CRATE_NODE_ID), - index, - name, - })) - } - - pub fn re_late_bound_with_debruijn( - &self, - id: u32, - debruijn: ty::DebruijnIndex, - ) -> ty::Region<'tcx> { - self.infcx - .tcx - .mk_region(ty::ReLateBound(debruijn, ty::BrAnon(id))) - } - - pub fn t_rptr(&self, r: ty::Region<'tcx>) -> Ty<'tcx> { - self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) - } - - pub fn t_rptr_late_bound(&self, id: u32) -> Ty<'tcx> { - let r = self.re_late_bound_with_debruijn(id, d1()); - self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) - } - - pub fn t_rptr_late_bound_with_debruijn( - &self, - id: u32, - debruijn: ty::DebruijnIndex, - ) -> Ty<'tcx> { - let r = self.re_late_bound_with_debruijn(id, debruijn); - self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) - } - - pub fn t_rptr_scope(&self, id: u32) -> Ty<'tcx> { - let r = ty::ReScope(region::Scope { - id: hir::ItemLocalId::from_u32(id), - data: region::ScopeData::Node, - }); - self.infcx - .tcx - .mk_imm_ref(self.infcx.tcx.mk_region(r), self.tcx().types.isize) - } - - pub fn re_free(&self, id: u32) -> ty::Region<'tcx> { - self.infcx.tcx.mk_region(ty::ReFree(ty::FreeRegion { - scope: self.infcx.tcx.hir().local_def_id(ast::CRATE_NODE_ID), - bound_region: ty::BrAnon(id), - })) - } - - pub fn t_rptr_free(&self, id: u32) -> Ty<'tcx> { - let r = self.re_free(id); - self.infcx.tcx.mk_imm_ref(r, self.tcx().types.isize) - } - - pub fn sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) -> InferResult<'tcx, ()> { - self.infcx - .at(&ObligationCause::dummy(), self.param_env) - .sub(t1, t2) - } - - /// Checks that `t1 <: t2` is true (this may register additional - /// region checks). 
- pub fn check_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) { - match self.sub(t1, t2) { - Ok(InferOk { - obligations, - value: (), - }) => { - // None of these tests should require nested obligations. - assert!(obligations.is_empty()); - } - Err(ref e) => { - panic!("unexpected error computing sub({:?},{:?}): {}", t1, t2, e); - } - } - } - - /// Checks that `t1 <: t2` is false (this may register additional - /// region checks). - pub fn check_not_sub(&self, t1: Ty<'tcx>, t2: Ty<'tcx>) { - match self.sub(t1, t2) { - Err(_) => {} - Ok(_) => { - panic!("unexpected success computing sub({:?},{:?})", t1, t2); - } - } - } -} - -#[test] -fn contravariant_region_ptr_ok() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |mut env| { - env.create_simple_region_hierarchy(); - let t_rptr1 = env.t_rptr_scope(1); - let t_rptr10 = env.t_rptr_scope(10); - env.assert_eq(t_rptr1, t_rptr1); - env.assert_eq(t_rptr10, t_rptr10); - env.make_subtype(t_rptr1, t_rptr10); - }) -} - -#[test] -fn contravariant_region_ptr_err() { - test_env(EMPTY_SOURCE_STR, errors(&["mismatched types"]), |mut env| { - env.create_simple_region_hierarchy(); - let t_rptr1 = env.t_rptr_scope(1); - let t_rptr10 = env.t_rptr_scope(10); - env.assert_eq(t_rptr1, t_rptr1); - env.assert_eq(t_rptr10, t_rptr10); - - // This will cause an error when regions are resolved. - env.make_subtype(t_rptr10, t_rptr1); - }) -} - -#[test] -fn sub_free_bound_false() { - //! Test that: - //! - //! fn(&'a isize) <: for<'b> fn(&'b isize) - //! - //! *does not* hold. - - test_env(EMPTY_SOURCE_STR, errors(&[]), |mut env| { - env.create_simple_region_hierarchy(); - let t_rptr_free1 = env.t_rptr_free(1); - let t_rptr_bound1 = env.t_rptr_late_bound(1); - env.check_not_sub( - env.t_fn(&[t_rptr_free1], env.tcx().types.isize), - env.t_fn(&[t_rptr_bound1], env.tcx().types.isize), - ); - }) -} - -#[test] -fn sub_bound_free_true() { - //! Test that: - //! - //! for<'a> fn(&'a isize) <: fn(&'b isize) - //! - //! *does* hold. - - test_env(EMPTY_SOURCE_STR, errors(&[]), |mut env| { - env.create_simple_region_hierarchy(); - let t_rptr_bound1 = env.t_rptr_late_bound(1); - let t_rptr_free1 = env.t_rptr_free(1); - env.check_sub( - env.t_fn(&[t_rptr_bound1], env.tcx().types.isize), - env.t_fn(&[t_rptr_free1], env.tcx().types.isize), - ); - }) -} - -#[test] -fn sub_free_bound_false_infer() { - //! Test that: - //! - //! fn(_#1) <: for<'b> fn(&'b isize) - //! - //! does NOT hold for any instantiation of `_#1`. - - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let t_infer1 = env.infcx - .next_ty_var(TypeVariableOrigin::MiscVariable(DUMMY_SP)); - let t_rptr_bound1 = env.t_rptr_late_bound(1); - env.check_not_sub( - env.t_fn(&[t_infer1], env.tcx().types.isize), - env.t_fn(&[t_rptr_bound1], env.tcx().types.isize), - ); - }) -} - -/// Test substituting a bound region into a function, which introduces another level of binding. -/// This requires adjusting the Debruijn index. 
-#[test] -fn subst_ty_renumber_bound() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - // Situation: - // Theta = [A -> &'a foo] - - let t_rptr_bound1 = env.t_rptr_late_bound(1); - - // t_source = fn(A) - let t_source = { - let t_param = env.t_param(0); - env.t_fn(&[t_param], env.t_nil()) - }; - - let substs = env.infcx.tcx.intern_substs(&[t_rptr_bound1.into()]); - let t_substituted = t_source.subst(env.infcx.tcx, substs); - - // t_expected = fn(&'a isize) - let t_expected = { - let t_ptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2()); - env.t_fn(&[t_ptr_bound2], env.t_nil()) - }; - - debug!( - "subst_bound: t_source={:?} substs={:?} t_substituted={:?} t_expected={:?}", - t_source, substs, t_substituted, t_expected - ); - - assert_eq!(t_substituted, t_expected); - }) -} - -/// Test substituting a bound region into a function, which introduces another level of binding. -/// This requires adjusting the Debruijn index. -#[test] -fn subst_ty_renumber_some_bounds() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - // Situation: - // `Theta = [A -> &'a foo]` - - let t_rptr_bound1 = env.t_rptr_late_bound(1); - - // `t_source = (A, fn(A))` - let t_source = { - let t_param = env.t_param(0); - env.t_pair(t_param, env.t_fn(&[t_param], env.t_nil())) - }; - - let substs = env.infcx.tcx.intern_substs(&[t_rptr_bound1.into()]); - let t_substituted = t_source.subst(env.infcx.tcx, substs); - - // `t_expected = (&'a isize, fn(&'a isize))` - // - // However, note that the Debruijn index is different in the different cases. - let t_expected = { - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2()); - env.t_pair(t_rptr_bound1, env.t_fn(&[t_rptr_bound2], env.t_nil())) - }; - - debug!( - "subst_bound: t_source={:?} substs={:?} t_substituted={:?} t_expected={:?}", - t_source, substs, t_substituted, t_expected - ); - - assert_eq!(t_substituted, t_expected); - }) -} - -/// Test that we correctly compute whether a type has escaping regions or not. -#[test] -fn escaping() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |mut env| { - // Situation: - // `Theta = [A -> &'a foo]` - env.create_simple_region_hierarchy(); - - assert!(!env.t_nil().has_escaping_bound_vars()); - - let t_rptr_free1 = env.t_rptr_free(1); - assert!(!t_rptr_free1.has_escaping_bound_vars()); - - let t_rptr_bound1 = env.t_rptr_late_bound_with_debruijn(1, d1()); - assert!(t_rptr_bound1.has_escaping_bound_vars()); - - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2()); - assert!(t_rptr_bound2.has_escaping_bound_vars()); - - // `t_fn = fn(A)` - let t_param = env.t_param(0); - assert!(!t_param.has_escaping_bound_vars()); - let t_fn = env.t_fn(&[t_param], env.t_nil()); - assert!(!t_fn.has_escaping_bound_vars()); - }) -} - -/// Test applying a substitution where the value being substituted for an early-bound region is a -/// late-bound region. -#[test] -fn subst_region_renumber_region() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let re_bound1 = env.re_late_bound_with_debruijn(1, d1()); - - // `type t_source<'a> = fn(&'a isize)` - let t_source = { - let re_early = env.re_early_bound(0, "'a"); - env.t_fn(&[env.t_rptr(re_early)], env.t_nil()) - }; - - let substs = env.infcx.tcx.intern_substs(&[re_bound1.into()]); - let t_substituted = t_source.subst(env.infcx.tcx, substs); - - // `t_expected = fn(&'a isize)` - // - // but not that the Debruijn index is different in the different cases. 
- let t_expected = { - let t_rptr_bound2 = env.t_rptr_late_bound_with_debruijn(1, d2()); - env.t_fn(&[t_rptr_bound2], env.t_nil()) - }; - - debug!( - "subst_bound: t_source={:?} substs={:?} t_substituted={:?} t_expected={:?}", - t_source, substs, t_substituted, t_expected - ); - - assert_eq!(t_substituted, t_expected); - }) -} - -#[test] -fn walk_ty() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let tcx = env.infcx.tcx; - let int_ty = tcx.types.isize; - let usize_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty]); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty]); - let walked: Vec<_> = tup2_ty.walk().collect(); - assert_eq!( - walked, - [ - tup2_ty, tup1_ty, int_ty, usize_ty, int_ty, usize_ty, tup1_ty, int_ty, usize_ty, - int_ty, usize_ty, usize_ty - ] - ); - }) -} - -#[test] -fn walk_ty_skip_subtree() { - test_env(EMPTY_SOURCE_STR, errors(&[]), |env| { - let tcx = env.infcx.tcx; - let int_ty = tcx.types.isize; - let usize_ty = tcx.types.usize; - let tup1_ty = tcx.intern_tup(&[int_ty, usize_ty, int_ty, usize_ty]); - let tup2_ty = tcx.intern_tup(&[tup1_ty, tup1_ty, usize_ty]); - - // types we expect to see (in order), plus a boolean saying - // whether to skip the subtree. - let mut expected = vec![ - (tup2_ty, false), - (tup1_ty, false), - (int_ty, false), - (usize_ty, false), - (int_ty, false), - (usize_ty, false), - (tup1_ty, true), // skip the isize/usize/isize/usize - (usize_ty, false), - ]; - expected.reverse(); - - let mut walker = tup2_ty.walk(); - while let Some(t) = walker.next() { - debug!("walked to {:?}", t); - let (expected_ty, skip) = expected.pop().unwrap(); - assert_eq!(t, expected_ty); - if skip { - walker.skip_current_subtree(); - } - } - - assert!(expected.is_empty()); - }) -} diff --git a/src/librustc_errors/Cargo.toml b/src/librustc_errors/Cargo.toml index b24f8ddf4d9f7..02c011857bd2a 100644 --- a/src/librustc_errors/Cargo.toml +++ b/src/librustc_errors/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_errors" version = "0.0.0" +edition = "2018" [lib] name = "rustc_errors" diff --git a/src/librustc_errors/diagnostic.rs b/src/librustc_errors/diagnostic.rs index ea425ad4c47a2..fc1fd960c4ace 100644 --- a/src/librustc_errors/diagnostic.rs +++ b/src/librustc_errors/diagnostic.rs @@ -1,21 +1,12 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use CodeSuggestion; -use SubstitutionPart; -use Substitution; -use Applicability; -use Level; +use crate::CodeSuggestion; +use crate::SuggestionStyle; +use crate::SubstitutionPart; +use crate::Substitution; +use crate::Applicability; +use crate::Level; +use crate::snippet::Style; use std::fmt; use syntax_pos::{MultiSpan, Span}; -use snippet::Style; #[must_use] #[derive(Clone, Debug, PartialEq, Hash, RustcEncodable, RustcDecodable)] @@ -128,7 +119,7 @@ impl Diagnostic { self.level == Level::Cancelled } - /// Add a span/label to be included in the resulting snippet. + /// Adds a span/label to be included in the resulting snippet. /// This is pushed onto the `MultiSpan` that was created when the /// diagnostic was first built. 
If you don't call this function at /// all, and you just supplied a `Span` to create the diagnostic, @@ -239,44 +230,72 @@ impl Diagnostic { self } - /// Prints out a message with a suggested edit of the code. If the suggestion is presented - /// inline it will only show the text message and not the text. + pub fn multipart_suggestion( + &mut self, + msg: &str, + suggestion: Vec<(Span, String)>, + applicability: Applicability, + ) -> &mut Self { + self.suggestions.push(CodeSuggestion { + substitutions: vec![Substitution { + parts: suggestion + .into_iter() + .map(|(span, snippet)| SubstitutionPart { snippet, span }) + .collect(), + }], + msg: msg.to_owned(), + style: SuggestionStyle::ShowCode, + applicability, + }); + self + } + + /// Prints out a message with for a multipart suggestion without showing the suggested code. /// - /// See `CodeSuggestion` for more information. - #[deprecated(note = "Use `span_suggestion_short_with_applicability`")] - pub fn span_suggestion_short(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self { + /// This is intended to be used for suggestions that are obvious in what the changes need to + /// be from the message, showing the span label inline would be visually unpleasant + /// (marginally overlapping spans or multiline spans) and showing the snippet window wouldn't + /// improve understandability. + pub fn tool_only_multipart_suggestion( + &mut self, + msg: &str, + suggestion: Vec<(Span, String)>, + applicability: Applicability, + ) -> &mut Self { self.suggestions.push(CodeSuggestion { substitutions: vec![Substitution { - parts: vec![SubstitutionPart { - snippet: suggestion, - span: sp, - }], + parts: suggestion + .into_iter() + .map(|(span, snippet)| SubstitutionPart { snippet, span }) + .collect(), }], msg: msg.to_owned(), - show_code_when_inline: false, - applicability: Applicability::Unspecified, + style: SuggestionStyle::CompletelyHidden, + applicability, }); self } /// Prints out a message with a suggested edit of the code. /// - /// In case of short messages and a simple suggestion, - /// rustc displays it as a label like + /// In case of short messages and a simple suggestion, rustc displays it as a label: /// - /// "try adding parentheses: `(tup.0).1`" + /// ```text + /// try adding parentheses: `(tup.0).1` + /// ``` /// /// The message /// /// * should not end in any punctuation (a `:` is added automatically) - /// * should not be a question - /// * should not contain any parts like "the following", "as shown" + /// * should not be a question (avoid language like "did you mean") + /// * should not contain any phrases like "the following", "as shown", etc. /// * may look like "to do xyz, use" or "to do xyz, use abc" - /// * may contain a name of a function, variable or type, but not whole expressions + /// * may contain a name of a function, variable, or type, but not whole expressions /// /// See `CodeSuggestion` for more information. 
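Each of the public suggestion methods now records a `SuggestionStyle` (`ShowCode`, `HideCodeInline`, `HideCodeAlways`, or `CompletelyHidden`) instead of the old `show_code_when_inline` flag, and each takes the `Applicability` directly rather than going through a `*_with_applicability` variant. A hedged sketch of a caller of the consolidated `span_suggestion` (the surrounding error context, `sp` and `snippet`, is assumed; the message follows the guidelines listed above):

```rust
use rustc_errors::{Applicability, Diagnostic};
use syntax_pos::Span;

// Illustrative helper, not part of this patch: attach a machine-applicable
// "add parentheses" suggestion to an existing diagnostic.
fn suggest_parens(diag: &mut Diagnostic, sp: Span, snippet: String) {
    diag.span_suggestion(
        sp,
        "try adding parentheses",
        format!("({})", snippet),
        Applicability::MachineApplicable,
    );
}
```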
- #[deprecated(note = "Use `span_suggestion_with_applicability`")] - pub fn span_suggestion(&mut self, sp: Span, msg: &str, suggestion: String) -> &mut Self { + pub fn span_suggestion(&mut self, sp: Span, msg: &str, + suggestion: String, + applicability: Applicability) -> &mut Self { self.suggestions.push(CodeSuggestion { substitutions: vec![Substitution { parts: vec![SubstitutionPart { @@ -285,67 +304,37 @@ impl Diagnostic { }], }], msg: msg.to_owned(), - show_code_when_inline: true, - applicability: Applicability::Unspecified, - }); - self - } - - pub fn multipart_suggestion_with_applicability( - &mut self, - msg: &str, - suggestion: Vec<(Span, String)>, - applicability: Applicability, - ) -> &mut Self { - self.suggestions.push(CodeSuggestion { - substitutions: vec![Substitution { - parts: suggestion - .into_iter() - .map(|(span, snippet)| SubstitutionPart { snippet, span }) - .collect(), - }], - msg: msg.to_owned(), - show_code_when_inline: true, + style: SuggestionStyle::ShowCode, applicability, }); self } - #[deprecated(note = "Use `multipart_suggestion_with_applicability`")] - pub fn multipart_suggestion( - &mut self, - msg: &str, - suggestion: Vec<(Span, String)>, - ) -> &mut Self { - self.multipart_suggestion_with_applicability( - msg, - suggestion, - Applicability::Unspecified, - ) - } - /// Prints out a message with multiple suggested edits of the code. - #[deprecated(note = "Use `span_suggestions_with_applicability`")] - pub fn span_suggestions(&mut self, sp: Span, msg: &str, suggestions: Vec) -> &mut Self { + pub fn span_suggestions(&mut self, sp: Span, msg: &str, + suggestions: impl Iterator, applicability: Applicability) -> &mut Self + { self.suggestions.push(CodeSuggestion { - substitutions: suggestions.into_iter().map(|snippet| Substitution { + substitutions: suggestions.map(|snippet| Substitution { parts: vec![SubstitutionPart { snippet, span: sp, }], }).collect(), msg: msg.to_owned(), - show_code_when_inline: true, - applicability: Applicability::Unspecified, + style: SuggestionStyle::ShowCode, + applicability, }); self } - /// This is a suggestion that may contain mistakes or fillers and should - /// be read and understood by a human. - pub fn span_suggestion_with_applicability(&mut self, sp: Span, msg: &str, - suggestion: String, - applicability: Applicability) -> &mut Self { + /// Prints out a message with a suggested edit of the code. If the suggestion is presented + /// inline, it will only show the message and not the suggestion. + /// + /// See `CodeSuggestion` for more information. + pub fn span_suggestion_short( + &mut self, sp: Span, msg: &str, suggestion: String, applicability: Applicability + ) -> &mut Self { self.suggestions.push(CodeSuggestion { substitutions: vec![Substitution { parts: vec![SubstitutionPart { @@ -354,30 +343,40 @@ impl Diagnostic { }], }], msg: msg.to_owned(), - show_code_when_inline: true, + style: SuggestionStyle::HideCodeInline, applicability, }); self } - pub fn span_suggestions_with_applicability(&mut self, sp: Span, msg: &str, - suggestions: impl Iterator, applicability: Applicability) -> &mut Self - { + /// Prints out a message with for a suggestion without showing the suggested code. + /// + /// This is intended to be used for suggestions that are obvious in what the changes need to + /// be from the message, showing the span label inline would be visually unpleasant + /// (marginally overlapping spans or multiline spans) and showing the snippet window wouldn't + /// improve understandability. 
+ pub fn span_suggestion_hidden( + &mut self, sp: Span, msg: &str, suggestion: String, applicability: Applicability + ) -> &mut Self { self.suggestions.push(CodeSuggestion { - substitutions: suggestions.map(|snippet| Substitution { + substitutions: vec![Substitution { parts: vec![SubstitutionPart { - snippet, + snippet: suggestion, span: sp, }], - }).collect(), + }], msg: msg.to_owned(), - show_code_when_inline: true, + style: SuggestionStyle::HideCodeAlways, applicability, }); self } - pub fn span_suggestion_short_with_applicability( + /// Adds a suggestion to the json output, but otherwise remains silent/undisplayed in the cli. + /// + /// This is intended to be used for suggestions that are *very* obvious in what the changes + /// need to be from the message, but we still want other tools to be able to apply them. + pub fn tool_only_span_suggestion( &mut self, sp: Span, msg: &str, suggestion: String, applicability: Applicability ) -> &mut Self { self.suggestions.push(CodeSuggestion { @@ -388,7 +387,7 @@ impl Diagnostic { }], }], msg: msg.to_owned(), - show_code_when_inline: false, + style: SuggestionStyle::CompletelyHidden, applicability: applicability, }); self diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 2f16470530e43..c8d47339fb365 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -1,24 +1,15 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use Diagnostic; -use DiagnosticId; -use DiagnosticStyledString; -use Applicability; - -use Level; -use Handler; +use crate::Diagnostic; +use crate::DiagnosticId; +use crate::DiagnosticStyledString; +use crate::Applicability; + +use crate::Level; +use crate::Handler; use std::fmt::{self, Debug}; use std::ops::{Deref, DerefMut}; use std::thread::panicking; use syntax_pos::{MultiSpan, Span}; +use log::debug; /// Used for emitting structured error messages and other diagnostic information. /// @@ -35,7 +26,7 @@ pub struct DiagnosticBuilder<'a> { /// In general, the `DiagnosticBuilder` uses deref to allow access to /// the fields and methods of the embedded `diagnostic` in a -/// transparent way. *However,* many of the methods are intended to +/// transparent way. *However,* many of the methods are intended to /// be used in a chained way, and hence ought to return `self`. In /// that case, we can't just naively forward to the method on the /// `diagnostic`, because the return type would be a `&Diagnostic` @@ -43,18 +34,24 @@ pub struct DiagnosticBuilder<'a> { /// it easy to declare such methods on the builder. macro_rules! forward { // Forward pattern for &self -> &Self - (pub fn $n:ident(&self, $($name:ident: $ty:ty),* $(,)*) -> &Self) => { + ( + $(#[$attrs:meta])* + pub fn $n:ident(&self, $($name:ident: $ty:ty),* $(,)?) -> &Self + ) => { + $(#[$attrs])* pub fn $n(&self, $($name: $ty),*) -> &Self { - #[allow(deprecated)] self.diagnostic.$n($($name),*); self } }; // Forward pattern for &mut self -> &mut Self - (pub fn $n:ident(&mut self, $($name:ident: $ty:ty),* $(,)*) -> &mut Self) => { + ( + $(#[$attrs:meta])* + pub fn $n:ident(&mut self, $($name:ident: $ty:ty),* $(,)?) 
-> &mut Self + ) => { + $(#[$attrs])* pub fn $n(&mut self, $($name: $ty),*) -> &mut Self { - #[allow(deprecated)] self.diagnostic.$n($($name),*); self } @@ -62,12 +59,16 @@ macro_rules! forward { // Forward pattern for &mut self -> &mut Self, with S: Into // type parameter. No obvious way to make this more generic. - (pub fn $n:ident>( - &mut self, - $($name:ident: $ty:ty),* - $(,)*) -> &mut Self) => { + ( + $(#[$attrs:meta])* + pub fn $n:ident>( + &mut self, + $($name:ident: $ty:ty),* + $(,)? + ) -> &mut Self + ) => { + $(#[$attrs])* pub fn $n>(&mut self, $($name: $ty),*) -> &mut Self { - #[allow(deprecated)] self.diagnostic.$n($($name),*); self } @@ -102,7 +103,9 @@ impl<'a> DiagnosticBuilder<'a> { /// Buffers the diagnostic for later emission, unless handler /// has disabled such buffering. pub fn buffer(mut self, buffered_diagnostics: &mut Vec) { - if self.handler.flags.dont_buffer_diagnostics || self.handler.flags.treat_err_as_bug { + if self.handler.flags.dont_buffer_diagnostics || + self.handler.flags.treat_err_as_bug.is_some() + { self.emit(); return; } @@ -111,8 +114,8 @@ impl<'a> DiagnosticBuilder<'a> { // implements `Drop`. let diagnostic; unsafe { - diagnostic = ::std::ptr::read(&self.diagnostic); - ::std::mem::forget(self); + diagnostic = std::ptr::read(&self.diagnostic); + std::mem::forget(self); }; // Logging here is useful to help track down where in logs an error was // actually emitted. @@ -149,7 +152,7 @@ impl<'a> DiagnosticBuilder<'a> { self.cancel(); } - /// Add a span/label to be included in the resulting snippet. + /// Adds a span/label to be included in the resulting snippet. /// This is pushed onto the `MultiSpan` that was created when the /// diagnostic was first built. If you don't call this function at /// all, and you just supplied a `Span` to create the diagnostic, @@ -187,44 +190,33 @@ impl<'a> DiagnosticBuilder<'a> { msg: &str, ) -> &mut Self); - #[deprecated(note = "Use `span_suggestion_short_with_applicability`")] - forward!(pub fn span_suggestion_short( - &mut self, - sp: Span, - msg: &str, - suggestion: String, - ) -> &mut Self); + pub fn multipart_suggestion( + &mut self, + msg: &str, + suggestion: Vec<(Span, String)>, + applicability: Applicability, + ) -> &mut Self { + if !self.allow_suggestions { + return self + } + self.diagnostic.multipart_suggestion( + msg, + suggestion, + applicability, + ); + self + } - #[deprecated(note = "Use `multipart_suggestion_with_applicability`")] - forward!(pub fn multipart_suggestion( + pub fn tool_only_multipart_suggestion( &mut self, msg: &str, suggestion: Vec<(Span, String)>, - ) -> &mut Self); - - #[deprecated(note = "Use `span_suggestion_with_applicability`")] - forward!(pub fn span_suggestion(&mut self, - sp: Span, - msg: &str, - suggestion: String, - ) -> &mut Self); - - #[deprecated(note = "Use `span_suggestions_with_applicability`")] - forward!(pub fn span_suggestions(&mut self, - sp: Span, - msg: &str, - suggestions: Vec, - ) -> &mut Self); - - pub fn multipart_suggestion_with_applicability(&mut self, - msg: &str, - suggestion: Vec<(Span, String)>, - applicability: Applicability, - ) -> &mut Self { + applicability: Applicability, + ) -> &mut Self { if !self.allow_suggestions { return self } - self.diagnostic.multipart_suggestion_with_applicability( + self.diagnostic.tool_only_multipart_suggestion( msg, suggestion, applicability, @@ -232,16 +224,18 @@ impl<'a> DiagnosticBuilder<'a> { self } - pub fn span_suggestion_with_applicability(&mut self, - sp: Span, - msg: &str, - suggestion: String, - applicability: 
Applicability) - -> &mut Self { + + pub fn span_suggestion( + &mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability, + ) -> &mut Self { if !self.allow_suggestions { return self } - self.diagnostic.span_suggestion_with_applicability( + self.diagnostic.span_suggestion( sp, msg, suggestion, @@ -250,16 +244,17 @@ impl<'a> DiagnosticBuilder<'a> { self } - pub fn span_suggestions_with_applicability(&mut self, - sp: Span, - msg: &str, - suggestions: impl Iterator, - applicability: Applicability) - -> &mut Self { + pub fn span_suggestions( + &mut self, + sp: Span, + msg: &str, + suggestions: impl Iterator, + applicability: Applicability, + ) -> &mut Self { if !self.allow_suggestions { return self } - self.diagnostic.span_suggestions_with_applicability( + self.diagnostic.span_suggestions( sp, msg, suggestions, @@ -268,16 +263,36 @@ impl<'a> DiagnosticBuilder<'a> { self } - pub fn span_suggestion_short_with_applicability(&mut self, - sp: Span, - msg: &str, - suggestion: String, - applicability: Applicability) - -> &mut Self { + pub fn span_suggestion_short( + &mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability, + ) -> &mut Self { + if !self.allow_suggestions { + return self + } + self.diagnostic.span_suggestion_short( + sp, + msg, + suggestion, + applicability, + ); + self + } + + pub fn span_suggestion_hidden( + &mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability, + ) -> &mut Self { if !self.allow_suggestions { return self } - self.diagnostic.span_suggestion_short_with_applicability( + self.diagnostic.span_suggestion_hidden( sp, msg, suggestion, @@ -285,6 +300,26 @@ impl<'a> DiagnosticBuilder<'a> { ); self } + + pub fn tool_only_span_suggestion( + &mut self, + sp: Span, + msg: &str, + suggestion: String, + applicability: Applicability, + ) -> &mut Self { + if !self.allow_suggestions { + return self + } + self.diagnostic.tool_only_span_suggestion( + sp, + msg, + suggestion, + applicability, + ); + self + } + forward!(pub fn set_span>(&mut self, sp: S) -> &mut Self); forward!(pub fn code(&mut self, s: DiagnosticId) -> &mut Self); @@ -323,7 +358,7 @@ impl<'a> DiagnosticBuilder<'a> { } impl<'a> Debug for DiagnosticBuilder<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.diagnostic.fmt(f) } } diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 7bd0f0f8fc469..ee2a1b69cbd01 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -1,47 +1,39 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use self::Destination::*; +use Destination::*; use syntax_pos::{SourceFile, Span, MultiSpan}; -use {Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, SourceMapperDyn, DiagnosticId}; -use snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, StyledString, Style}; -use styled_buffer::StyledBuffer; +use crate::{ + Level, CodeSuggestion, DiagnosticBuilder, SubDiagnostic, + SuggestionStyle, SourceMapperDyn, DiagnosticId, +}; +use crate::Level::Error; +use crate::snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, StyledString, Style}; +use crate::styled_buffer::StyledBuffer; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lrc; -use atty; use std::borrow::Cow; use std::io::prelude::*; use std::io; use std::cmp::{min, Reverse}; use termcolor::{StandardStream, ColorChoice, ColorSpec, BufferWriter}; use termcolor::{WriteColor, Color, Buffer}; -use unicode_width; const ANONYMIZED_LINE_NUM: &str = "LL"; /// Emitter trait for emitting errors. pub trait Emitter { /// Emit a structured diagnostic. - fn emit(&mut self, db: &DiagnosticBuilder); + fn emit(&mut self, db: &DiagnosticBuilder<'_>); - /// Check if should show explanations about "rustc --explain" + /// Checks if should show explanations about "rustc --explain" fn should_show_explain(&self) -> bool { true } } impl Emitter for EmitterWriter { - fn emit(&mut self, db: &DiagnosticBuilder) { + fn emit(&mut self, db: &DiagnosticBuilder<'_>) { let mut primary_span = db.span.clone(); let mut children = db.children.clone(); let mut suggestions: &[_] = &[]; @@ -55,9 +47,14 @@ impl Emitter for EmitterWriter { // don't display long messages as labels sugg.msg.split_whitespace().count() < 10 && // don't display multiline suggestions as labels - !sugg.substitutions[0].parts[0].snippet.contains('\n') { + !sugg.substitutions[0].parts[0].snippet.contains('\n') && + // when this style is set we want the suggestion to be a message, not inline + sugg.style != SuggestionStyle::HideCodeAlways && + // trivial suggestion for tooling's sake, never shown + sugg.style != SuggestionStyle::CompletelyHidden + { let substitution = &sugg.substitutions[0].parts[0].snippet.trim(); - let msg = if substitution.len() == 0 || !sugg.show_code_when_inline { + let msg = if substitution.len() == 0 || sugg.style.hide_inline() { // This substitution is only removal or we explicitly don't want to show the // code inline, don't show it format!("help: {}", sugg.msg) @@ -76,6 +73,7 @@ impl Emitter for EmitterWriter { self.fix_multispans_in_std_macros(&mut primary_span, &mut children, + &db.level, db.handler.flags.external_macro_backtrace); self.emit_messages_default(&db.level, @@ -247,6 +245,7 @@ impl EmitterWriter { end_col: hi.col_display, is_primary: span_label.is_primary, label: span_label.label.clone(), + overlaps_exactly: false, }; multiline_annotations.push((lo.file.clone(), ml.clone())); AnnotationType::Multiline(ml) @@ -262,10 +261,7 @@ impl EmitterWriter { }; if !ann.is_multiline() { - add_annotation_to_file(&mut output, - lo.file, - lo.line, - ann); + add_annotation_to_file(&mut output, lo.file, lo.line, ann); } } } @@ -278,10 +274,12 @@ impl EmitterWriter { let ref mut a = item.1; // Move all other multiline annotations overlapping with this one // one level to the right. 
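The branch above keys the inline-versus-hidden decision on `sugg.style.hide_inline()`. As a rough sketch, that helper presumably reduces to "every style except `ShowCode` suppresses the inline snippet"; the real definition lives elsewhere in librustc_errors and may differ in detail.

```rust
// Plausible shape of the helper called above (an assumption, not taken from this hunk).
impl SuggestionStyle {
    fn hide_inline(&self) -> bool {
        match *self {
            // Only `ShowCode` renders the snippet inline next to the label.
            SuggestionStyle::ShowCode => false,
            // HideCodeInline, HideCodeAlways and CompletelyHidden all suppress it.
            _ => true,
        }
    }
}
```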
- if &ann != a && + if !(ann.same_span(a)) && num_overlap(ann.line_start, ann.line_end, a.line_start, a.line_end, true) { a.increase_depth(); + } else if ann.same_span(a) && &ann != a { + a.overlaps_exactly = true; } else { break; } @@ -293,17 +291,49 @@ impl EmitterWriter { if ann.depth > max_depth { max_depth = ann.depth; } - add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start()); - let middle = min(ann.line_start + 4, ann.line_end); - for line in ann.line_start + 1..middle { - add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); - } - if middle < ann.line_end - 1 { - for line in ann.line_end - 1..ann.line_end { + let mut end_ann = ann.as_end(); + if !ann.overlaps_exactly { + // avoid output like + // + // | foo( + // | _____^ + // | |_____| + // | || bar, + // | || ); + // | || ^ + // | ||______| + // | |______foo + // | baz + // + // and instead get + // + // | foo( + // | _____^ + // | | bar, + // | | ); + // | | ^ + // | | | + // | |______foo + // | baz + add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start()); + // 4 is the minimum vertical length of a multiline span when presented: two lines + // of code and two lines of underline. This is not true for the special case where + // the beginning doesn't have an underline, but the current logic seems to be + // working correctly. + let middle = min(ann.line_start + 4, ann.line_end); + for line in ann.line_start + 1..middle { + // Every `|` that joins the beginning of the span (`___^`) to the end (`|__^`). add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); } + if middle < ann.line_end - 1 { + for line in ann.line_end - 1..ann.line_end { + add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); + } + } + } else { + end_ann.annotation_type = AnnotationType::Singleline; } - add_annotation_to_file(&mut output, file, ann.line_end, ann.as_end()); + add_annotation_to_file(&mut output, file, ann.line_end, end_ann); } for file_vec in output.iter_mut() { file_vec.multiline_depth = max_depth; @@ -684,8 +714,8 @@ impl EmitterWriter { // | | something about `foo` // | something about `fn foo()` annotations_position.sort_by(|a, b| { - // Decreasing order - a.1.len().cmp(&b.1.len()).reverse() + // Decreasing order. When `a` and `b` are the same length, prefer `Primary`. + (a.1.len(), !a.1.is_primary).cmp(&(b.1.len(), !b.1.is_primary)).reverse() }); // Write the underlines. 
@@ -860,18 +890,27 @@ impl EmitterWriter { fn fix_multispans_in_std_macros(&mut self, span: &mut MultiSpan, children: &mut Vec, + level: &Level, backtrace: bool) { let mut spans_updated = self.fix_multispan_in_std_macros(span, backtrace); for child in children.iter_mut() { spans_updated |= self.fix_multispan_in_std_macros(&mut child.span, backtrace); } + let msg = if level == &Error { + "this error originates in a macro outside of the current crate \ + (in Nightly builds, run with -Z external-macro-backtrace \ + for more info)".to_string() + } else { + "this warning originates in a macro outside of the current crate \ + (in Nightly builds, run with -Z external-macro-backtrace \ + for more info)".to_string() + }; + if spans_updated { children.push(SubDiagnostic { level: Level::Note, message: vec![ - ("this error originates in a macro outside of the current crate \ - (in Nightly builds, run with -Z external-macro-backtrace \ - for more info)".to_string(), + (msg, Style::NoStyle), ], span: MultiSpan::new(), @@ -880,7 +919,7 @@ impl EmitterWriter { } } - /// Add a left margin to every line but the first, given a padding length and the label being + /// Adds a left margin to every line but the first, given a padding length and the label being /// displayed, keeping the provided highlighting. fn msg_to_buffer(&self, buffer: &mut StyledBuffer, @@ -907,7 +946,7 @@ impl EmitterWriter { // `max_line_num_len` let padding = " ".repeat(padding + label.len() + 5); - /// Return whether `style`, or the override if present and the style is `NoStyle`. + /// Returns `true` if `style`, or the override if present and the style is `NoStyle`. fn style_or_override(style: Style, override_style: Option\n\n").unwrap(); + trace::write_traces(&mut html_file, &mut counts_file, &frame.traces); + writeln!(html_file, "\n").unwrap(); + + let ack_path = format!("{}.ack", params.path); + let ack_file = File::create(&ack_path).unwrap(); + drop(ack_file); + + // Tell main thread that we are done, e.g., so it can exit + params.ack.send(()).unwrap(); + } + // Actual query message: + msg => { + // Record msg in our log + profq_msgs.push(msg.clone()); + // Respond to the message, knowing that we've already handled Halt and Dump, above. 
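The parse loop that follows repeatedly pops a stack frame and, for `ProviderEnd`, `TimeEnd` and `TaskEnd`, records `dur_self = duration - dur_extent`: the frame's wall-clock time minus the total time of its child traces. A standalone illustration of that bookkeeping on plain `Duration` values (illustrative only, not part of the patch):

```rust
// Illustrative only: the self-time arithmetic performed when a frame is popped.
use std::time::Duration;

fn main() {
    // child traces collected while the provider was on the stack
    let children = [Duration::from_millis(30), Duration::from_millis(50)];
    let dur_total = Duration::from_millis(100); // `start.elapsed()` for the parent
    let dur_extent: Duration = children.iter().sum();
    let dur_self = dur_total - dur_extent; // time attributed to the parent itself
    assert_eq!(dur_self, Duration::from_millis(20));
}
```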
+ match (frame.parse_st.clone(), msg) { + (_, ProfileQueriesMsg::Halt) | (_, ProfileQueriesMsg::Dump(_)) => { + unreachable!(); + }, + // Parse State: Clear + (ParseState::Clear, + ProfileQueriesMsg::QueryBegin(span, querymsg)) => { + let start = Instant::now(); + frame.parse_st = ParseState::HaveQuery + (Query { span, msg: querymsg }, start) + }, + (ParseState::Clear, + ProfileQueriesMsg::CacheHit) => { + panic!("parse error: unexpected CacheHit; expected QueryBegin") + }, + (ParseState::Clear, + ProfileQueriesMsg::ProviderBegin) => { + panic!("parse error: expected QueryBegin before beginning a provider") + }, + (ParseState::Clear, + ProfileQueriesMsg::ProviderEnd) => { + let provider_extent = frame.traces; + match stack.pop() { + None => + panic!("parse error: expected a stack frame; found an empty stack"), + Some(old_frame) => { + match old_frame.parse_st { + ParseState::HaveQuery(q, start) => { + let duration = start.elapsed(); + frame = StackFrame{ + parse_st: ParseState::Clear, + traces: old_frame.traces + }; + let dur_extent = total_duration(&provider_extent); + let trace = Rec { + effect: Effect::QueryBegin(q, CacheCase::Miss), + extent: Box::new(provider_extent), + start: start, + dur_self: duration - dur_extent, + dur_total: duration, + }; + frame.traces.push( trace ); + }, + _ => panic!("internal parse error: malformed parse stack") + } + } + } + }, + (ParseState::Clear, + ProfileQueriesMsg::TimeBegin(msg)) => { + let start = Instant::now(); + frame.parse_st = ParseState::HaveTimeBegin(msg, start); + stack.push(frame); + frame = StackFrame{parse_st: ParseState::Clear, traces: vec![]}; + }, + (_, ProfileQueriesMsg::TimeBegin(_)) => { + panic!("parse error; did not expect time begin here"); + }, + (ParseState::Clear, + ProfileQueriesMsg::TimeEnd) => { + let provider_extent = frame.traces; + match stack.pop() { + None => + panic!("parse error: expected a stack frame; found an empty stack"), + Some(old_frame) => { + match old_frame.parse_st { + ParseState::HaveTimeBegin(msg, start) => { + let duration = start.elapsed(); + frame = StackFrame{ + parse_st: ParseState::Clear, + traces: old_frame.traces + }; + let dur_extent = total_duration(&provider_extent); + let trace = Rec { + effect: Effect::TimeBegin(msg), + extent: Box::new(provider_extent), + start: start, + dur_total: duration, + dur_self: duration - dur_extent, + }; + frame.traces.push( trace ); + }, + _ => panic!("internal parse error: malformed parse stack") + } + } + } + }, + (_, ProfileQueriesMsg::TimeEnd) => { + panic!("parse error") + }, + (ParseState::Clear, + ProfileQueriesMsg::TaskBegin(key)) => { + let start = Instant::now(); + frame.parse_st = ParseState::HaveTaskBegin(key, start); + stack.push(frame); + frame = StackFrame{ parse_st: ParseState::Clear, traces: vec![] }; + }, + (_, ProfileQueriesMsg::TaskBegin(_)) => { + panic!("parse error; did not expect time begin here"); + }, + (ParseState::Clear, + ProfileQueriesMsg::TaskEnd) => { + let provider_extent = frame.traces; + match stack.pop() { + None => + panic!("parse error: expected a stack frame; found an empty stack"), + Some(old_frame) => { + match old_frame.parse_st { + ParseState::HaveTaskBegin(key, start) => { + let duration = start.elapsed(); + frame = StackFrame{ + parse_st: ParseState::Clear, + traces: old_frame.traces + }; + let dur_extent = total_duration(&provider_extent); + let trace = Rec { + effect: Effect::TaskBegin(key), + extent: Box::new(provider_extent), + start: start, + dur_total: duration, + dur_self: duration - dur_extent, + }; + 
frame.traces.push( trace ); + }, + _ => panic!("internal parse error: malformed parse stack") + } + } + } + }, + (_, ProfileQueriesMsg::TaskEnd) => { + panic!("parse error") + }, + // Parse State: HaveQuery + (ParseState::HaveQuery(q,start), + ProfileQueriesMsg::CacheHit) => { + let duration = start.elapsed(); + let trace : Rec = Rec{ + effect: Effect::QueryBegin(q, CacheCase::Hit), + extent: Box::new(vec![]), + start: start, + dur_self: duration, + dur_total: duration, + }; + frame.traces.push( trace ); + frame.parse_st = ParseState::Clear; + }, + (ParseState::HaveQuery(_, _), + ProfileQueriesMsg::ProviderBegin) => { + stack.push(frame); + frame = StackFrame{ parse_st: ParseState::Clear, traces: vec![] }; + }, + + // Parse errors: + + (ParseState::HaveQuery(q, _), + ProfileQueriesMsg::ProviderEnd) => { + panic!("parse error: unexpected ProviderEnd; \ + expected something else to follow BeginQuery for {:?}", q) + }, + (ParseState::HaveQuery(q1, _), + ProfileQueriesMsg::QueryBegin(span2, querymsg2)) => { + panic!("parse error: unexpected QueryBegin; \ + earlier query is unfinished: {:?} and now {:?}", + q1, Query{span:span2, msg: querymsg2}) + }, + (ParseState::HaveTimeBegin(_, _), _) => { + unreachable!() + }, + (ParseState::HaveTaskBegin(_, _), _) => { + unreachable!() + }, + } + } + } + } +} diff --git a/src/librustc_driver/profile/trace.rs b/src/librustc_interface/profile/trace.rs similarity index 94% rename from src/librustc_driver/profile/trace.rs rename to src/librustc_interface/profile/trace.rs index 9589ae2a8dbe0..95c4ea6ff2347 100644 --- a/src/librustc_driver/profile/trace.rs +++ b/src/librustc_interface/profile/trace.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::*; use syntax_pos::SpanData; use rustc_data_structures::fx::FxHashMap; diff --git a/src/librustc_interface/queries.rs b/src/librustc_interface/queries.rs new file mode 100644 index 0000000000000..570509ffb2b8c --- /dev/null +++ b/src/librustc_interface/queries.rs @@ -0,0 +1,303 @@ +use crate::interface::{Compiler, Result}; +use crate::passes::{self, BoxedResolver, ExpansionResult, BoxedGlobalCtxt, PluginInfo}; + +use rustc_incremental::DepGraphFuture; +use rustc_data_structures::sync::Lrc; +use rustc::session::config::{Input, OutputFilenames, OutputType}; +use rustc::session::Session; +use rustc::util::common::{time, ErrorReported}; +use rustc::util::profiling::ProfileCategory; +use rustc::lint; +use rustc::hir; +use rustc::hir::def_id::LOCAL_CRATE; +use rustc::ty; +use rustc::ty::steal::Steal; +use rustc::dep_graph::DepGraph; +use rustc_passes::hir_stats; +use rustc_plugin::registry::Registry; +use serialize::json; +use std::cell::{Ref, RefMut, RefCell}; +use std::ops::Deref; +use std::rc::Rc; +use std::sync::mpsc; +use std::any::Any; +use std::mem; +use syntax::parse::{self, PResult}; +use syntax::util::node_count::NodeCounter; +use syntax::{self, ast, attr, diagnostics, visit}; +use syntax_pos::hygiene; + +/// Represent the result of a query. +/// This result can be stolen with the `take` method and returned with the `give` method. 
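Before the definition that follows: a standalone miniature of the take/give/peek contract described above, built on a plain `RefCell`, to make the stealing semantics concrete. Field shapes mirror the patch, but this is a sketch, not the real type.

```rust
// Miniature stand-in for the `Query<T>` defined just below (illustrative only).
use std::cell::{Ref, RefCell};

struct MiniQuery<T> {
    result: RefCell<Option<Result<T, ()>>>,
}

impl<T> MiniQuery<T> {
    fn new(value: T) -> Self {
        MiniQuery { result: RefCell::new(Some(Ok(value))) }
    }
    /// Steals the result; `peek`/`take` panic until `give` restores it.
    fn take(&self) -> T {
        self.result.borrow_mut().take().expect("missing query result").unwrap()
    }
    /// Hands a previously stolen result back.
    fn give(&self, value: T) {
        let mut r = self.result.borrow_mut();
        assert!(r.is_none(), "a result already exists");
        *r = Some(Ok(value));
    }
    /// Borrows the result without stealing it.
    fn peek(&self) -> Ref<'_, T> {
        Ref::map(self.result.borrow(), |r| {
            r.as_ref().unwrap().as_ref().expect("missing query result")
        })
    }
}

fn main() {
    let q = MiniQuery::new(vec![1, 2, 3]);
    assert_eq!(q.peek().len(), 3); // shared borrow
    let owned = q.take();          // steal the result
    q.give(owned);                 // restore it so later passes can peek again
}
```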
+pub struct Query { + result: RefCell>>, +} + +impl Query { + fn compute Result>(&self, f: F) -> Result<&Query> { + let mut result = self.result.borrow_mut(); + if result.is_none() { + *result = Some(f()); + } + result.as_ref().unwrap().as_ref().map(|_| self).map_err(|err| *err) + } + + /// Takes ownership of the query result. Further attempts to take or peek the query + /// result will panic unless it is returned by calling the `give` method. + pub fn take(&self) -> T { + self.result + .borrow_mut() + .take() + .expect("missing query result") + .unwrap() + } + + /// Returns a stolen query result. Panics if there's already a result. + pub fn give(&self, value: T) { + let mut result = self.result.borrow_mut(); + assert!(result.is_none(), "a result already exists"); + *result = Some(Ok(value)); + } + + /// Borrows the query result using the RefCell. Panics if the result is stolen. + pub fn peek(&self) -> Ref<'_, T> { + Ref::map(self.result.borrow(), |r| { + r.as_ref().unwrap().as_ref().expect("missing query result") + }) + } + + /// Mutably borrows the query result using the RefCell. Panics if the result is stolen. + pub fn peek_mut(&self) -> RefMut<'_, T> { + RefMut::map(self.result.borrow_mut(), |r| { + r.as_mut().unwrap().as_mut().expect("missing query result") + }) + } +} + +impl Default for Query { + fn default() -> Self { + Query { + result: RefCell::new(None), + } + } +} + +#[derive(Default)] +pub(crate) struct Queries { + dep_graph_future: Query>, + parse: Query, + crate_name: Query, + register_plugins: Query<(ast::Crate, PluginInfo)>, + expansion: Query<(ast::Crate, Rc>>)>, + dep_graph: Query, + lower_to_hir: Query<(Steal, ExpansionResult)>, + prepare_outputs: Query, + codegen_channel: Query<(Steal>>, + Steal>>)>, + global_ctxt: Query, + ongoing_codegen: Query>, + link: Query<()>, +} + +impl Compiler { + pub fn dep_graph_future(&self) -> Result<&Query>> { + self.queries.dep_graph_future.compute(|| { + Ok(if self.session().opts.build_dep_graph() { + Some(rustc_incremental::load_dep_graph(self.session())) + } else { + None + }) + }) + } + + pub fn parse(&self) -> Result<&Query> { + self.queries.parse.compute(|| { + passes::parse(self.session(), &self.input).map_err( + |mut parse_error| { + parse_error.emit(); + ErrorReported + }, + ) + }) + } + + pub fn register_plugins(&self) -> Result<&Query<(ast::Crate, PluginInfo)>> { + self.queries.register_plugins.compute(|| { + let crate_name = self.crate_name()?.peek().clone(); + let krate = self.parse()?.take(); + + passes::register_plugins( + self, + self.session(), + self.cstore(), + krate, + &crate_name, + ) + }) + } + + pub fn crate_name(&self) -> Result<&Query> { + self.queries.crate_name.compute(|| { + let parse_result = self.parse()?; + let krate = parse_result.peek(); + let result = match self.crate_name { + Some(ref crate_name) => crate_name.clone(), + None => rustc_codegen_utils::link::find_crate_name( + Some(self.session()), + &krate.attrs, + &self.input + ), + }; + Ok(result) + }) + } + + pub fn expansion( + &self + ) -> Result<&Query<(ast::Crate, Rc>>)>> { + self.queries.expansion.compute(|| { + let crate_name = self.crate_name()?.peek().clone(); + let (krate, plugin_info) = self.register_plugins()?.take(); + passes::configure_and_expand( + self.sess.clone(), + self.cstore().clone(), + krate, + &crate_name, + plugin_info, + ).map(|(krate, resolver)| (krate, Rc::new(Some(RefCell::new(resolver))))) + }) + } + + pub fn dep_graph(&self) -> Result<&Query> { + self.queries.dep_graph.compute(|| { + Ok(match 
self.dep_graph_future()?.take() { + None => DepGraph::new_disabled(), + Some(future) => { + let (prev_graph, prev_work_products) = + time(self.session(), "blocked while dep-graph loading finishes", || { + future.open().unwrap_or_else(|e| rustc_incremental::LoadResult::Error { + message: format!("could not decode incremental cache: {:?}", e), + }).open(self.session()) + }); + DepGraph::new(prev_graph, prev_work_products) + } + }) + }) + } + + pub fn lower_to_hir(&self) -> Result<&Query<(Steal, ExpansionResult)>> { + self.queries.lower_to_hir.compute(|| { + let expansion_result = self.expansion()?; + let (krate, resolver) = expansion_result.take(); + let resolver_ref = &*resolver; + let hir = Steal::new(resolver_ref.as_ref().unwrap().borrow_mut().access(|resolver| { + passes::lower_to_hir( + self.session(), + self.cstore(), + resolver, + &*self.dep_graph()?.peek(), + &krate + ) + })?); + expansion_result.give((krate, Rc::new(None))); + Ok((hir, BoxedResolver::to_expansion_result(resolver))) + }) + } + + pub fn prepare_outputs(&self) -> Result<&Query> { + self.queries.prepare_outputs.compute(|| { + self.lower_to_hir()?; + let krate = self.expansion()?; + let krate = krate.peek(); + let crate_name = self.crate_name()?; + let crate_name = crate_name.peek(); + passes::prepare_outputs(self.session(), self, &krate.0, &*crate_name) + }) + } + + pub fn codegen_channel(&self) -> Result<&Query<(Steal>>, + Steal>>)>> { + self.queries.codegen_channel.compute(|| { + let (tx, rx) = mpsc::channel(); + Ok((Steal::new(tx), Steal::new(rx))) + }) + } + + pub fn global_ctxt(&self) -> Result<&Query> { + self.queries.global_ctxt.compute(|| { + let crate_name = self.crate_name()?.peek().clone(); + let outputs = self.prepare_outputs()?.peek().clone(); + let hir = self.lower_to_hir()?; + let hir = hir.peek(); + let (ref hir_forest, ref expansion) = *hir; + let tx = self.codegen_channel()?.peek().0.steal(); + Ok(passes::create_global_ctxt( + self, + hir_forest.steal(), + expansion.defs.steal(), + expansion.resolutions.steal(), + outputs, + tx, + &crate_name)) + }) + } + + pub fn ongoing_codegen(&self) -> Result<&Query>> { + self.queries.ongoing_codegen.compute(|| { + let rx = self.codegen_channel()?.peek().1.steal(); + let outputs = self.prepare_outputs()?; + self.global_ctxt()?.peek_mut().enter(|tcx| { + tcx.analysis(LOCAL_CRATE).ok(); + + // Don't do code generation if there were any errors + self.session().compile_status()?; + + Ok(passes::start_codegen( + &***self.codegen_backend(), + tcx, + rx, + &*outputs.peek() + )) + }) + }) + } + + pub fn link(&self) -> Result<&Query<()>> { + self.queries.link.compute(|| { + let sess = self.session(); + + let ongoing_codegen = self.ongoing_codegen()?.take(); + + self.codegen_backend().join_codegen_and_link( + ongoing_codegen, + sess, + &*self.dep_graph()?.peek(), + &*self.prepare_outputs()?.peek(), + ).map_err(|_| ErrorReported)?; + + Ok(()) + }) + } + + pub fn compile(&self) -> Result<()> { + self.prepare_outputs()?; + + if self.session().opts.output_types.contains_key(&OutputType::DepInfo) + && self.session().opts.output_types.len() == 1 + { + return Ok(()) + } + + self.global_ctxt()?; + + // Drop AST after creating GlobalCtxt to free memory + mem::drop(self.expansion()?.take()); + + self.ongoing_codegen()?; + + // Drop GlobalCtxt after starting codegen to free memory + mem::drop(self.global_ctxt()?.take()); + + self.link().map(|_| ()) + } +} diff --git a/src/librustc_interface/util.rs b/src/librustc_interface/util.rs new file mode 100644 index 0000000000000..17523aedffb58 
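The `compile()` driver above fixes the order in which the queries are forced and when the AST and `GlobalCtxt` are dropped. As a rough, hedged sketch, a consumer could also drive the same queries by hand, for example to stop after analysis instead of running codegen. The entry point that hands out the `&Compiler`, and the precise import paths, are assumptions rather than part of this patch; the method calls themselves mirror the ones shown above.

```rust
// Sketch only: stop after analysis instead of calling `compile()`.
// Assumes `compiler: &Compiler` is provided by librustc_interface's entry
// point (not shown in this hunk).
use rustc_interface::interface::Compiler;

fn check_only(compiler: &Compiler) -> Result<(), rustc::util::common::ErrorReported> {
    let crate_name = compiler.crate_name()?.peek().clone();
    compiler.global_ctxt()?.peek_mut().enter(|tcx| {
        // Run the analysis queries without ever starting codegen.
        tcx.analysis(rustc::hir::def_id::LOCAL_CRATE).ok();
    });
    println!("finished checking `{}`", crate_name);
    Ok(())
}
```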
--- /dev/null +++ b/src/librustc_interface/util.rs @@ -0,0 +1,859 @@ +use log::info; +use rustc::session::config::{Input, OutputFilenames, ErrorOutputType}; +use rustc::session::{self, config, early_error, filesearch, Session, DiagnosticOutput}; +use rustc::session::CrateDisambiguator; +use rustc::ty; +use rustc::lint; +use rustc_codegen_utils::codegen_backend::CodegenBackend; +#[cfg(parallel_compiler)] +use rustc_data_structures::jobserver; +use rustc_data_structures::sync::{Lock, Lrc}; +use rustc_data_structures::stable_hasher::StableHasher; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::thin_vec::ThinVec; +use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +use rustc_errors::registry::Registry; +use rustc_lint; +use rustc_metadata::dynamic_lib::DynamicLibrary; +use rustc_mir; +use rustc_passes; +use rustc_plugin; +use rustc_privacy; +use rustc_resolve; +use rustc_typeck; +use std::env; +use std::env::consts::{DLL_PREFIX, DLL_SUFFIX}; +use std::io::{self, Write}; +use std::mem; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex, Once}; +use std::ops::DerefMut; +use smallvec::SmallVec; +use syntax::ptr::P; +use syntax::mut_visit::{*, MutVisitor, visit_clobber}; +use syntax::ast::BlockCheckMode; +use syntax::util::lev_distance::find_best_match_for_name; +use syntax::source_map::{FileLoader, RealFileLoader, SourceMap}; +use syntax::symbol::Symbol; +use syntax::{self, ast, attr}; +#[cfg(not(parallel_compiler))] +use std::{thread, panic}; + +pub fn diagnostics_registry() -> Registry { + let mut all_errors = Vec::new(); + all_errors.extend_from_slice(&rustc::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_typeck::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_resolve::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_privacy::DIAGNOSTICS); + // FIXME: need to figure out a way to get these back in here + // all_errors.extend_from_slice(get_codegen_backend(sess).diagnostics()); + all_errors.extend_from_slice(&rustc_metadata::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_passes::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_plugin::DIAGNOSTICS); + all_errors.extend_from_slice(&rustc_mir::DIAGNOSTICS); + all_errors.extend_from_slice(&syntax::DIAGNOSTICS); + + Registry::new(&all_errors) +} + +/// Adds `target_feature = "..."` cfgs for a variety of platform +/// specific features (SSE, NEON etc.). +/// +/// This is performed by checking whether a whitelisted set of +/// features is available on the target machine, by querying LLVM. 
+pub fn add_configuration( + cfg: &mut ast::CrateConfig, + sess: &Session, + codegen_backend: &dyn CodegenBackend, +) { + let tf = Symbol::intern("target_feature"); + + cfg.extend( + codegen_backend + .target_features(sess) + .into_iter() + .map(|feat| (tf, Some(feat))), + ); + + if sess.crt_static_feature() { + cfg.insert((tf, Some(Symbol::intern("crt-static")))); + } +} + +pub fn create_session( + sopts: config::Options, + cfg: FxHashSet<(String, Option)>, + diagnostic_output: DiagnosticOutput, + file_loader: Option>, + input_path: Option, + lint_caps: FxHashMap, +) -> (Lrc, Lrc>, Lrc) { + let descriptions = diagnostics_registry(); + + let loader = file_loader.unwrap_or(box RealFileLoader); + let source_map = Lrc::new(SourceMap::with_file_loader( + loader, + sopts.file_path_mapping(), + )); + let mut sess = session::build_session_with_source_map( + sopts, + input_path, + descriptions, + source_map.clone(), + diagnostic_output, + lint_caps, + ); + + let codegen_backend = get_codegen_backend(&sess); + + rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); + if sess.unstable_options() { + rustc_lint::register_internals(&mut sess.lint_store.borrow_mut(), Some(&sess)); + } + + let mut cfg = config::build_configuration(&sess, config::to_crate_config(cfg)); + add_configuration(&mut cfg, &sess, &*codegen_backend); + sess.parse_sess.config = cfg; + + (Lrc::new(sess), Lrc::new(codegen_backend), source_map) +} + +// Temporarily have stack size set to 32MB to deal with various crates with long method +// chains or deep syntax trees. +// FIXME(oli-obk): get https://github.com/rust-lang/rust/pull/55617 the finish line +const STACK_SIZE: usize = 32 * 1024 * 1024; // 32MB + +fn get_stack_size() -> Option { + // FIXME: Hacks on hacks. If the env is trying to override the stack size + // then *don't* set it explicitly. 
+ if env::var_os("RUST_MIN_STACK").is_none() { + Some(STACK_SIZE) + } else { + None + } +} + +struct Sink(Arc>>); +impl Write for Sink { + fn write(&mut self, data: &[u8]) -> io::Result { + Write::write(&mut *self.0.lock().unwrap(), data) + } + fn flush(&mut self) -> io::Result<()> { Ok(()) } +} + +#[cfg(not(parallel_compiler))] +pub fn scoped_thread R + Send, R: Send>(cfg: thread::Builder, f: F) -> R { + struct Ptr(*mut ()); + unsafe impl Send for Ptr {} + unsafe impl Sync for Ptr {} + + let mut f = Some(f); + let run = Ptr(&mut f as *mut _ as *mut ()); + let mut result = None; + let result_ptr = Ptr(&mut result as *mut _ as *mut ()); + + let thread = cfg.spawn(move || { + let run = unsafe { (*(run.0 as *mut Option)).take().unwrap() }; + let result = unsafe { &mut *(result_ptr.0 as *mut Option) }; + *result = Some(run()); + }); + + match thread.unwrap().join() { + Ok(()) => result.unwrap(), + Err(p) => panic::resume_unwind(p), + } +} + +#[cfg(not(parallel_compiler))] +pub fn spawn_thread_pool R + Send, R: Send>( + _threads: Option, + stderr: &Option>>>, + f: F, +) -> R { + let mut cfg = thread::Builder::new().name("rustc".to_string()); + + if let Some(size) = get_stack_size() { + cfg = cfg.stack_size(size); + } + + scoped_thread(cfg, || { + syntax::with_globals( || { + ty::tls::GCX_PTR.set(&Lock::new(0), || { + if let Some(stderr) = stderr { + io::set_panic(Some(box Sink(stderr.clone()))); + } + ty::tls::with_thread_locals(|| f()) + }) + }) + }) +} + +#[cfg(parallel_compiler)] +pub fn spawn_thread_pool R + Send, R: Send>( + threads: Option, + stderr: &Option>>>, + f: F, +) -> R { + use rayon::{ThreadPool, ThreadPoolBuilder}; + use syntax; + use syntax_pos; + + let gcx_ptr = &Lock::new(0); + + let mut config = ThreadPoolBuilder::new() + .acquire_thread_handler(jobserver::acquire_thread) + .release_thread_handler(jobserver::release_thread) + .num_threads(Session::threads_from_count(threads)) + .deadlock_handler(|| unsafe { ty::query::handle_deadlock() }); + + if let Some(size) = get_stack_size() { + config = config.stack_size(size); + } + + let with_pool = move |pool: &ThreadPool| pool.install(move || f()); + + syntax::with_globals(|| { + syntax::GLOBALS.with(|syntax_globals| { + syntax_pos::GLOBALS.with(|syntax_pos_globals| { + // The main handler runs for each Rayon worker thread and sets up + // the thread local rustc uses. syntax_globals and syntax_pos_globals are + // captured and set on the new threads. 
ty::tls::with_thread_locals sets up + // thread local callbacks from libsyntax + let main_handler = move |worker: &mut dyn FnMut()| { + syntax::GLOBALS.set(syntax_globals, || { + syntax_pos::GLOBALS.set(syntax_pos_globals, || { + if let Some(stderr) = stderr { + io::set_panic(Some(box Sink(stderr.clone()))); + } + ty::tls::with_thread_locals(|| { + ty::tls::GCX_PTR.set(gcx_ptr, || worker()) + }) + }) + }) + }; + + ThreadPool::scoped_pool(config, main_handler, with_pool).unwrap() + }) + }) + }) +} + +fn load_backend_from_dylib(path: &Path) -> fn() -> Box { + let lib = DynamicLibrary::open(Some(path)).unwrap_or_else(|err| { + let err = format!("couldn't load codegen backend {:?}: {:?}", path, err); + early_error(ErrorOutputType::default(), &err); + }); + unsafe { + match lib.symbol("__rustc_codegen_backend") { + Ok(f) => { + mem::forget(lib); + mem::transmute::<*mut u8, _>(f) + } + Err(e) => { + let err = format!("couldn't load codegen backend as it \ + doesn't export the `__rustc_codegen_backend` \ + symbol: {:?}", e); + early_error(ErrorOutputType::default(), &err); + } + } + } +} + +pub fn get_codegen_backend(sess: &Session) -> Box { + static INIT: Once = Once::new(); + + static mut LOAD: fn() -> Box = || unreachable!(); + + INIT.call_once(|| { + let codegen_name = sess.opts.debugging_opts.codegen_backend.as_ref() + .unwrap_or(&sess.target.target.options.codegen_backend); + let backend = match &codegen_name[..] { + filename if filename.contains(".") => { + load_backend_from_dylib(filename.as_ref()) + } + codegen_name => get_codegen_sysroot(codegen_name), + }; + + unsafe { + LOAD = backend; + } + }); + let backend = unsafe { LOAD() }; + backend.init(sess); + backend +} + +pub fn get_codegen_sysroot(backend_name: &str) -> fn() -> Box { + // For now we only allow this function to be called once as it'll dlopen a + // few things, which seems to work best if we only do that once. In + // general this assertion never trips due to the once guard in `get_codegen_backend`, + // but there's a few manual calls to this function in this file we protect + // against. + static LOADED: AtomicBool = AtomicBool::new(false); + assert!(!LOADED.fetch_or(true, Ordering::SeqCst), + "cannot load the default codegen backend twice"); + + let target = session::config::host_triple(); + let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()]; + let path = current_dll_path() + .and_then(|s| s.canonicalize().ok()); + if let Some(dll) = path { + // use `parent` twice to chop off the file name and then also the + // directory containing the dll which should be either `lib` or `bin`. + if let Some(path) = dll.parent().and_then(|p| p.parent()) { + // The original `path` pointed at the `rustc_driver` crate's dll. + // Now that dll should only be in one of two locations. The first is + // in the compiler's libdir, for example `$sysroot/lib/*.dll`. The + // other is the target's libdir, for example + // `$sysroot/lib/rustlib/$target/lib/*.dll`. + // + // We don't know which, so let's assume that if our `path` above + // ends in `$target` we *could* be in the target libdir, and always + // assume that we may be in the main libdir. 
+ sysroot_candidates.push(path.to_owned()); + + if path.ends_with(target) { + sysroot_candidates.extend(path.parent() // chop off `$target` + .and_then(|p| p.parent()) // chop off `rustlib` + .and_then(|p| p.parent()) // chop off `lib` + .map(|s| s.to_owned())); + } + } + } + + let sysroot = sysroot_candidates.iter() + .map(|sysroot| { + let libdir = filesearch::relative_target_lib_path(&sysroot, &target); + sysroot.join(libdir).with_file_name( + option_env!("CFG_CODEGEN_BACKENDS_DIR").unwrap_or("codegen-backends")) + }) + .filter(|f| { + info!("codegen backend candidate: {}", f.display()); + f.exists() + }) + .next(); + let sysroot = sysroot.unwrap_or_else(|| { + let candidates = sysroot_candidates.iter() + .map(|p| p.display().to_string()) + .collect::>() + .join("\n* "); + let err = format!("failed to find a `codegen-backends` folder \ + in the sysroot candidates:\n* {}", candidates); + early_error(ErrorOutputType::default(), &err); + }); + info!("probing {} for a codegen backend", sysroot.display()); + + let d = sysroot.read_dir().unwrap_or_else(|e| { + let err = format!("failed to load default codegen backend, couldn't \ + read `{}`: {}", sysroot.display(), e); + early_error(ErrorOutputType::default(), &err); + }); + + let mut file: Option = None; + + let expected_name = format!("rustc_codegen_llvm-{}", backend_name); + for entry in d.filter_map(|e| e.ok()) { + let path = entry.path(); + let filename = match path.file_name().and_then(|s| s.to_str()) { + Some(s) => s, + None => continue, + }; + if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) { + continue + } + let name = &filename[DLL_PREFIX.len() .. filename.len() - DLL_SUFFIX.len()]; + if name != expected_name { + continue + } + if let Some(ref prev) = file { + let err = format!("duplicate codegen backends found\n\ + first: {}\n\ + second: {}\n\ + ", prev.display(), path.display()); + early_error(ErrorOutputType::default(), &err); + } + file = Some(path.clone()); + } + + match file { + Some(ref s) => return load_backend_from_dylib(s), + None => { + let err = format!("failed to load default codegen backend for `{}`, \ + no appropriate codegen dylib found in `{}`", + backend_name, sysroot.display()); + early_error(ErrorOutputType::default(), &err); + } + } + + #[cfg(unix)] + fn current_dll_path() -> Option { + use std::ffi::{OsStr, CStr}; + use std::os::unix::prelude::*; + + unsafe { + let addr = current_dll_path as usize as *mut _; + let mut info = mem::zeroed(); + if libc::dladdr(addr, &mut info) == 0 { + info!("dladdr failed"); + return None + } + if info.dli_fname.is_null() { + info!("dladdr returned null pointer"); + return None + } + let bytes = CStr::from_ptr(info.dli_fname).to_bytes(); + let os = OsStr::from_bytes(bytes); + Some(PathBuf::from(os)) + } + } + + #[cfg(windows)] + fn current_dll_path() -> Option { + use std::ffi::OsString; + use std::os::windows::prelude::*; + + extern "system" { + fn GetModuleHandleExW(dwFlags: u32, + lpModuleName: usize, + phModule: *mut usize) -> i32; + fn GetModuleFileNameW(hModule: usize, + lpFilename: *mut u16, + nSize: u32) -> u32; + } + + const GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS: u32 = 0x00000004; + + unsafe { + let mut module = 0; + let r = GetModuleHandleExW(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, + current_dll_path as usize, + &mut module); + if r == 0 { + info!("GetModuleHandleExW failed: {}", io::Error::last_os_error()); + return None + } + let mut space = Vec::with_capacity(1024); + let r = GetModuleFileNameW(module, + space.as_mut_ptr(), + 
space.capacity() as u32); + if r == 0 { + info!("GetModuleFileNameW failed: {}", io::Error::last_os_error()); + return None + } + let r = r as usize; + if r >= space.capacity() { + info!("our buffer was too small? {}", + io::Error::last_os_error()); + return None + } + space.set_len(r); + let os = OsString::from_wide(&space); + Some(PathBuf::from(os)) + } + } +} + +pub(crate) fn compute_crate_disambiguator(session: &Session) -> CrateDisambiguator { + use std::hash::Hasher; + + // The crate_disambiguator is a 128 bit hash. The disambiguator is fed + // into various other hashes quite a bit (symbol hashes, incr. comp. hashes, + // debuginfo type IDs, etc), so we don't want it to be too wide. 128 bits + // should still be safe enough to avoid collisions in practice. + let mut hasher = StableHasher::::new(); + + let mut metadata = session.opts.cg.metadata.clone(); + // We don't want the crate_disambiguator to dependent on the order + // -C metadata arguments, so sort them: + metadata.sort(); + // Every distinct -C metadata value is only incorporated once: + metadata.dedup(); + + hasher.write(b"metadata"); + for s in &metadata { + // Also incorporate the length of a metadata string, so that we generate + // different values for `-Cmetadata=ab -Cmetadata=c` and + // `-Cmetadata=a -Cmetadata=bc` + hasher.write_usize(s.len()); + hasher.write(s.as_bytes()); + } + + // Also incorporate crate type, so that we don't get symbol conflicts when + // linking against a library of the same name, if this is an executable. + let is_exe = session + .crate_types + .borrow() + .contains(&config::CrateType::Executable); + hasher.write(if is_exe { b"exe" } else { b"lib" }); + + CrateDisambiguator::from(hasher.finish()) +} + +pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec { + // Unconditionally collect crate types from attributes to make them used + let attr_types: Vec = attrs + .iter() + .filter_map(|a| { + if a.check_name("crate_type") { + match a.value_str() { + Some(ref n) if *n == "rlib" => Some(config::CrateType::Rlib), + Some(ref n) if *n == "dylib" => Some(config::CrateType::Dylib), + Some(ref n) if *n == "cdylib" => Some(config::CrateType::Cdylib), + Some(ref n) if *n == "lib" => Some(config::default_lib_output()), + Some(ref n) if *n == "staticlib" => Some(config::CrateType::Staticlib), + Some(ref n) if *n == "proc-macro" => Some(config::CrateType::ProcMacro), + Some(ref n) if *n == "bin" => Some(config::CrateType::Executable), + Some(ref n) => { + let crate_types = vec![ + Symbol::intern("rlib"), + Symbol::intern("dylib"), + Symbol::intern("cdylib"), + Symbol::intern("lib"), + Symbol::intern("staticlib"), + Symbol::intern("proc-macro"), + Symbol::intern("bin") + ]; + + if let ast::MetaItemKind::NameValue(spanned) = a.meta().unwrap().node { + let span = spanned.span; + let lev_candidate = find_best_match_for_name( + crate_types.iter(), + &n.as_str(), + None + ); + if let Some(candidate) = lev_candidate { + session.buffer_lint_with_diagnostic( + lint::builtin::UNKNOWN_CRATE_TYPES, + ast::CRATE_NODE_ID, + span, + "invalid `crate_type` value", + lint::builtin::BuiltinLintDiagnostics:: + UnknownCrateTypes( + span, + "did you mean".to_string(), + format!("\"{}\"", candidate) + ) + ); + } else { + session.buffer_lint( + lint::builtin::UNKNOWN_CRATE_TYPES, + ast::CRATE_NODE_ID, + span, + "invalid `crate_type` value" + ); + } + } + None + } + None => None + } + } else { + None + } + }) + .collect(); + + // If we're generating a test executable, then ignore all other output + // 
styles at all other locations + if session.opts.test { + return vec![config::CrateType::Executable]; + } + + // Only check command line flags if present. If no types are specified by + // command line, then reuse the empty `base` Vec to hold the types that + // will be found in crate attributes. + let mut base = session.opts.crate_types.clone(); + if base.is_empty() { + base.extend(attr_types); + if base.is_empty() { + base.push(::rustc_codegen_utils::link::default_output_for_target( + session, + )); + } else { + base.sort(); + base.dedup(); + } + } + + base.retain(|crate_type| { + let res = !::rustc_codegen_utils::link::invalid_output_for_target(session, *crate_type); + + if !res { + session.warn(&format!( + "dropping unsupported crate type `{}` for target `{}`", + *crate_type, session.opts.target_triple + )); + } + + res + }); + + base +} + +pub fn build_output_filenames( + input: &Input, + odir: &Option, + ofile: &Option, + attrs: &[ast::Attribute], + sess: &Session, +) -> OutputFilenames { + match *ofile { + None => { + // "-" as input file will cause the parser to read from stdin so we + // have to make up a name + // We want to toss everything after the final '.' + let dirpath = (*odir).as_ref().cloned().unwrap_or_default(); + + // If a crate name is present, we use it as the link name + let stem = sess.opts + .crate_name + .clone() + .or_else(|| attr::find_crate_name(attrs).map(|n| n.to_string())) + .unwrap_or_else(|| input.filestem().to_owned()); + + OutputFilenames { + out_directory: dirpath, + out_filestem: stem, + single_output_file: None, + extra: sess.opts.cg.extra_filename.clone(), + outputs: sess.opts.output_types.clone(), + } + } + + Some(ref out_file) => { + let unnamed_output_types = sess.opts + .output_types + .values() + .filter(|a| a.is_none()) + .count(); + let ofile = if unnamed_output_types > 1 { + sess.warn( + "due to multiple output types requested, the explicitly specified \ + output file name will be adapted for each output type", + ); + None + } else { + Some(out_file.clone()) + }; + if *odir != None { + sess.warn("ignoring --out-dir flag due to -o flag"); + } + if !sess.opts.cg.extra_filename.is_empty() { + sess.warn("ignoring -C extra-filename flag due to -o flag"); + } + + OutputFilenames { + out_directory: out_file.parent().unwrap_or_else(|| Path::new("")).to_path_buf(), + out_filestem: out_file + .file_stem() + .unwrap_or_default() + .to_str() + .unwrap() + .to_string(), + single_output_file: ofile, + extra: sess.opts.cg.extra_filename.clone(), + outputs: sess.opts.output_types.clone(), + } + } + } +} + +// Note: Also used by librustdoc, see PR #43348. Consider moving this struct elsewhere. +// +// FIXME: Currently the `everybody_loops` transformation is not applied to: +// * `const fn`, due to issue #43636 that `loop` is not supported for const evaluation. We are +// waiting for miri to fix that. +// * `impl Trait`, due to issue #43869 that functions returning impl Trait cannot be diverging. +// Solving this may require `!` to implement every trait, which relies on the an even more +// ambitious form of the closed RFC #1637. See also [#34511]. 
+// +// [#34511]: https://github.com/rust-lang/rust/issues/34511#issuecomment-322340401 +pub struct ReplaceBodyWithLoop<'a> { + within_static_or_const: bool, + nested_blocks: Option>, + sess: &'a Session, +} + +impl<'a> ReplaceBodyWithLoop<'a> { + pub fn new(sess: &'a Session) -> ReplaceBodyWithLoop<'a> { + ReplaceBodyWithLoop { + within_static_or_const: false, + nested_blocks: None, + sess + } + } + + fn run R>(&mut self, is_const: bool, action: F) -> R { + let old_const = mem::replace(&mut self.within_static_or_const, is_const); + let old_blocks = self.nested_blocks.take(); + let ret = action(self); + self.within_static_or_const = old_const; + self.nested_blocks = old_blocks; + ret + } + + fn should_ignore_fn(ret_ty: &ast::FnDecl) -> bool { + if let ast::FunctionRetTy::Ty(ref ty) = ret_ty.output { + fn involves_impl_trait(ty: &ast::Ty) -> bool { + match ty.node { + ast::TyKind::ImplTrait(..) => true, + ast::TyKind::Slice(ref subty) | + ast::TyKind::Array(ref subty, _) | + ast::TyKind::Ptr(ast::MutTy { ty: ref subty, .. }) | + ast::TyKind::Rptr(_, ast::MutTy { ty: ref subty, .. }) | + ast::TyKind::Paren(ref subty) => involves_impl_trait(subty), + ast::TyKind::Tup(ref tys) => any_involves_impl_trait(tys.iter()), + ast::TyKind::Path(_, ref path) => path.segments.iter().any(|seg| { + match seg.args.as_ref().map(|generic_arg| &**generic_arg) { + None => false, + Some(&ast::GenericArgs::AngleBracketed(ref data)) => { + let types = data.args.iter().filter_map(|arg| match arg { + ast::GenericArg::Type(ty) => Some(ty), + _ => None, + }); + any_involves_impl_trait(types.into_iter()) || + any_involves_impl_trait(data.bindings.iter().map(|b| &b.ty)) + }, + Some(&ast::GenericArgs::Parenthesized(ref data)) => { + any_involves_impl_trait(data.inputs.iter()) || + any_involves_impl_trait(data.output.iter()) + } + } + }), + _ => false, + } + } + + fn any_involves_impl_trait<'a, I: Iterator>>(mut it: I) -> bool { + it.any(|subty| involves_impl_trait(subty)) + } + + involves_impl_trait(ty) + } else { + false + } + } +} + +impl<'a> MutVisitor for ReplaceBodyWithLoop<'a> { + fn visit_item_kind(&mut self, i: &mut ast::ItemKind) { + let is_const = match i { + ast::ItemKind::Static(..) | ast::ItemKind::Const(..) => true, + ast::ItemKind::Fn(ref decl, ref header, _, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + _ => false, + }; + self.run(is_const, |s| noop_visit_item_kind(i, s)) + } + + fn flat_map_trait_item(&mut self, i: ast::TraitItem) -> SmallVec<[ast::TraitItem; 1]> { + let is_const = match i.node { + ast::TraitItemKind::Const(..) => true, + ast::TraitItemKind::Method(ast::MethodSig { ref decl, ref header, .. }, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + _ => false, + }; + self.run(is_const, |s| noop_flat_map_trait_item(i, s)) + } + + fn flat_map_impl_item(&mut self, i: ast::ImplItem) -> SmallVec<[ast::ImplItem; 1]> { + let is_const = match i.node { + ast::ImplItemKind::Const(..) => true, + ast::ImplItemKind::Method(ast::MethodSig { ref decl, ref header, .. 
}, _) => + header.constness.node == ast::Constness::Const || Self::should_ignore_fn(decl), + _ => false, + }; + self.run(is_const, |s| noop_flat_map_impl_item(i, s)) + } + + fn visit_anon_const(&mut self, c: &mut ast::AnonConst) { + self.run(true, |s| noop_visit_anon_const(c, s)) + } + + fn visit_block(&mut self, b: &mut P) { + fn stmt_to_block(rules: ast::BlockCheckMode, + s: Option, + sess: &Session) -> ast::Block { + ast::Block { + stmts: s.into_iter().collect(), + rules, + id: sess.next_node_id(), + span: syntax_pos::DUMMY_SP, + } + } + + fn block_to_stmt(b: ast::Block, sess: &Session) -> ast::Stmt { + let expr = P(ast::Expr { + id: sess.next_node_id(), + node: ast::ExprKind::Block(P(b), None), + span: syntax_pos::DUMMY_SP, + attrs: ThinVec::new(), + }); + + ast::Stmt { + id: sess.next_node_id(), + node: ast::StmtKind::Expr(expr), + span: syntax_pos::DUMMY_SP, + } + } + + let empty_block = stmt_to_block(BlockCheckMode::Default, None, self.sess); + let loop_expr = P(ast::Expr { + node: ast::ExprKind::Loop(P(empty_block), None), + id: self.sess.next_node_id(), + span: syntax_pos::DUMMY_SP, + attrs: ThinVec::new(), + }); + + let loop_stmt = ast::Stmt { + id: self.sess.next_node_id(), + span: syntax_pos::DUMMY_SP, + node: ast::StmtKind::Expr(loop_expr), + }; + + if self.within_static_or_const { + noop_visit_block(b, self) + } else { + visit_clobber(b.deref_mut(), |b| { + let mut stmts = vec![]; + for s in b.stmts { + let old_blocks = self.nested_blocks.replace(vec![]); + + stmts.extend(self.flat_map_stmt(s).into_iter().filter(|s| s.is_item())); + + // we put a Some in there earlier with that replace(), so this is valid + let new_blocks = self.nested_blocks.take().unwrap(); + self.nested_blocks = old_blocks; + stmts.extend(new_blocks.into_iter().map(|b| block_to_stmt(b, &self.sess))); + } + + let mut new_block = ast::Block { + stmts, + ..b + }; + + if let Some(old_blocks) = self.nested_blocks.as_mut() { + //push our fresh block onto the cache and yield an empty block with `loop {}` + if !new_block.stmts.is_empty() { + old_blocks.push(new_block); + } + + stmt_to_block(b.rules, Some(loop_stmt), self.sess) + } else { + //push `loop {}` onto the end of our fresh block and yield that + new_block.stmts.push(loop_stmt); + + new_block + } + }) + } + } + + // in general the pretty printer processes unexpanded code, so + // we override the default `visit_mac` method which panics. + fn visit_mac(&mut self, mac: &mut ast::Mac) { + noop_visit_mac(mac, self) + } +} diff --git a/src/librustc_lint/Cargo.toml b/src/librustc_lint/Cargo.toml index 7fb7a06ea1ad5..fd2b635faefb4 100644 --- a/src/librustc_lint/Cargo.toml +++ b/src/librustc_lint/Cargo.toml @@ -2,12 +2,12 @@ authors = ["The Rust Project Developers"] name = "rustc_lint" version = "0.0.0" +edition = "2018" [lib] name = "rustc_lint" path = "lib.rs" crate-type = ["dylib"] -test = false [dependencies] log = "0.4" diff --git a/src/librustc_lint/builtin.rs b/src/librustc_lint/builtin.rs index 7dd5d3c1cbc4c..7fe047ec2c65a 100644 --- a/src/librustc_lint/builtin.rs +++ b/src/librustc_lint/builtin.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Lints in the Rust compiler. //! //! 
This contains lints which can feasibly be implemented as their own @@ -23,16 +13,20 @@ //! `LintPass` (also, note that such lints will need to be defined in //! `rustc::lint::builtin`, not here). //! -//! If you define a new `LintPass`, you will also need to add it to the -//! `add_builtin!` or `add_builtin_with_new!` invocation in `lib.rs`. -//! Use the former for unit-like structs and the latter for structs with -//! a `pub fn new()`. +//! If you define a new `EarlyLintPass`, you will also need to add it to the +//! `add_early_builtin!` or `add_early_builtin_with_new!` invocation in +//! `lib.rs`. Use the former for unit-like structs and the latter for structs +//! with a `pub fn new()`. +//! +//! If you define a new `LateLintPass`, you will also need to add it to the +//! `late_lint_methods!` invocation in `lib.rs`. use rustc::hir::def::Def; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::ty::{self, Ty}; +use rustc::{lint, util}; use hir::Node; -use util::nodemap::NodeSet; +use util::nodemap::HirIdSet; use lint::{LateContext, LintContext, LintArray}; use lint::{LintPass, LateLintPass, EarlyLintPass, EarlyContext}; @@ -42,19 +36,23 @@ use syntax::tokenstream::{TokenTree, TokenStream}; use syntax::ast; use syntax::ptr::P; use syntax::ast::Expr; -use syntax::attr; +use syntax::attr::{self, HasAttrs}; use syntax::source_map::Spanned; use syntax::edition::Edition; -use syntax::feature_gate::{AttributeGate, AttributeType, Stability, deprecated_attributes}; +use syntax::feature_gate::{AttributeGate, AttributeTemplate, AttributeType}; +use syntax::feature_gate::{Stability, deprecated_attributes}; use syntax_pos::{BytePos, Span, SyntaxContext}; use syntax::symbol::keywords; use syntax::errors::{Applicability, DiagnosticBuilder}; use syntax::print::pprust::expr_to_string; +use syntax::visit::FnKind; +use syntax::struct_span_err; use rustc::hir::{self, GenericParamKind, PatKind}; -use rustc::hir::intravisit::FnKind; -use nonstandard_style::{MethodLateContext, method_context}; +use crate::nonstandard_style::{MethodLateContext, method_context}; + +use log::debug; // hardwired lints from librustc pub use lint::builtin::*; @@ -65,17 +63,10 @@ declare_lint! { "suggest using `loop { }` instead of `while true { }`" } -#[derive(Copy, Clone)] -pub struct WhileTrue; - -impl LintPass for WhileTrue { - fn get_lints(&self) -> LintArray { - lint_array!(WHILE_TRUE) - } -} +declare_lint_pass!(WhileTrue => [WHILE_TRUE]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for WhileTrue { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { + fn check_expr(&mut self, cx: &LateContext<'_, '_>, e: &hir::Expr) { if let hir::ExprKind::While(ref cond, ..) = e.node { if let hir::ExprKind::Lit(ref lit) = cond.node { if let ast::LitKind::Bool(true) = lit.node { @@ -83,7 +74,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for WhileTrue { let msg = "denote infinite loops with `loop { ... }`"; let condition_span = cx.tcx.sess.source_map().def_span(e.span); let mut err = cx.struct_span_lint(WHILE_TRUE, condition_span, msg); - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( condition_span, "use `loop`", "loop".to_owned(), @@ -103,11 +94,10 @@ declare_lint! 
{ "use of owned (Box type) heap memory" } -#[derive(Copy, Clone)] -pub struct BoxPointers; +declare_lint_pass!(BoxPointers => [BOX_POINTERS]); impl BoxPointers { - fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext, span: Span, ty: Ty) { + fn check_heap_type<'a, 'tcx>(&self, cx: &LateContext<'_, '_>, span: Span, ty: Ty<'_>) { for leaf_ty in ty.walk() { if leaf_ty.is_box() { let m = format!("type uses owned (Box type) pointers: {}", ty); @@ -117,21 +107,15 @@ impl BoxPointers { } } -impl LintPass for BoxPointers { - fn get_lints(&self) -> LintArray { - lint_array!(BOX_POINTERS) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for BoxPointers { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { match it.node { hir::ItemKind::Fn(..) | hir::ItemKind::Ty(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => { - let def_id = cx.tcx.hir().local_def_id(it.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(it.hir_id); self.check_heap_type(cx, it.span, cx.tcx.type_of(def_id)) } _ => () @@ -142,7 +126,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for BoxPointers { hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { for struct_field in struct_def.fields() { - let def_id = cx.tcx.hir().local_def_id(struct_field.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(struct_field.hir_id); self.check_heap_type(cx, struct_field.span, cx.tcx.type_of(def_id)); } @@ -151,8 +135,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for BoxPointers { } } - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - let ty = cx.tables.node_id_to_type(e.hir_id); + fn check_expr(&mut self, cx: &LateContext<'_, '_>, e: &hir::Expr) { + let ty = cx.tables.node_type(e.hir_id); self.check_heap_type(cx, e.span, ty); } } @@ -163,17 +147,10 @@ declare_lint! { "using `Struct { x: x }` instead of `Struct { x }` in a pattern" } -#[derive(Copy, Clone)] -pub struct NonShorthandFieldPatterns; - -impl LintPass for NonShorthandFieldPatterns { - fn get_lints(&self) -> LintArray { - lint_array!(NON_SHORTHAND_FIELD_PATTERNS) - } -} +declare_lint_pass!(NonShorthandFieldPatterns => [NON_SHORTHAND_FIELD_PATTERNS]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonShorthandFieldPatterns { - fn check_pat(&mut self, cx: &LateContext, pat: &hir::Pat) { + fn check_pat(&mut self, cx: &LateContext<'_, '_>, pat: &hir::Pat) { if let PatKind::Struct(ref qpath, ref field_pats, _) = pat.node { let variant = cx.tables.pat_ty(pat).ty_adt_def() .expect("struct pattern type is not an ADT") @@ -190,13 +167,13 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonShorthandFieldPatterns { } if let PatKind::Binding(_, _, ident, None) = fieldpat.node.pat.node { if cx.tcx.find_field_index(ident, &variant) == - Some(cx.tcx.field_index(fieldpat.node.id, cx.tables)) { + Some(cx.tcx.field_index(fieldpat.node.hir_id, cx.tables)) { let mut err = cx.struct_span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span, &format!("the `{}:` in this pattern is redundant", ident)); let subspan = cx.tcx.sess.source_map().span_through_char(fieldpat.span, ':'); - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( subspan, "remove this", ident.to_string(), @@ -216,17 +193,10 @@ declare_lint! 
{ "usage of `unsafe` code" } -#[derive(Copy, Clone)] -pub struct UnsafeCode; - -impl LintPass for UnsafeCode { - fn get_lints(&self) -> LintArray { - lint_array!(UNSAFE_CODE) - } -} +declare_lint_pass!(UnsafeCode => [UNSAFE_CODE]); impl UnsafeCode { - fn report_unsafe(&self, cx: &LateContext, span: Span, desc: &'static str) { + fn report_unsafe(&self, cx: &EarlyContext<'_>, span: Span, desc: &'static str) { // This comes from a macro that has #[allow_internal_unsafe]. if span.allows_unsafe() { return; @@ -236,23 +206,31 @@ impl UnsafeCode { } } -impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnsafeCode { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { - if let hir::ExprKind::Block(ref blk, _) = e.node { +impl EarlyLintPass for UnsafeCode { + fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) { + if attr.check_name("allow_internal_unsafe") { + self.report_unsafe(cx, attr.span, "`allow_internal_unsafe` allows defining \ + macros using unsafe without triggering \ + the `unsafe_code` lint at their call site"); + } + } + + fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) { + if let ast::ExprKind::Block(ref blk, _) = e.node { // Don't warn about generated blocks, that'll just pollute the output. - if blk.rules == hir::UnsafeBlock(hir::UserProvided) { + if blk.rules == ast::BlockCheckMode::Unsafe(ast::UserProvided) { self.report_unsafe(cx, blk.span, "usage of an `unsafe` block"); } } } - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) { match it.node { - hir::ItemKind::Trait(_, hir::Unsafety::Unsafe, ..) => { + ast::ItemKind::Trait(_, ast::Unsafety::Unsafe, ..) => { self.report_unsafe(cx, it.span, "declaration of an `unsafe` trait") } - hir::ItemKind::Impl(hir::Unsafety::Unsafe, ..) => { + ast::ItemKind::Impl(ast::Unsafety::Unsafe, ..) => { self.report_unsafe(cx, it.span, "implementation of an `unsafe` trait") } @@ -261,19 +239,18 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnsafeCode { } fn check_fn(&mut self, - cx: &LateContext, - fk: FnKind<'tcx>, - _: &hir::FnDecl, - _: &hir::Body, + cx: &EarlyContext<'_>, + fk: FnKind<'_>, + _: &ast::FnDecl, span: Span, _: ast::NodeId) { match fk { - FnKind::ItemFn(_, _, hir::FnHeader { unsafety: hir::Unsafety::Unsafe, .. }, ..) => { + FnKind::ItemFn(_, ast::FnHeader { unsafety: ast::Unsafety::Unsafe, .. }, ..) => { self.report_unsafe(cx, span, "declaration of an `unsafe` function") } FnKind::Method(_, sig, ..) => { - if sig.header.unsafety == hir::Unsafety::Unsafe { + if sig.header.unsafety == ast::Unsafety::Unsafe { self.report_unsafe(cx, span, "implementation of an `unsafe` method") } } @@ -282,9 +259,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnsafeCode { } } - fn check_trait_item(&mut self, cx: &LateContext, item: &hir::TraitItem) { - if let hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Required(_)) = item.node { - if sig.header.unsafety == hir::Unsafety::Unsafe { + fn check_trait_item(&mut self, cx: &EarlyContext<'_>, item: &ast::TraitItem) { + if let ast::TraitItemKind::Method(ref sig, None) = item.node { + if sig.header.unsafety == ast::Unsafety::Unsafe { self.report_unsafe(cx, item.span, "declaration of an `unsafe` method") } } @@ -299,12 +276,33 @@ declare_lint! { } pub struct MissingDoc { - /// Stack of whether #[doc(hidden)] is set - /// at each level which has lint attributes. + /// Stack of whether `#[doc(hidden)]` is set at each level which has lint attributes. 
doc_hidden_stack: Vec, /// Private traits or trait items that leaked through. Don't check their methods. - private_traits: FxHashSet, + private_traits: FxHashSet, +} + +impl_lint_pass!(MissingDoc => [MISSING_DOCS]); + +fn has_doc(attr: &ast::Attribute) -> bool { + if !attr.check_name("doc") { + return false; + } + + if attr.is_value_str() { + return true; + } + + if let Some(list) = attr.meta_item_list() { + for meta in list { + if meta.check_name("include") || meta.check_name("hidden") { + return true; + } + } + } + + false } impl MissingDoc { @@ -320,8 +318,8 @@ impl MissingDoc { } fn check_missing_docs_attrs(&self, - cx: &LateContext, - id: Option, + cx: &LateContext<'_, '_>, + id: Option, attrs: &[ast::Attribute], sp: Span, desc: &'static str) { @@ -345,26 +343,6 @@ impl MissingDoc { } } - fn has_doc(attr: &ast::Attribute) -> bool { - if !attr.check_name("doc") { - return false; - } - - if attr.is_value_str() { - return true; - } - - if let Some(list) = attr.meta_item_list() { - for meta in list { - if meta.check_name("include") { - return true; - } - } - } - - false - } - let has_doc = attrs.iter().any(|a| has_doc(a)); if !has_doc { cx.span_lint(MISSING_DOCS, @@ -374,14 +352,8 @@ impl MissingDoc { } } -impl LintPass for MissingDoc { - fn get_lints(&self) -> LintArray { - lint_array!(MISSING_DOCS) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { - fn enter_lint_attrs(&mut self, _: &LateContext, attrs: &[ast::Attribute]) { + fn enter_lint_attrs(&mut self, _: &LateContext<'_, '_>, attrs: &[ast::Attribute]) { let doc_hidden = self.doc_hidden() || attrs.iter().any(|attr| { attr.check_name("doc") && @@ -393,15 +365,24 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { self.doc_hidden_stack.push(doc_hidden); } - fn exit_lint_attrs(&mut self, _: &LateContext, _attrs: &[ast::Attribute]) { + fn exit_lint_attrs(&mut self, _: &LateContext<'_, '_>, _attrs: &[ast::Attribute]) { self.doc_hidden_stack.pop().expect("empty doc_hidden_stack"); } - fn check_crate(&mut self, cx: &LateContext, krate: &hir::Crate) { + fn check_crate(&mut self, cx: &LateContext<'_, '_>, krate: &hir::Crate) { self.check_missing_docs_attrs(cx, None, &krate.attrs, krate.span, "crate"); + + for macro_def in &krate.exported_macros { + let has_doc = macro_def.attrs.iter().any(|a| has_doc(a)); + if !has_doc { + cx.span_lint(MISSING_DOCS, + cx.tcx.sess.source_map().def_span(macro_def.span), + "missing documentation for macro"); + } + } } - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { let desc = match it.node { hir::ItemKind::Fn(..) => "a function", hir::ItemKind::Mod(..) => "a module", @@ -411,9 +392,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { hir::ItemKind::Trait(.., ref trait_item_refs) => { // Issue #11592, traits are always considered exported, even when private. if let hir::VisibilityKind::Inherited = it.vis.node { - self.private_traits.insert(it.id); + self.private_traits.insert(it.hir_id); for trait_item_ref in trait_item_refs { - self.private_traits.insert(trait_item_ref.id.node_id); + self.private_traits.insert(trait_item_ref.id.hir_id); } return; } @@ -424,12 +405,12 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { // If the trait is private, add the impl items to private_traits so they don't get // reported for missing docs. 
let real_trait = trait_ref.path.def.def_id(); - if let Some(node_id) = cx.tcx.hir().as_local_node_id(real_trait) { - match cx.tcx.hir().find(node_id) { + if let Some(hir_id) = cx.tcx.hir().as_local_hir_id(real_trait) { + match cx.tcx.hir().find_by_hir_id(hir_id) { Some(Node::Item(item)) => { if let hir::VisibilityKind::Inherited = item.vis.node { for impl_item_ref in impl_item_refs { - self.private_traits.insert(impl_item_ref.id.node_id); + self.private_traits.insert(impl_item_ref.id.hir_id); } } } @@ -443,11 +424,11 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { _ => return, }; - self.check_missing_docs_attrs(cx, Some(it.id), &it.attrs, it.span, desc); + self.check_missing_docs_attrs(cx, Some(it.hir_id), &it.attrs, it.span, desc); } - fn check_trait_item(&mut self, cx: &LateContext, trait_item: &hir::TraitItem) { - if self.private_traits.contains(&trait_item.id) { + fn check_trait_item(&mut self, cx: &LateContext<'_, '_>, trait_item: &hir::TraitItem) { + if self.private_traits.contains(&trait_item.hir_id) { return; } @@ -458,15 +439,15 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { }; self.check_missing_docs_attrs(cx, - Some(trait_item.id), + Some(trait_item.hir_id), &trait_item.attrs, trait_item.span, desc); } - fn check_impl_item(&mut self, cx: &LateContext, impl_item: &hir::ImplItem) { + fn check_impl_item(&mut self, cx: &LateContext<'_, '_>, impl_item: &hir::ImplItem) { // If the method is an impl for a trait, don't doc. - if method_context(cx, impl_item.id) == MethodLateContext::TraitImpl { + if method_context(cx, impl_item.hir_id) == MethodLateContext::TraitImpl { return; } @@ -477,25 +458,25 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDoc { hir::ImplItemKind::Existential(_) => "an associated existential type", }; self.check_missing_docs_attrs(cx, - Some(impl_item.id), + Some(impl_item.hir_id), &impl_item.attrs, impl_item.span, desc); } - fn check_struct_field(&mut self, cx: &LateContext, sf: &hir::StructField) { + fn check_struct_field(&mut self, cx: &LateContext<'_, '_>, sf: &hir::StructField) { if !sf.is_positional() { self.check_missing_docs_attrs(cx, - Some(sf.id), + Some(sf.hir_id), &sf.attrs, sf.span, "a struct field") } } - fn check_variant(&mut self, cx: &LateContext, v: &hir::Variant, _: &hir::Generics) { + fn check_variant(&mut self, cx: &LateContext<'_, '_>, v: &hir::Variant, _: &hir::Generics) { self.check_missing_docs_attrs(cx, - Some(v.node.data.id()), + Some(v.node.id), &v.node.attrs, v.span, "a variant"); @@ -508,18 +489,11 @@ declare_lint! 
{ "detects potentially-forgotten implementations of `Copy`" } -#[derive(Copy, Clone)] -pub struct MissingCopyImplementations; - -impl LintPass for MissingCopyImplementations { - fn get_lints(&self) -> LintArray { - lint_array!(MISSING_COPY_IMPLEMENTATIONS) - } -} +declare_lint_pass!(MissingCopyImplementations => [MISSING_COPY_IMPLEMENTATIONS]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingCopyImplementations { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { - if !cx.access_levels.is_reachable(item.id) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, item: &hir::Item) { + if !cx.access_levels.is_reachable(item.hir_id) { return; } let (def, ty) = match item.node { @@ -527,21 +501,21 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingCopyImplementations { if !ast_generics.params.is_empty() { return; } - let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.id)); + let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id_from_hir_id(item.hir_id)); (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) } hir::ItemKind::Union(_, ref ast_generics) => { if !ast_generics.params.is_empty() { return; } - let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.id)); + let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id_from_hir_id(item.hir_id)); (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) } hir::ItemKind::Enum(_, ref ast_generics) => { if !ast_generics.params.is_empty() { return; } - let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id(item.id)); + let def = cx.tcx.adt_def(cx.tcx.hir().local_def_id_from_hir_id(item.hir_id)); (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[]))) } _ => return, @@ -550,7 +524,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingCopyImplementations { return; } let param_env = ty::ParamEnv::empty(); - if !ty.moves_by_default(cx.tcx, param_env, item.span) { + if ty.is_copy_modulo_regions(cx.tcx, param_env, item.span) { return; } if param_env.can_type_implement_copy(cx.tcx, ty).is_ok() { @@ -569,24 +543,20 @@ declare_lint! 
{ } pub struct MissingDebugImplementations { - impling_types: Option, + impling_types: Option, } +impl_lint_pass!(MissingDebugImplementations => [MISSING_DEBUG_IMPLEMENTATIONS]); + impl MissingDebugImplementations { pub fn new() -> MissingDebugImplementations { MissingDebugImplementations { impling_types: None } } } -impl LintPass for MissingDebugImplementations { - fn get_lints(&self) -> LintArray { - lint_array!(MISSING_DEBUG_IMPLEMENTATIONS) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { - if !cx.access_levels.is_reachable(item.id) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, item: &hir::Item) { + if !cx.access_levels.is_reachable(item.hir_id) { return; } @@ -603,11 +573,11 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations { }; if self.impling_types.is_none() { - let mut impls = NodeSet::default(); + let mut impls = HirIdSet::default(); cx.tcx.for_each_impl(debug, |d| { if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() { - if let Some(node_id) = cx.tcx.hir().as_local_node_id(ty_def.did) { - impls.insert(node_id); + if let Some(hir_id) = cx.tcx.hir().as_local_hir_id(ty_def.did) { + impls.insert(hir_id); } } }); @@ -616,7 +586,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MissingDebugImplementations { debug!("{:?}", self.impling_types); } - if !self.impling_types.as_ref().unwrap().contains(&item.id) { + if !self.impling_types.as_ref().unwrap().contains(&item.hir_id) { cx.span_lint(MISSING_DEBUG_IMPLEMENTATIONS, item.span, "type does not implement `fmt::Debug`; consider adding #[derive(Debug)] \ @@ -631,18 +601,13 @@ declare_lint! { "detects anonymous parameters" } -/// Checks for use of anonymous parameters (RFC 1685) -#[derive(Clone)] -pub struct AnonymousParameters; - -impl LintPass for AnonymousParameters { - fn get_lints(&self) -> LintArray { - lint_array!(ANONYMOUS_PARAMETERS) - } -} +declare_lint_pass!( + /// Checks for use of anonymous parameters (RFC 1685). + AnonymousParameters => [ANONYMOUS_PARAMETERS] +); impl EarlyLintPass for AnonymousParameters { - fn check_trait_item(&mut self, cx: &EarlyContext, it: &ast::TraitItem) { + fn check_trait_item(&mut self, cx: &EarlyContext<'_>, it: &ast::TraitItem) { match it.node { ast::TraitItemKind::Method(ref sig, _) => { for arg in sig.decl.inputs.iter() { @@ -665,7 +630,7 @@ impl EarlyLintPass for AnonymousParameters { arg.pat.span, "anonymous parameters are deprecated and will be \ removed in the next edition." - ).span_suggestion_with_applicability( + ).span_suggestion( arg.pat.span, "Try naming the parameter or explicitly \ ignoring it", @@ -683,88 +648,16 @@ impl EarlyLintPass for AnonymousParameters { } } -/// Checks for incorrect use use of `repr` attributes. 
-#[derive(Clone)] -pub struct BadRepr; - -impl LintPass for BadRepr { - fn get_lints(&self) -> LintArray { - lint_array!() - } -} - -impl EarlyLintPass for BadRepr { - fn check_attribute(&mut self, cx: &EarlyContext, attr: &ast::Attribute) { - if attr.name() == "repr" { - let list = attr.meta_item_list(); - - let repr_str = |lit: &str| { format!("#[repr({})]", lit) }; - - // Emit warnings with `repr` either has a literal assignment (`#[repr = "C"]`) or - // no hints (``#[repr]`) - let has_hints = list.as_ref().map(|ref list| !list.is_empty()).unwrap_or(false); - if !has_hints { - let mut suggested = false; - let mut warn = if let Some(ref lit) = attr.value_str() { - // avoid warning about empty `repr` on `#[repr = "foo"]` - let mut warn = cx.struct_span_lint( - BAD_REPR, - attr.span, - "`repr` attribute isn't configurable with a literal", - ); - match lit.to_string().as_ref() { - | "C" | "packed" | "rust" | "transparent" - | "u8" | "u16" | "u32" | "u64" | "u128" | "usize" - | "i8" | "i16" | "i32" | "i64" | "i128" | "isize" => { - // if the literal could have been a valid `repr` arg, - // suggest the correct syntax - warn.span_suggestion_with_applicability( - attr.span, - "give `repr` a hint", - repr_str(&lit.as_str()), - Applicability::MachineApplicable - ); - suggested = true; - } - _ => { // the literal wasn't a valid `repr` arg - warn.span_label(attr.span, "needs a hint"); - } - }; - warn - } else { - let mut warn = cx.struct_span_lint( - BAD_REPR, - attr.span, - "`repr` attribute must have a hint", - ); - warn.span_label(attr.span, "needs a hint"); - warn - }; - if !suggested { - warn.help(&format!( - "valid hints include `{}`, `{}`, `{}` and `{}`", - repr_str("C"), - repr_str("packed"), - repr_str("rust"), - repr_str("transparent"), - )); - warn.note("for more information, visit \ - "); - } - warn.emit(); - } - } - } -} - -/// Checks for use of attributes which have been deprecated. +/// Check for use of attributes which have been deprecated. #[derive(Clone)] pub struct DeprecatedAttr { // This is not free to compute, so we want to keep it around, rather than // compute it for every attribute. - depr_attrs: Vec<&'static (&'static str, AttributeType, AttributeGate)>, + depr_attrs: Vec<&'static (&'static str, AttributeType, AttributeTemplate, AttributeGate)>, } +impl_lint_pass!(DeprecatedAttr => []); + impl DeprecatedAttr { pub fn new() -> DeprecatedAttr { DeprecatedAttr { @@ -773,16 +666,11 @@ impl DeprecatedAttr { } } -impl LintPass for DeprecatedAttr { - fn get_lints(&self) -> LintArray { - lint_array!() - } -} - impl EarlyLintPass for DeprecatedAttr { - fn check_attribute(&mut self, cx: &EarlyContext, attr: &ast::Attribute) { - for &&(n, _, ref g) in &self.depr_attrs { - if attr.name() == n { + fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) { + let name = attr.name_or_empty(); + for &&(n, _, _, ref g) in &self.depr_attrs { + if name == n { if let &AttributeGate::Gated(Stability::Deprecated(link, suggestion), ref name, ref reason, @@ -790,7 +678,7 @@ impl EarlyLintPass for DeprecatedAttr { let msg = format!("use of deprecated attribute `{}`: {}. See {}", name, reason, link); let mut err = cx.struct_span_lint(DEPRECATED, attr.span, &msg); - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( attr.span, suggestion.unwrap_or("remove this attribute"), String::new(), @@ -810,37 +698,84 @@ declare_lint! 
{ "detects doc comments that aren't used by rustdoc" } -#[derive(Copy, Clone)] -pub struct UnusedDocComment; - -impl LintPass for UnusedDocComment { - fn get_lints(&self) -> LintArray { - lint_array![UNUSED_DOC_COMMENTS] - } -} +declare_lint_pass!(UnusedDocComment => [UNUSED_DOC_COMMENTS]); impl UnusedDocComment { - fn warn_if_doc<'a, 'tcx, - I: Iterator, - C: LintContext<'tcx>>(&self, mut attrs: I, cx: &C) { - if let Some(attr) = attrs.find(|a| a.is_value_str() && a.check_name("doc")) { - cx.struct_span_lint(UNUSED_DOC_COMMENTS, attr.span, "doc comment not used by rustdoc") - .emit(); + fn warn_if_doc( + &self, + cx: &EarlyContext<'_>, + node_span: Span, + node_kind: &str, + is_macro_expansion: bool, + attrs: &[ast::Attribute] + ) { + let mut attrs = attrs.into_iter().peekable(); + + // Accumulate a single span for sugared doc comments. + let mut sugared_span: Option = None; + + while let Some(attr) = attrs.next() { + if attr.is_sugared_doc { + sugared_span = Some( + sugared_span.map_or_else( + || attr.span, + |span| span.with_hi(attr.span.hi()), + ), + ); + } + + if attrs.peek().map(|next_attr| next_attr.is_sugared_doc).unwrap_or_default() { + continue; + } + + let span = sugared_span.take().unwrap_or_else(|| attr.span); + + if attr.check_name("doc") { + let mut err = cx.struct_span_lint(UNUSED_DOC_COMMENTS, span, "unused doc comment"); + + err.span_label( + node_span, + format!("rustdoc does not generate documentation for {}", node_kind) + ); + + if is_macro_expansion { + err.help("to document an item produced by a macro, \ + the macro must produce the documentation as part of its expansion"); + } + + err.emit(); + } } } } impl EarlyLintPass for UnusedDocComment { - fn check_local(&mut self, cx: &EarlyContext, decl: &ast::Local) { - self.warn_if_doc(decl.attrs.iter(), cx); + fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) { + if let ast::ItemKind::Mac(..) = item.node { + self.warn_if_doc(cx, item.span, "macro expansions", true, &item.attrs); + } } - fn check_arm(&mut self, cx: &EarlyContext, arm: &ast::Arm) { - self.warn_if_doc(arm.attrs.iter(), cx); + fn check_stmt(&mut self, cx: &EarlyContext<'_>, stmt: &ast::Stmt) { + let (kind, is_macro_expansion) = match stmt.node { + ast::StmtKind::Local(..) => ("statements", false), + ast::StmtKind::Item(..) => ("inner items", false), + ast::StmtKind::Mac(..) => ("macro expansions", true), + // expressions will be reported by `check_expr`. + ast::StmtKind::Semi(..) | + ast::StmtKind::Expr(..) => return, + }; + + self.warn_if_doc(cx, stmt.span, kind, is_macro_expansion, stmt.node.attrs()); } - fn check_expr(&mut self, cx: &EarlyContext, expr: &ast::Expr) { - self.warn_if_doc(expr.attrs.iter(), cx); + fn check_arm(&mut self, cx: &EarlyContext<'_>, arm: &ast::Arm) { + let arm_span = arm.pats[0].span.with_hi(arm.body.span.hi()); + self.warn_if_doc(cx, arm_span, "match arms", false, &arm.attrs); + } + + fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) { + self.warn_if_doc(cx, expr.span, "expressions", false, &expr.attrs); } } @@ -850,18 +785,11 @@ declare_lint! 
{ "compiler plugin used as ordinary library in non-plugin crate" } -#[derive(Copy, Clone)] -pub struct PluginAsLibrary; - -impl LintPass for PluginAsLibrary { - fn get_lints(&self) -> LintArray { - lint_array![PLUGIN_AS_LIBRARY] - } -} +declare_lint_pass!(PluginAsLibrary => [PLUGIN_AS_LIBRARY]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PluginAsLibrary { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { - if cx.sess().plugin_registrar_fn.get().is_some() { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { + if cx.tcx.plugin_registrar_fn(LOCAL_CRATE).is_some() { // We're compiling a plugin; it's fine to link other plugins. return; } @@ -871,7 +799,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PluginAsLibrary { _ => return, }; - let def_id = cx.tcx.hir().local_def_id(it.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(it.hir_id); let prfn = match cx.tcx.extern_mod_stmt_cnum(def_id) { Some(cnum) => cx.tcx.plugin_registrar_fn(cnum), None => { @@ -902,30 +830,24 @@ declare_lint! { "generic items must be mangled" } -#[derive(Copy, Clone)] -pub struct InvalidNoMangleItems; - -impl LintPass for InvalidNoMangleItems { - fn get_lints(&self) -> LintArray { - lint_array!(NO_MANGLE_CONST_ITEMS, - NO_MANGLE_GENERIC_ITEMS) - } -} +declare_lint_pass!(InvalidNoMangleItems => [NO_MANGLE_CONST_ITEMS, NO_MANGLE_GENERIC_ITEMS]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidNoMangleItems { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { match it.node { hir::ItemKind::Fn(.., ref generics, _) => { if let Some(no_mangle_attr) = attr::find_by_name(&it.attrs, "no_mangle") { for param in &generics.params { match param.kind { GenericParamKind::Lifetime { .. } => {} - GenericParamKind::Type { .. } => { - let mut err = cx.struct_span_lint(NO_MANGLE_GENERIC_ITEMS, - it.span, - "functions generic over \ - types must be mangled"); - err.span_suggestion_short_with_applicability( + GenericParamKind::Type { .. } | + GenericParamKind::Const { .. } => { + let mut err = cx.struct_span_lint( + NO_MANGLE_GENERIC_ITEMS, + it.span, + "functions generic over types or consts must be mangled", + ); + err.span_suggestion_short( no_mangle_attr.span, "remove this attribute", String::new(), @@ -953,7 +875,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidNoMangleItems { .unwrap_or(0) as u32; // `const` is 5 chars let const_span = it.span.with_hi(BytePos(it.span.lo().0 + start + 5)); - err.span_suggestion_with_applicability( + err.span_suggestion( const_span, "try a static value", "pub static".to_owned(), @@ -967,28 +889,21 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for InvalidNoMangleItems { } } -#[derive(Clone, Copy)] -pub struct MutableTransmutes; - declare_lint! 
{ MUTABLE_TRANSMUTES, Deny, "mutating transmuted &mut T from &T may cause undefined behavior" } -impl LintPass for MutableTransmutes { - fn get_lints(&self) -> LintArray { - lint_array!(MUTABLE_TRANSMUTES) - } -} +declare_lint_pass!(MutableTransmutes => [MUTABLE_TRANSMUTES]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MutableTransmutes { - fn check_expr(&mut self, cx: &LateContext, expr: &hir::Expr) { + fn check_expr(&mut self, cx: &LateContext<'_, '_>, expr: &hir::Expr) { use rustc_target::spec::abi::Abi::RustIntrinsic; let msg = "mutating transmuted &mut T from &T may cause undefined behavior, \ consider instead using an UnsafeCell"; - match get_transmute_from_to(cx, expr) { + match get_transmute_from_to(cx, expr).map(|(ty1, ty2)| (&ty1.sty, &ty2.sty)) { Some((&ty::Ref(_, _, from_mt), &ty::Ref(_, _, to_mt))) => { if to_mt == hir::Mutability::MutMutable && from_mt == hir::Mutability::MutImmutable { @@ -1001,7 +916,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MutableTransmutes { fn get_transmute_from_to<'a, 'tcx> (cx: &LateContext<'a, 'tcx>, expr: &hir::Expr) - -> Option<(&'tcx ty::TyKind<'tcx>, &'tcx ty::TyKind<'tcx>)> { + -> Option<(Ty<'tcx>, Ty<'tcx>)> { let def = if let hir::ExprKind::Path(ref qpath) = expr.node { cx.tables.qpath_def(qpath, expr.hir_id) } else { @@ -1011,39 +926,34 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for MutableTransmutes { if !def_id_is_transmute(cx, did) { return None; } - let sig = cx.tables.node_id_to_type(expr.hir_id).fn_sig(cx.tcx); + let sig = cx.tables.node_type(expr.hir_id).fn_sig(cx.tcx); let from = sig.inputs().skip_binder()[0]; let to = *sig.output().skip_binder(); - return Some((&from.sty, &to.sty)); + return Some((from, to)); } None } - fn def_id_is_transmute(cx: &LateContext, def_id: DefId) -> bool { + fn def_id_is_transmute(cx: &LateContext<'_, '_>, def_id: DefId) -> bool { cx.tcx.fn_sig(def_id).abi() == RustIntrinsic && cx.tcx.item_name(def_id) == "transmute" } } } -/// Forbids using the `#[feature(...)]` attribute -#[derive(Copy, Clone)] -pub struct UnstableFeatures; - declare_lint! { UNSTABLE_FEATURES, Allow, "enabling unstable features (deprecated. do not use)" } -impl LintPass for UnstableFeatures { - fn get_lints(&self) -> LintArray { - lint_array!(UNSTABLE_FEATURES) - } -} +declare_lint_pass!( + /// Forbids using the `#[feature(...)]` attribute + UnstableFeatures => [UNSTABLE_FEATURES] +); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnstableFeatures { - fn check_attribute(&mut self, ctx: &LateContext, attr: &ast::Attribute) { + fn check_attribute(&mut self, ctx: &LateContext<'_, '_>, attr: &ast::Attribute) { if attr.check_name("feature") { if let Some(items) = attr.meta_item_list() { for item in items { @@ -1054,26 +964,23 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnstableFeatures { } } -/// Lint for unions that contain fields with possibly non-trivial destructors. -pub struct UnionsWithDropFields; - declare_lint! { UNIONS_WITH_DROP_FIELDS, Warn, "use of unions that contain fields with possibly non-trivial drop code" } -impl LintPass for UnionsWithDropFields { - fn get_lints(&self) -> LintArray { - lint_array!(UNIONS_WITH_DROP_FIELDS) - } -} +declare_lint_pass!( + /// Lint for unions that contain fields with possibly non-trivial destructors. 
+ UnionsWithDropFields => [UNIONS_WITH_DROP_FIELDS] +); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnionsWithDropFields { - fn check_item(&mut self, ctx: &LateContext, item: &hir::Item) { + fn check_item(&mut self, ctx: &LateContext<'_, '_>, item: &hir::Item) { if let hir::ItemKind::Union(ref vdata, _) = item.node { for field in vdata.fields() { - let field_ty = ctx.tcx.type_of(ctx.tcx.hir().local_def_id(field.id)); + let field_ty = ctx.tcx.type_of( + ctx.tcx.hir().local_def_id_from_hir_id(field.hir_id)); if field_ty.needs_drop(ctx.tcx, ctx.param_env) { ctx.span_lint(UNIONS_WITH_DROP_FIELDS, field.span, @@ -1086,23 +993,19 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnionsWithDropFields { } } -/// Lint for items marked `pub` that aren't reachable from other crates -pub struct UnreachablePub; - declare_lint! { pub UNREACHABLE_PUB, Allow, "`pub` items not reachable from crate root" } -impl LintPass for UnreachablePub { - fn get_lints(&self) -> LintArray { - lint_array!(UNREACHABLE_PUB) - } -} +declare_lint_pass!( + /// Lint for items marked `pub` that aren't reachable from other crates. + UnreachablePub => [UNREACHABLE_PUB] +); impl UnreachablePub { - fn perform_lint(&self, cx: &LateContext, what: &str, id: ast::NodeId, + fn perform_lint(&self, cx: &LateContext<'_, '_>, what: &str, id: hir::HirId, vis: &hir::Visibility, span: Span, exportable: bool) { let mut applicability = Applicability::MachineApplicable; match vis.node { @@ -1119,10 +1022,12 @@ impl UnreachablePub { "pub(crate)" }.to_owned(); - err.span_suggestion_with_applicability(vis.span, - "consider restricting its visibility", - replacement, - applicability); + err.span_suggestion( + vis.span, + "consider restricting its visibility", + replacement, + applicability, + ); if exportable { err.help("or consider exporting it for use by other crates"); } @@ -1133,43 +1038,37 @@ impl UnreachablePub { } } - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnreachablePub { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { - self.perform_lint(cx, "item", item.id, &item.vis, item.span, true); + fn check_item(&mut self, cx: &LateContext<'_, '_>, item: &hir::Item) { + self.perform_lint(cx, "item", item.hir_id, &item.vis, item.span, true); } - fn check_foreign_item(&mut self, cx: &LateContext, foreign_item: &hir::ForeignItem) { - self.perform_lint(cx, "item", foreign_item.id, &foreign_item.vis, + fn check_foreign_item(&mut self, cx: &LateContext<'_, '_>, foreign_item: &hir::ForeignItem) { + self.perform_lint(cx, "item", foreign_item.hir_id, &foreign_item.vis, foreign_item.span, true); } - fn check_struct_field(&mut self, cx: &LateContext, field: &hir::StructField) { - self.perform_lint(cx, "field", field.id, &field.vis, field.span, false); + fn check_struct_field(&mut self, cx: &LateContext<'_, '_>, field: &hir::StructField) { + self.perform_lint(cx, "field", field.hir_id, &field.vis, field.span, false); } - fn check_impl_item(&mut self, cx: &LateContext, impl_item: &hir::ImplItem) { - self.perform_lint(cx, "item", impl_item.id, &impl_item.vis, impl_item.span, false); + fn check_impl_item(&mut self, cx: &LateContext<'_, '_>, impl_item: &hir::ImplItem) { + self.perform_lint(cx, "item", impl_item.hir_id, &impl_item.vis, impl_item.span, false); } } -/// Lint for trait and lifetime bounds in type aliases being mostly ignored: -/// They are relevant when using associated types, but otherwise neither checked -/// at definition site nor enforced at use site. - -pub struct TypeAliasBounds; - declare_lint! 
{ TYPE_ALIAS_BOUNDS, Warn, "bounds in type aliases are not enforced" } -impl LintPass for TypeAliasBounds { - fn get_lints(&self) -> LintArray { - lint_array!(TYPE_ALIAS_BOUNDS) - } -} +declare_lint_pass!( + /// Lint for trait and lifetime bounds in type aliases being mostly ignored. + /// They are relevant when using associated types, but otherwise neither checked + /// at definition site nor enforced at use site. + TypeAliasBounds => [TYPE_ALIAS_BOUNDS] +); impl TypeAliasBounds { fn is_type_variable_assoc(qpath: &hir::QPath) -> bool { @@ -1190,7 +1089,7 @@ impl TypeAliasBounds { } } - fn suggest_changing_assoc_types(ty: &hir::Ty, err: &mut DiagnosticBuilder) { + fn suggest_changing_assoc_types(ty: &hir::Ty, err: &mut DiagnosticBuilder<'_>) { // Access to associates types should use `::Assoc`, which does not need a // bound. Let's see if this type does that. @@ -1222,7 +1121,7 @@ impl TypeAliasBounds { } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeAliasBounds { - fn check_item(&mut self, cx: &LateContext, item: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, item: &hir::Item) { let (ty, type_alias_generics) = match item.node { hir::ItemKind::Ty(ref ty, ref generics) => (&*ty, generics), _ => return, @@ -1263,18 +1162,15 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeAliasBounds { } } -/// Lint constants that are erroneous. -/// Without this lint, we might not get any diagnostic if the constant is -/// unused within this crate, even though downstream crates can't use it -/// without producing an error. -pub struct UnusedBrokenConst; +declare_lint_pass!( + /// Lint constants that are erroneous. + /// Without this lint, we might not get any diagnostic if the constant is + /// unused within this crate, even though downstream crates can't use it + /// without producing an error. + UnusedBrokenConst => [] +); -impl LintPass for UnusedBrokenConst { - fn get_lints(&self) -> LintArray { - lint_array!() - } -} -fn check_const(cx: &LateContext, body_id: hir::BodyId) { +fn check_const(cx: &LateContext<'_, '_>, body_id: hir::BodyId) { let def_id = cx.tcx.hir().body_owner_def_id(body_id); let is_static = cx.tcx.is_static(def_id).is_some(); let param_env = if is_static { @@ -1288,11 +1184,12 @@ fn check_const(cx: &LateContext, body_id: hir::BodyId) { promoted: None }; // trigger the query once for all constants since that will already report the errors + // FIXME: Use ensure here let _ = cx.tcx.const_eval(param_env.and(cid)); } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedBrokenConst { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { match it.node { hir::ItemKind::Const(_, body_id) => { check_const(cx, body_id); @@ -1305,21 +1202,17 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedBrokenConst { } } -/// Lint for trait and lifetime bounds that don't depend on type parameters -/// which either do nothing, or stop the item from being used. -pub struct TrivialConstraints; - declare_lint! { TRIVIAL_BOUNDS, Warn, "these bounds don't depend on an type parameters" } -impl LintPass for TrivialConstraints { - fn get_lints(&self) -> LintArray { - lint_array!(TRIVIAL_BOUNDS) - } -} +declare_lint_pass!( + /// Lint for trait and lifetime bounds that don't depend on type parameters + /// which either do nothing, or stop the item from being used. 
+ TrivialConstraints => [TRIVIAL_BOUNDS] +); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TrivialConstraints { fn check_item( @@ -1332,7 +1225,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TrivialConstraints { if cx.tcx.features().trivial_bounds { - let def_id = cx.tcx.hir().local_def_id(item.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(item.hir_id); let predicates = cx.tcx.predicates_of(def_id); for &(predicate, span) in &predicates.predicates { let predicate_kind_name = match predicate { @@ -1363,36 +1256,30 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TrivialConstraints { } } - -/// Does nothing as a lint pass, but registers some `Lint`s -/// which are used by other parts of the compiler. -#[derive(Copy, Clone)] -pub struct SoftLints; - -impl LintPass for SoftLints { - fn get_lints(&self) -> LintArray { - lint_array!( - WHILE_TRUE, - BOX_POINTERS, - NON_SHORTHAND_FIELD_PATTERNS, - UNSAFE_CODE, - MISSING_DOCS, - MISSING_COPY_IMPLEMENTATIONS, - MISSING_DEBUG_IMPLEMENTATIONS, - ANONYMOUS_PARAMETERS, - UNUSED_DOC_COMMENTS, - PLUGIN_AS_LIBRARY, - NO_MANGLE_CONST_ITEMS, - NO_MANGLE_GENERIC_ITEMS, - MUTABLE_TRANSMUTES, - UNSTABLE_FEATURES, - UNIONS_WITH_DROP_FIELDS, - UNREACHABLE_PUB, - TYPE_ALIAS_BOUNDS, - TRIVIAL_BOUNDS - ) - } -} +declare_lint_pass!( + /// Does nothing as a lint pass, but registers some `Lint`s + /// which are used by other parts of the compiler. + SoftLints => [ + WHILE_TRUE, + BOX_POINTERS, + NON_SHORTHAND_FIELD_PATTERNS, + UNSAFE_CODE, + MISSING_DOCS, + MISSING_COPY_IMPLEMENTATIONS, + MISSING_DEBUG_IMPLEMENTATIONS, + ANONYMOUS_PARAMETERS, + UNUSED_DOC_COMMENTS, + PLUGIN_AS_LIBRARY, + NO_MANGLE_CONST_ITEMS, + NO_MANGLE_GENERIC_ITEMS, + MUTABLE_TRANSMUTES, + UNSTABLE_FEATURES, + UNIONS_WITH_DROP_FIELDS, + UNREACHABLE_PUB, + TYPE_ALIAS_BOUNDS, + TRIVIAL_BOUNDS + ] +); declare_lint! { pub ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, @@ -1400,17 +1287,10 @@ declare_lint! { "`...` range patterns are deprecated" } - -pub struct EllipsisInclusiveRangePatterns; - -impl LintPass for EllipsisInclusiveRangePatterns { - fn get_lints(&self) -> LintArray { - lint_array!(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS) - } -} +declare_lint_pass!(EllipsisInclusiveRangePatterns => [ELLIPSIS_INCLUSIVE_RANGE_PATTERNS]); impl EarlyLintPass for EllipsisInclusiveRangePatterns { - fn check_pat(&mut self, cx: &EarlyContext, pat: &ast::Pat, visit_subpats: &mut bool) { + fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &ast::Pat, visit_subpats: &mut bool) { use self::ast::{PatKind, RangeEnd, RangeSyntax::DotDotDot}; /// If `pat` is a `...` pattern, return the start and end of the range, as well as the span @@ -1435,7 +1315,7 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns { if parenthesise { *visit_subpats = false; let mut err = cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, pat.span, msg); - err.span_suggestion_with_applicability( + err.span_suggestion( pat.span, suggestion, format!("&({}..={})", expr_to_string(&start), expr_to_string(&end)), @@ -1444,7 +1324,7 @@ impl EarlyLintPass for EllipsisInclusiveRangePatterns { err.emit(); } else { let mut err = cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, join, msg); - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( join, suggestion, "..=".to_owned(), @@ -1464,32 +1344,28 @@ declare_lint! 
{ } pub struct UnnameableTestItems { - boundary: ast::NodeId, // NodeId of the item under which things are not nameable + boundary: hir::HirId, // HirId of the item under which things are not nameable items_nameable: bool, } +impl_lint_pass!(UnnameableTestItems => [UNNAMEABLE_TEST_ITEMS]); + impl UnnameableTestItems { pub fn new() -> Self { Self { - boundary: ast::DUMMY_NODE_ID, + boundary: hir::DUMMY_HIR_ID, items_nameable: true } } } -impl LintPass for UnnameableTestItems { - fn get_lints(&self) -> LintArray { - lint_array!(UNNAMEABLE_TEST_ITEMS) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnnameableTestItems { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { if self.items_nameable { if let hir::ItemKind::Mod(..) = it.node {} else { self.items_nameable = false; - self.boundary = it.id; + self.boundary = it.hir_id; } return; } @@ -1503,8 +1379,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnnameableTestItems { } } - fn check_item_post(&mut self, _cx: &LateContext, it: &hir::Item) { - if !self.items_nameable && self.boundary == it.id { + fn check_item_post(&mut self, _cx: &LateContext<'_, '_>, it: &hir::Item) { + if !self.items_nameable && self.boundary == it.hir_id { self.items_nameable = true; } } @@ -1516,24 +1392,21 @@ declare_lint! { "detects edition keywords being used as an identifier" } -/// Checks for uses of edition keywords used as an identifier -#[derive(Clone)] -pub struct KeywordIdents; +declare_lint_pass!( + /// Check for uses of edition keywords used as an identifier. + KeywordIdents => [KEYWORD_IDENTS] +); -impl LintPass for KeywordIdents { - fn get_lints(&self) -> LintArray { - lint_array!(KEYWORD_IDENTS) - } -} +struct UnderMacro(bool); impl KeywordIdents { - fn check_tokens(&mut self, cx: &EarlyContext, tokens: TokenStream) { + fn check_tokens(&mut self, cx: &EarlyContext<'_>, tokens: TokenStream) { for tt in tokens.into_trees() { match tt { TokenTree::Token(span, tok) => match tok.ident() { // only report non-raw idents Some((ident, false)) => { - self.check_ident(cx, ast::Ident { + self.check_ident_token(cx, UnderMacro(true), ast::Ident { span: span.substitute_dummy(ident.span), ..ident }); @@ -1541,21 +1414,17 @@ impl KeywordIdents { _ => {}, } TokenTree::Delimited(_, _, tts) => { - self.check_tokens(cx, tts.stream()) + self.check_tokens(cx, tts) }, } } } -} -impl EarlyLintPass for KeywordIdents { - fn check_mac_def(&mut self, cx: &EarlyContext, mac_def: &ast::MacroDef, _id: ast::NodeId) { - self.check_tokens(cx, mac_def.stream()); - } - fn check_mac(&mut self, cx: &EarlyContext, mac: &ast::Mac) { - self.check_tokens(cx, mac.node.tts.clone().into()); - } - fn check_ident(&mut self, cx: &EarlyContext, ident: ast::Ident) { + fn check_ident_token(&mut self, + cx: &EarlyContext<'_>, + UnderMacro(under_macro): UnderMacro, + ident: ast::Ident) + { let ident_str = &ident.as_str()[..]; let cur_edition = cx.sess.edition(); let is_raw_ident = |ident: ast::Ident| { @@ -1564,7 +1433,22 @@ impl EarlyLintPass for KeywordIdents { let next_edition = match cur_edition { Edition::Edition2015 => { match ident_str { - "async" | "try" | "dyn" => Edition::Edition2018, + "async" | "try" => Edition::Edition2018, + + // rust-lang/rust#56327: Conservatively do not + // attempt to report occurrences of `dyn` within + // macro definitions or invocations, because `dyn` + // can legitimately occur as a contextual keyword + // in 2015 code denoting its 2018 meaning, and we + // do not want rustfix to 
inject bugs into working + // code by rewriting such occurrences. + // + // But if we see `dyn` outside of a macro, we know + // its precise role in the parsed AST and thus are + // assured this is truly an attempt to use it as + // an identifier. + "dyn" if !under_macro => Edition::Edition2018, + // Only issue warnings for `await` if the `async_await` // feature isn't being used. Otherwise, users need // to keep using `await` for the macro exposed by std. @@ -1588,7 +1472,7 @@ impl EarlyLintPass for KeywordIdents { E0721, "`await` is a keyword in the {} edition", cur_edition, ); - err.span_suggestion_with_applicability( + err.span_suggestion( ident.span, "you can use a raw identifier to stay compatible", "r#await".to_string(), @@ -1612,7 +1496,7 @@ impl EarlyLintPass for KeywordIdents { ident.as_str(), next_edition), ); - lint.span_suggestion_with_applicability( + lint.span_suggestion( ident.span, "you can use a raw identifier to stay compatible", format!("r#{}", ident.as_str()), @@ -1622,19 +1506,24 @@ impl EarlyLintPass for KeywordIdents { } } - -pub struct ExplicitOutlivesRequirements; - -impl LintPass for ExplicitOutlivesRequirements { - fn get_lints(&self) -> LintArray { - lint_array![EXPLICIT_OUTLIVES_REQUIREMENTS] +impl EarlyLintPass for KeywordIdents { + fn check_mac_def(&mut self, cx: &EarlyContext<'_>, mac_def: &ast::MacroDef, _id: ast::NodeId) { + self.check_tokens(cx, mac_def.stream()); + } + fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &ast::Mac) { + self.check_tokens(cx, mac.node.tts.clone().into()); + } + fn check_ident(&mut self, cx: &EarlyContext<'_>, ident: ast::Ident) { + self.check_ident_token(cx, UnderMacro(false), ident); } } +declare_lint_pass!(ExplicitOutlivesRequirements => [EXPLICIT_OUTLIVES_REQUIREMENTS]); + impl ExplicitOutlivesRequirements { fn collect_outlives_bound_spans( &self, - cx: &LateContext, + cx: &LateContext<'_, '_>, item_def_id: DefId, param_name: &str, bounds: &hir::GenericBounds, @@ -1746,21 +1635,22 @@ impl ExplicitOutlivesRequirements { impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ExplicitOutlivesRequirements { fn check_item(&mut self, cx: &LateContext<'a, 'tcx>, item: &'tcx hir::Item) { let infer_static = cx.tcx.features().infer_static_outlives_requirements; - let def_id = cx.tcx.hir().local_def_id(item.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(item.hir_id); if let hir::ItemKind::Struct(_, ref generics) = item.node { let mut bound_count = 0; let mut lint_spans = Vec::new(); for param in &generics.params { let param_name = match param.kind { - hir::GenericParamKind::Lifetime { .. } => { continue; }, + hir::GenericParamKind::Lifetime { .. } => continue, hir::GenericParamKind::Type { .. } => { match param.name { - hir::ParamName::Fresh(_) => { continue; }, - hir::ParamName::Error => { continue; }, - hir::ParamName::Plain(name) => name.to_string() + hir::ParamName::Fresh(_) => continue, + hir::ParamName::Error => continue, + hir::ParamName::Plain(name) => name.to_string(), } } + hir::GenericParamKind::Const { .. 
} => continue, }; let bound_spans = self.collect_outlives_bound_spans( cx, def_id, ¶m_name, ¶m.bounds, infer_static @@ -1836,7 +1726,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ExplicitOutlivesRequirements { lint_spans.clone(), "outlives requirements can be inferred" ); - err.multipart_suggestion_with_applicability( + err.multipart_suggestion( if bound_count == 1 { "remove this bound" } else { diff --git a/src/librustc_lint/diagnostics.rs b/src/librustc_lint/diagnostics.rs index 59f005a5de8da..3165673111cca 100644 --- a/src/librustc_lint/diagnostics.rs +++ b/src/librustc_lint/diagnostics.rs @@ -1,12 +1,4 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +use syntax::{register_diagnostic, register_diagnostics}; register_diagnostics! { E0721, // `await` keyword diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 347121833d3f1..07c505c6bde08 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # Lints in the Rust compiler //! //! This currently only contains the definitions and implementations @@ -19,26 +9,21 @@ //! //! This API is completely unstable and subject to change. -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![cfg_attr(test, feature(test))] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(nll)] -#![feature(quote)] #![feature(rustc_diagnostic_macros)] -#[macro_use] -extern crate syntax; +#![recursion_limit="256"] + +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] + #[macro_use] extern crate rustc; -#[macro_use] -extern crate log; -extern crate rustc_target; -extern crate syntax_pos; -extern crate rustc_data_structures; mod diagnostics; mod nonstandard_style; @@ -47,17 +32,23 @@ mod types; mod unused; use rustc::lint; -use rustc::lint::{LateContext, LateLintPass, LintPass, LintArray}; +use rustc::lint::{EarlyContext, LateContext, LateLintPass, EarlyLintPass, LintPass, LintArray}; use rustc::lint::builtin::{ BARE_TRAIT_OBJECTS, ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, ELIDED_LIFETIMES_IN_PATHS, EXPLICIT_OUTLIVES_REQUIREMENTS, - parser::QUESTION_MARK_MACRO_SEP + INTRA_DOC_LINK_RESOLUTION_FAILURE, + MISSING_DOC_CODE_EXAMPLES, + PRIVATE_DOC_TESTS, + parser::QUESTION_MARK_MACRO_SEP, + parser::ILL_FORMED_ATTRIBUTE_INPUT, }; use rustc::session; -use rustc::util; use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::ty::query::Providers; +use rustc::ty::TyCtxt; use syntax::ast; use syntax::edition::Edition; @@ -71,95 +62,181 @@ use nonstandard_style::*; use builtin::*; use types::*; use unused::*; +use rustc::lint::internal::*; /// Useful for other parts of the compiler. 
pub use builtin::SoftLints; +pub fn provide(providers: &mut Providers<'_>) { + *providers = Providers { + lint_mod, + ..*providers + }; +} + +fn lint_mod<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + lint::late_lint_mod(tcx, module_def_id, BuiltinCombinedModuleLateLintPass::new()); +} + +macro_rules! pre_expansion_lint_passes { + ($macro:path, $args:tt) => ( + $macro!($args, [ + KeywordIdents: KeywordIdents, + UnusedDocComment: UnusedDocComment, + ]); + ) +} + +macro_rules! early_lint_passes { + ($macro:path, $args:tt) => ( + $macro!($args, [ + UnusedParens: UnusedParens, + UnusedImportBraces: UnusedImportBraces, + UnsafeCode: UnsafeCode, + AnonymousParameters: AnonymousParameters, + EllipsisInclusiveRangePatterns: EllipsisInclusiveRangePatterns, + NonCamelCaseTypes: NonCamelCaseTypes, + DeprecatedAttr: DeprecatedAttr::new(), + ]); + ) +} + +macro_rules! declare_combined_early_pass { + ([$name:ident], $passes:tt) => ( + early_lint_methods!(declare_combined_early_lint_pass, [pub $name, $passes]); + ) +} + +pre_expansion_lint_passes!(declare_combined_early_pass, [BuiltinCombinedPreExpansionLintPass]); +early_lint_passes!(declare_combined_early_pass, [BuiltinCombinedEarlyLintPass]); + +macro_rules! late_lint_passes { + ($macro:path, $args:tt) => ( + $macro!($args, [ + // FIXME: Look into regression when this is used as a module lint + // May Depend on constants elsewhere + UnusedBrokenConst: UnusedBrokenConst, + + // Uses attr::is_used which is untracked, can't be an incremental module pass. + UnusedAttributes: UnusedAttributes, + + // Needs to run after UnusedAttributes as it marks all `feature` attributes as used. + UnstableFeatures: UnstableFeatures, + + // Tracks state across modules + UnnameableTestItems: UnnameableTestItems::new(), + + // Tracks attributes of parents + MissingDoc: MissingDoc::new(), + + // Depends on access levels + // FIXME: Turn the computation of types which implement Debug into a query + // and change this to a module lint pass + MissingDebugImplementations: MissingDebugImplementations::new(), + ]); + ) +} + +macro_rules! late_lint_mod_passes { + ($macro:path, $args:tt) => ( + $macro!($args, [ + HardwiredLints: HardwiredLints, + WhileTrue: WhileTrue, + ImproperCTypes: ImproperCTypes, + VariantSizeDifferences: VariantSizeDifferences, + BoxPointers: BoxPointers, + PathStatements: PathStatements, + + // Depends on referenced function signatures in expressions + UnusedResults: UnusedResults, + + NonUpperCaseGlobals: NonUpperCaseGlobals, + NonShorthandFieldPatterns: NonShorthandFieldPatterns, + UnusedAllocation: UnusedAllocation, + + // Depends on types used in type definitions + MissingCopyImplementations: MissingCopyImplementations, + + PluginAsLibrary: PluginAsLibrary, + + // Depends on referenced function signatures in expressions + MutableTransmutes: MutableTransmutes, + + // Depends on types of fields, checks if they implement Drop + UnionsWithDropFields: UnionsWithDropFields, + + TypeAliasBounds: TypeAliasBounds, + + TrivialConstraints: TrivialConstraints, + TypeLimits: TypeLimits::new(), + + NonSnakeCase: NonSnakeCase, + InvalidNoMangleItems: InvalidNoMangleItems, + + // Depends on access levels + UnreachablePub: UnreachablePub, + + ExplicitOutlivesRequirements: ExplicitOutlivesRequirements, + ]); + ) +} + +macro_rules! 
declare_combined_late_pass { + ([$v:vis $name:ident], $passes:tt) => ( + late_lint_methods!(declare_combined_late_lint_pass, [$v $name, $passes], ['tcx]); + ) +} + +// FIXME: Make a separate lint type which do not require typeck tables +late_lint_passes!(declare_combined_late_pass, [pub BuiltinCombinedLateLintPass]); + +late_lint_mod_passes!(declare_combined_late_pass, [BuiltinCombinedModuleLateLintPass]); + /// Tell the `LintStore` about all the built-in lints (the ones /// defined in this crate and the ones defined in /// `rustc::lint::builtin`). pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { - macro_rules! add_early_builtin { - ($sess:ident, $($name:ident),*,) => ( - {$( - store.register_early_pass($sess, false, box $name); - )*} + macro_rules! add_lint_group { + ($sess:ident, $name:expr, $($lint:ident),*) => ( + store.register_group($sess, false, $name, None, vec![$(LintId::of($lint)),*]); ) } - macro_rules! add_pre_expansion_builtin { - ($sess:ident, $($name:ident),*,) => ( - {$( - store.register_pre_expansion_pass($sess, box $name); - )*} + macro_rules! register_pass { + ($method:ident, $constructor:expr, [$($args:expr),*]) => ( + store.$method(sess, false, false, $($args,)* box $constructor); ) } - macro_rules! add_early_builtin_with_new { - ($sess:ident, $($name:ident),*,) => ( - {$( - store.register_early_pass($sess, false, box $name::new()); - )*} + macro_rules! register_passes { + ([$method:ident, $args:tt], [$($passes:ident: $constructor:expr,)*]) => ( + $( + register_pass!($method, $constructor, $args); + )* ) } - macro_rules! add_lint_group { - ($sess:ident, $name:expr, $($lint:ident),*) => ( - store.register_group($sess, false, $name, None, vec![$(LintId::of($lint)),*]); - ) + if sess.map(|sess| sess.opts.debugging_opts.no_interleave_lints).unwrap_or(false) { + pre_expansion_lint_passes!(register_passes, [register_pre_expansion_pass, []]); + early_lint_passes!(register_passes, [register_early_pass, []]); + late_lint_passes!(register_passes, [register_late_pass, [false]]); + late_lint_mod_passes!(register_passes, [register_late_pass, [true]]); + } else { + store.register_pre_expansion_pass( + sess, + false, + true, + box BuiltinCombinedPreExpansionLintPass::new() + ); + store.register_early_pass(sess, false, true, box BuiltinCombinedEarlyLintPass::new()); + store.register_late_pass( + sess, false, true, true, box BuiltinCombinedModuleLateLintPass::new() + ); + store.register_late_pass( + sess, false, true, false, box BuiltinCombinedLateLintPass::new() + ); } - add_pre_expansion_builtin!(sess, - KeywordIdents, - ); - - add_early_builtin!(sess, - UnusedParens, - UnusedImportBraces, - AnonymousParameters, - UnusedDocComment, - BadRepr, - EllipsisInclusiveRangePatterns, - ); - - add_early_builtin_with_new!(sess, - DeprecatedAttr, - ); - - late_lint_methods!(declare_combined_late_lint_pass, [BuiltinCombinedLateLintPass, [ - HardwiredLints: HardwiredLints, - WhileTrue: WhileTrue, - ImproperCTypes: ImproperCTypes, - VariantSizeDifferences: VariantSizeDifferences, - BoxPointers: BoxPointers, - UnusedAttributes: UnusedAttributes, - PathStatements: PathStatements, - UnusedResults: UnusedResults, - NonCamelCaseTypes: NonCamelCaseTypes, - NonSnakeCase: NonSnakeCase, - NonUpperCaseGlobals: NonUpperCaseGlobals, - NonShorthandFieldPatterns: NonShorthandFieldPatterns, - UnsafeCode: UnsafeCode, - UnusedAllocation: UnusedAllocation, - MissingCopyImplementations: MissingCopyImplementations, - UnstableFeatures: UnstableFeatures, - InvalidNoMangleItems: 
InvalidNoMangleItems, - PluginAsLibrary: PluginAsLibrary, - MutableTransmutes: MutableTransmutes, - UnionsWithDropFields: UnionsWithDropFields, - UnreachablePub: UnreachablePub, - UnnameableTestItems: UnnameableTestItems::new(), - TypeAliasBounds: TypeAliasBounds, - UnusedBrokenConst: UnusedBrokenConst, - TrivialConstraints: TrivialConstraints, - TypeLimits: TypeLimits::new(), - MissingDoc: MissingDoc::new(), - MissingDebugImplementations: MissingDebugImplementations::new(), - ExplicitOutlivesRequirements: ExplicitOutlivesRequirements, - ]], ['tcx]); - - store.register_late_pass(sess, false, box BuiltinCombinedLateLintPass::new()); - add_lint_group!(sess, "nonstandard_style", NON_CAMEL_CASE_TYPES, @@ -204,6 +281,12 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { // MACRO_USE_EXTERN_CRATE, ); + add_lint_group!(sess, + "rustdoc", + INTRA_DOC_LINK_RESOLUTION_FAILURE, + MISSING_DOC_CODE_EXAMPLES, + PRIVATE_DOC_TESTS); + // Guidelines for creating a future incompatibility lint: // // - Create a lint defaulting to warn as normal, with ideally the same error @@ -335,6 +418,31 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { reference: "issue #52234 ", edition: None, }, + FutureIncompatibleInfo { + id: LintId::of(ILL_FORMED_ATTRIBUTE_INPUT), + reference: "issue #57571 ", + edition: None, + }, + FutureIncompatibleInfo { + id: LintId::of(AMBIGUOUS_ASSOCIATED_ITEMS), + reference: "issue #57644 ", + edition: None, + }, + FutureIncompatibleInfo { + id: LintId::of(DUPLICATE_MATCHER_BINDING_NAME), + reference: "issue #57593 ", + edition: None, + }, + FutureIncompatibleInfo { + id: LintId::of(NESTED_IMPL_TRAIT), + reference: "issue #59014 ", + edition: None, + }, + FutureIncompatibleInfo { + id: LintId::of(MUTABLE_BORROW_RESERVATION_CONFLICT), + reference: "issue #59159 ", + edition: None, + } ]); // Register renamed and removed lints. @@ -384,4 +492,21 @@ pub fn register_builtins(store: &mut lint::LintStore, sess: Option<&Session>) { "no longer a warning, #[no_mangle] functions always exported"); store.register_removed("private_no_mangle_statics", "no longer a warning, #[no_mangle] statics always exported"); + store.register_removed("bad_repr", + "replaced with a generic attribute input check"); +} + +pub fn register_internals(store: &mut lint::LintStore, sess: Option<&Session>) { + store.register_early_pass(sess, false, false, box DefaultHashTypes::new()); + store.register_late_pass(sess, false, false, false, box TyKindUsage); + store.register_group( + sess, + false, + "internal", + None, + vec![ + LintId::of(DEFAULT_HASH_TYPES), + LintId::of(USAGE_OF_TY_TYKIND), + ], + ); } diff --git a/src/librustc_lint/nonstandard_style.rs b/src/librustc_lint/nonstandard_style.rs index e071c34ff7f53..1d8979f7d1c1b 100644 --- a/src/librustc_lint/nonstandard_style.rs +++ b/src/librustc_lint/nonstandard_style.rs @@ -1,23 +1,15 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
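The lint files that follow all drop their hand-written `LintPass` impls in favour of the new `declare_lint_pass!`/`impl_lint_pass!` macros. The two spellings register the same lints; roughly, going by the removed impls shown in the hunks below:

    // old, written out by hand for every pass:
    #[derive(Copy, Clone)]
    pub struct NonCamelCaseTypes;

    impl LintPass for NonCamelCaseTypes {
        fn get_lints(&self) -> LintArray {
            lint_array!(NON_CAMEL_CASE_TYPES)
        }
    }

    // new, as used throughout this patch:
    declare_lint_pass!(NonCamelCaseTypes => [NON_CAMEL_CASE_TYPES]);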
- use rustc::hir::{self, GenericParamKind, PatKind}; use rustc::hir::def::Def; use rustc::hir::intravisit::FnKind; +use rustc::lint; use rustc::ty; use rustc_target::spec::abi::Abi; -use lint::{LateContext, LintContext, LintArray}; -use lint::{LintPass, LateLintPass}; +use lint::{EarlyContext, LateContext, LintContext, LintArray}; +use lint::{EarlyLintPass, LintPass, LateLintPass}; use syntax::ast; use syntax::attr; -use syntax_pos::Span; +use syntax::errors::Applicability; +use syntax_pos::{BytePos, symbol::Ident, Span}; #[derive(PartialEq)] pub enum MethodLateContext { @@ -26,8 +18,8 @@ pub enum MethodLateContext { PlainImpl, } -pub fn method_context(cx: &LateContext, id: ast::NodeId) -> MethodLateContext { - let def_id = cx.tcx.hir().local_def_id(id); +pub fn method_context(cx: &LateContext<'_, '_>, id: hir::HirId) -> MethodLateContext { + let def_id = cx.tcx.hir().local_def_id_from_hir_id(id); let item = cx.tcx.associated_item(def_id); match item.container { ty::TraitContainer(..) => MethodLateContext::TraitAutoImpl, @@ -46,110 +38,120 @@ declare_lint! { "types, variants, traits and type parameters should have camel case names" } -#[derive(Copy, Clone)] -pub struct NonCamelCaseTypes; +declare_lint_pass!(NonCamelCaseTypes => [NON_CAMEL_CASE_TYPES]); -impl NonCamelCaseTypes { - fn check_case(&self, cx: &LateContext, sort: &str, name: ast::Name, span: Span) { - fn char_has_case(c: char) -> bool { - c.is_lowercase() || c.is_uppercase() - } +fn char_has_case(c: char) -> bool { + c.is_lowercase() || c.is_uppercase() +} - fn is_camel_case(name: ast::Name) -> bool { - let name = name.as_str(); - let name = name.trim_matches('_'); - if name.is_empty() { - return true; +fn is_camel_case(name: &str) -> bool { + let name = name.trim_matches('_'); + if name.is_empty() { + return true; + } + + // start with a non-lowercase letter rather than non-uppercase + // ones (some scripts don't have a concept of upper/lowercase) + !name.chars().next().unwrap().is_lowercase() + && !name.contains("__") + && !name.chars().collect::>().windows(2).any(|pair| { + // contains a capitalisable character followed by, or preceded by, an underscore + char_has_case(pair[0]) && pair[1] == '_' || char_has_case(pair[1]) && pair[0] == '_' + }) +} + +fn to_camel_case(s: &str) -> String { + s.trim_matches('_') + .split('_') + .filter(|component| !component.is_empty()) + .map(|component| { + let mut camel_cased_component = String::new(); + + let mut new_word = true; + let mut prev_is_lower_case = true; + + for c in component.chars() { + // Preserve the case if an uppercase letter follows a lowercase letter, so that + // `camelCase` is converted to `CamelCase`. 
+ if prev_is_lower_case && c.is_uppercase() { + new_word = true; + } + + if new_word { + camel_cased_component.push_str(&c.to_uppercase().to_string()); + } else { + camel_cased_component.push_str(&c.to_lowercase().to_string()); + } + + prev_is_lower_case = c.is_lowercase(); + new_word = false; } - // start with a non-lowercase letter rather than non-uppercase - // ones (some scripts don't have a concept of upper/lowercase) - !name.is_empty() && !name.chars().next().unwrap().is_lowercase() && - !name.contains("__") && !name.chars().collect::>().windows(2).any(|pair| { - // contains a capitalisable character followed by, or preceded by, an underscore - char_has_case(pair[0]) && pair[1] == '_' || - char_has_case(pair[1]) && pair[0] == '_' - }) - } + camel_cased_component + }) + .fold( + (String::new(), None), + |(acc, prev): (String, Option), next| { + // separate two components with an underscore if their boundary cannot + // be distinguished using a uppercase/lowercase case distinction + let join = if let Some(prev) = prev { + let l = prev.chars().last().unwrap(); + let f = next.chars().next().unwrap(); + !char_has_case(l) && !char_has_case(f) + } else { + false + }; + (acc + if join { "_" } else { "" } + &next, Some(next)) + }, + ) + .0 +} - fn to_camel_case(s: &str) -> String { - s.trim_matches('_') - .split('_') - .map(|word| { - word.chars().enumerate().map(|(i, c)| if i == 0 { - c.to_uppercase().collect::() - } else { - c.to_lowercase().collect() - }) - .collect::() - }) - .filter(|x| !x.is_empty()) - .fold((String::new(), None), |(acc, prev): (String, Option), next| { - // separate two components with an underscore if their boundary cannot - // be distinguished using a uppercase/lowercase case distinction - let join = if let Some(prev) = prev { - let l = prev.chars().last().unwrap(); - let f = next.chars().next().unwrap(); - !char_has_case(l) && !char_has_case(f) - } else { false }; - (acc + if join { "_" } else { "" } + &next, Some(next)) - }).0 - } +impl NonCamelCaseTypes { + fn check_case(&self, cx: &EarlyContext<'_>, sort: &str, ident: &Ident) { + let name = &ident.name.as_str(); if !is_camel_case(name) { - let c = to_camel_case(&name.as_str()); - let m = if c.is_empty() { - format!("{} `{}` should have a camel case name such as `CamelCase`", sort, name) - } else { - format!("{} `{}` should have a camel case name such as `{}`", sort, name, c) - }; - cx.span_lint(NON_CAMEL_CASE_TYPES, span, &m); + let msg = format!("{} `{}` should have an upper camel case name", sort, name); + cx.struct_span_lint(NON_CAMEL_CASE_TYPES, ident.span, &msg) + .span_suggestion( + ident.span, + "convert the identifier to upper camel case", + to_camel_case(name), + Applicability::MaybeIncorrect, + ) + .emit(); } } } -impl LintPass for NonCamelCaseTypes { - fn get_lints(&self) -> LintArray { - lint_array!(NON_CAMEL_CASE_TYPES) - } -} - -impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonCamelCaseTypes { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { +impl EarlyLintPass for NonCamelCaseTypes { + fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) { let has_repr_c = it.attrs .iter() - .any(|attr| { - attr::find_repr_attrs(&cx.tcx.sess.parse_sess, attr) - .iter() - .any(|r| r == &attr::ReprC) - }); + .any(|attr| attr::find_repr_attrs(&cx.sess.parse_sess, attr).contains(&attr::ReprC)); if has_repr_c { return; } match it.node { - hir::ItemKind::Ty(..) | - hir::ItemKind::Enum(..) | - hir::ItemKind::Struct(..) | - hir::ItemKind::Union(..) 
=> self.check_case(cx, "type", it.name, it.span), - hir::ItemKind::Trait(..) => self.check_case(cx, "trait", it.name, it.span), + ast::ItemKind::Ty(..) | + ast::ItemKind::Enum(..) | + ast::ItemKind::Struct(..) | + ast::ItemKind::Union(..) => self.check_case(cx, "type", &it.ident), + ast::ItemKind::Trait(..) => self.check_case(cx, "trait", &it.ident), _ => (), } } - fn check_variant(&mut self, cx: &LateContext, v: &hir::Variant, _: &hir::Generics) { - self.check_case(cx, "variant", v.node.name, v.span); + fn check_variant(&mut self, cx: &EarlyContext<'_>, v: &ast::Variant, _: &ast::Generics) { + self.check_case(cx, "variant", &v.node.ident); } - fn check_generic_param(&mut self, cx: &LateContext, param: &hir::GenericParam) { - match param.kind { - GenericParamKind::Lifetime { .. } => {} - GenericParamKind::Type { synthetic, .. } => { - if synthetic.is_none() { - self.check_case(cx, "type parameter", param.name.ident().name, param.span); - } - } + fn check_generic_param(&mut self, cx: &EarlyContext<'_>, param: &ast::GenericParam) { + if let ast::GenericParamKind::Type { .. } = param.kind { + self.check_case(cx, "type parameter", ¶m.ident); } } } @@ -160,8 +162,7 @@ declare_lint! { "variables, methods, functions, lifetime parameters and modules should have snake case names" } -#[derive(Copy, Clone)] -pub struct NonSnakeCase; +declare_lint_pass!(NonSnakeCase => [NON_SNAKE_CASE]); impl NonSnakeCase { fn to_snake_case(mut str: &str) -> String { @@ -194,7 +195,8 @@ impl NonSnakeCase { words.join("_") } - fn check_snake_case(&self, cx: &LateContext, sort: &str, name: &str, span: Option) { + /// Checks if a given identifier is snake case, and reports a diagnostic if not. + fn check_snake_case(&self, cx: &LateContext<'_, '_>, sort: &str, ident: &Ident) { fn is_snake_case(ident: &str) -> bool { if ident.is_empty() { return true; @@ -216,113 +218,143 @@ impl NonSnakeCase { }) } + let name = &ident.name.as_str(); + if !is_snake_case(name) { let sc = NonSnakeCase::to_snake_case(name); - let msg = if sc != name { - format!("{} `{}` should have a snake case name such as `{}`", - sort, - name, - sc) + + let msg = format!("{} `{}` should have a snake case name", sort, name); + let mut err = cx.struct_span_lint(NON_SNAKE_CASE, ident.span, &msg); + + // We have a valid span in almost all cases, but we don't have one when linting a crate + // name provided via the command line. 
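For illustration (a sketch, not part of the patch): an in-source crate name has a usable span, so the lint can attach a structured suggestion to the literal, while a name passed on the command line only gets the `help` note:

    #![crate_name = "myCrate"]
    // warning: crate `myCrate` should have a snake case name
    // suggestion: convert the identifier to snake case (replacing it with `my_crate`)
    //
    // With `rustc --crate-name myCrate ...` the same warning fires, but the span is
    // a dummy one, so only the help text "convert the identifier to snake case" is added.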
+ if !ident.span.is_dummy() { + err.span_suggestion( + ident.span, + "convert the identifier to snake case", + sc, + Applicability::MaybeIncorrect, + ); } else { - format!("{} `{}` should have a snake case name", sort, name) - }; - match span { - Some(span) => cx.span_lint(NON_SNAKE_CASE, span, &msg), - None => cx.lint(NON_SNAKE_CASE, &msg), + err.help(&format!("convert the identifier to snake case: `{}`", sc)); } - } - } -} -impl LintPass for NonSnakeCase { - fn get_lints(&self) -> LintArray { - lint_array!(NON_SNAKE_CASE) + err.emit(); + } } } impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonSnakeCase { - fn check_crate(&mut self, cx: &LateContext, cr: &hir::Crate) { - let attr_crate_name = attr::find_by_name(&cr.attrs, "crate_name") - .and_then(|at| at.value_str().map(|s| (at, s))); - if let Some(ref name) = cx.tcx.sess.opts.crate_name { - self.check_snake_case(cx, "crate", name, None); - } else if let Some((attr, name)) = attr_crate_name { - self.check_snake_case(cx, "crate", &name.as_str(), Some(attr.span)); + fn check_mod(&mut self, cx: &LateContext<'_, '_>, _: &'tcx hir::Mod, _: Span, id: hir::HirId) { + if id != hir::CRATE_HIR_ID { + return; + } + + let crate_ident = if let Some(name) = &cx.tcx.sess.opts.crate_name { + Some(Ident::from_str(name)) + } else { + attr::find_by_name(&cx.tcx.hir().attrs_by_hir_id(hir::CRATE_HIR_ID), "crate_name") + .and_then(|attr| attr.meta()) + .and_then(|meta| { + meta.name_value_literal().and_then(|lit| { + if let ast::LitKind::Str(name, ..) = lit.node { + // Discard the double quotes surrounding the literal. + let sp = cx.sess().source_map().span_to_snippet(lit.span) + .ok() + .and_then(|snippet| { + let left = snippet.find('"')?; + let right = snippet.rfind('"').map(|pos| snippet.len() - pos)?; + + Some( + lit.span + .with_lo(lit.span.lo() + BytePos(left as u32 + 1)) + .with_hi(lit.span.hi() - BytePos(right as u32)), + ) + }) + .unwrap_or_else(|| lit.span); + + Some(Ident::new(name, sp)) + } else { + None + } + }) + }) + }; + + if let Some(ident) = &crate_ident { + self.check_snake_case(cx, "crate", ident); } } - fn check_generic_param(&mut self, cx: &LateContext, param: &hir::GenericParam) { - match param.kind { - GenericParamKind::Lifetime { .. } => { - let name = param.name.ident().as_str(); - self.check_snake_case(cx, "lifetime", &name, Some(param.span)); - } - GenericParamKind::Type { .. } => {} + fn check_generic_param(&mut self, cx: &LateContext<'_, '_>, param: &hir::GenericParam) { + if let GenericParamKind::Lifetime { .. } = param.kind { + self.check_snake_case(cx, "lifetime", ¶m.name.ident()); } } - fn check_fn(&mut self, - cx: &LateContext, - fk: FnKind, - _: &hir::FnDecl, - _: &hir::Body, - span: Span, - id: ast::NodeId) { - match fk { - FnKind::Method(name, ..) => { + fn check_fn( + &mut self, + cx: &LateContext<'_, '_>, + fk: FnKind<'_>, + _: &hir::FnDecl, + _: &hir::Body, + _: Span, + id: hir::HirId, + ) { + match &fk { + FnKind::Method(ident, ..) 
=> { match method_context(cx, id) { MethodLateContext::PlainImpl => { - self.check_snake_case(cx, "method", &name.as_str(), Some(span)) + self.check_snake_case(cx, "method", ident); } MethodLateContext::TraitAutoImpl => { - self.check_snake_case(cx, "trait method", &name.as_str(), Some(span)) + self.check_snake_case(cx, "trait method", ident); } _ => (), } } - FnKind::ItemFn(name, _, header, _, attrs) => { + FnKind::ItemFn(ident, _, header, _, attrs) => { // Skip foreign-ABI #[no_mangle] functions (Issue #31924) - if header.abi != Abi::Rust && attr::find_by_name(attrs, "no_mangle").is_some() { + if header.abi != Abi::Rust && attr::contains_name(attrs, "no_mangle") { return; } - self.check_snake_case(cx, "function", &name.as_str(), Some(span)) + self.check_snake_case(cx, "function", ident); } FnKind::Closure(_) => (), } } - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { if let hir::ItemKind::Mod(_) = it.node { - self.check_snake_case(cx, "module", &it.name.as_str(), Some(it.span)); + self.check_snake_case(cx, "module", &it.ident); } } - fn check_trait_item(&mut self, cx: &LateContext, item: &hir::TraitItem) { - if let hir::TraitItemKind::Method(_, hir::TraitMethod::Required(ref pnames)) = item.node { - self.check_snake_case(cx, - "trait method", - &item.ident.as_str(), - Some(item.span)); + fn check_trait_item(&mut self, cx: &LateContext<'_, '_>, item: &hir::TraitItem) { + if let hir::TraitItemKind::Method(_, hir::TraitMethod::Required(pnames)) = &item.node { + self.check_snake_case(cx, "trait method", &item.ident); for param_name in pnames { - self.check_snake_case(cx, "variable", ¶m_name.as_str(), Some(param_name.span)); + self.check_snake_case(cx, "variable", param_name); } } } - fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) { - if let &PatKind::Binding(_, _, ref ident, _) = &p.node { - self.check_snake_case(cx, "variable", &ident.as_str(), Some(p.span)); + fn check_pat(&mut self, cx: &LateContext<'_, '_>, p: &hir::Pat) { + if let &PatKind::Binding(_, _, ident, _) = &p.node { + self.check_snake_case(cx, "variable", &ident); } } - fn check_struct_def(&mut self, - cx: &LateContext, - s: &hir::VariantData, - _: ast::Name, - _: &hir::Generics, - _: ast::NodeId) { + fn check_struct_def( + &mut self, + cx: &LateContext<'_, '_>, + s: &hir::VariantData, + _: ast::Name, + _: &hir::Generics, + _: hir::HirId, + ) { for sf in s.fields() { - self.check_snake_case(cx, "structure field", &sf.ident.as_str(), Some(sf.span)); + self.check_snake_case(cx, "structure field", &sf.ident); } } } @@ -333,82 +365,100 @@ declare_lint! 
{ "static constants should have uppercase identifiers" } -#[derive(Copy, Clone)] -pub struct NonUpperCaseGlobals; +declare_lint_pass!(NonUpperCaseGlobals => [NON_UPPER_CASE_GLOBALS]); impl NonUpperCaseGlobals { - fn check_upper_case(cx: &LateContext, sort: &str, name: ast::Name, span: Span) { - if name.as_str().chars().any(|c| c.is_lowercase()) { - let uc = NonSnakeCase::to_snake_case(&name.as_str()).to_uppercase(); - if name != &*uc { - cx.span_lint(NON_UPPER_CASE_GLOBALS, - span, - &format!("{} `{}` should have an upper case name such as `{}`", - sort, - name, - uc)); - } else { - cx.span_lint(NON_UPPER_CASE_GLOBALS, - span, - &format!("{} `{}` should have an upper case name", sort, name)); - } + fn check_upper_case(cx: &LateContext<'_, '_>, sort: &str, ident: &Ident) { + let name = &ident.name.as_str(); + + if name.chars().any(|c| c.is_lowercase()) { + let uc = NonSnakeCase::to_snake_case(&name).to_uppercase(); + + let msg = format!("{} `{}` should have an upper case name", sort, name); + cx.struct_span_lint(NON_UPPER_CASE_GLOBALS, ident.span, &msg) + .span_suggestion( + ident.span, + "convert the identifier to upper case", + uc, + Applicability::MaybeIncorrect, + ) + .emit(); } } } -impl LintPass for NonUpperCaseGlobals { - fn get_lints(&self) -> LintArray { - lint_array!(NON_UPPER_CASE_GLOBALS) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NonUpperCaseGlobals { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { match it.node { - hir::ItemKind::Static(..) => { - if attr::find_by_name(&it.attrs, "no_mangle").is_some() { - return; - } - NonUpperCaseGlobals::check_upper_case(cx, "static variable", it.name, it.span); + hir::ItemKind::Static(..) if !attr::contains_name(&it.attrs, "no_mangle") => { + NonUpperCaseGlobals::check_upper_case(cx, "static variable", &it.ident); } hir::ItemKind::Const(..) => { - NonUpperCaseGlobals::check_upper_case(cx, "constant", it.name, it.span); + NonUpperCaseGlobals::check_upper_case(cx, "constant", &it.ident); } _ => {} } } - fn check_trait_item(&mut self, cx: &LateContext, ti: &hir::TraitItem) { - match ti.node { - hir::TraitItemKind::Const(..) => { - NonUpperCaseGlobals::check_upper_case(cx, "associated constant", - ti.ident.name, ti.span); - } - _ => {} + fn check_trait_item(&mut self, cx: &LateContext<'_, '_>, ti: &hir::TraitItem) { + if let hir::TraitItemKind::Const(..) = ti.node { + NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &ti.ident); } } - fn check_impl_item(&mut self, cx: &LateContext, ii: &hir::ImplItem) { - match ii.node { - hir::ImplItemKind::Const(..) => { - NonUpperCaseGlobals::check_upper_case(cx, "associated constant", - ii.ident.name, ii.span); - } - _ => {} + fn check_impl_item(&mut self, cx: &LateContext<'_, '_>, ii: &hir::ImplItem) { + if let hir::ImplItemKind::Const(..) = ii.node { + NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &ii.ident); } } - fn check_pat(&mut self, cx: &LateContext, p: &hir::Pat) { + fn check_pat(&mut self, cx: &LateContext<'_, '_>, p: &hir::Pat) { // Lint for constants that look like binding identifiers (#7526) if let PatKind::Path(hir::QPath::Resolved(None, ref path)) = p.node { if let Def::Const(..) 
= path.def { if path.segments.len() == 1 { - NonUpperCaseGlobals::check_upper_case(cx, - "constant in pattern", - path.segments[0].ident.name, - path.span); + NonUpperCaseGlobals::check_upper_case( + cx, + "constant in pattern", + &path.segments[0].ident + ); } } } } + + fn check_generic_param(&mut self, cx: &LateContext<'_, '_>, param: &hir::GenericParam) { + if let GenericParamKind::Const { .. } = param.kind { + NonUpperCaseGlobals::check_upper_case( + cx, + "const parameter", + ¶m.name.ident(), + ); + } + } +} + +#[cfg(test)] +mod tests { + use super::{is_camel_case, to_camel_case}; + + #[test] + fn camel_case() { + assert!(!is_camel_case("userData")); + assert_eq!(to_camel_case("userData"), "UserData"); + + assert!(is_camel_case("X86_64")); + + assert!(!is_camel_case("X86__64")); + assert_eq!(to_camel_case("X86__64"), "X86_64"); + + assert!(!is_camel_case("Abc_123")); + assert_eq!(to_camel_case("Abc_123"), "Abc123"); + + assert!(!is_camel_case("A1_b2_c3")); + assert_eq!(to_camel_case("A1_b2_c3"), "A1B2C3"); + + assert!(!is_camel_case("ONE_TWO_THREE")); + assert_eq!(to_camel_case("ONE_TWO_THREE"), "OneTwoThree"); + } } diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs index e225e642e72e7..d3223c6edb809 100644 --- a/src/librustc_lint/types.rs +++ b/src/librustc_lint/types.rs @@ -1,19 +1,10 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] use rustc::hir::Node; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt}; use rustc::ty::layout::{self, IntegerExt, LayoutOf, VariantIdx}; +use rustc::{lint, util}; use rustc_data_structures::indexed_vec::Idx; use util::nodemap::FxHashSet; use lint::{LateContext, LintContext, LintArray}; @@ -25,12 +16,15 @@ use std::{i8, i16, i32, i64, u8, u16, u32, u64, f32, f64}; use syntax::{ast, attr}; use syntax::errors::Applicability; use rustc_target::spec::abi::Abi; -use syntax::edition::Edition; use syntax_pos::Span; use syntax::source_map; use rustc::hir; +use rustc::mir::interpret::{sign_extend, truncate}; + +use log::debug; + declare_lint! { UNUSED_COMPARISONS, Warn, @@ -39,9 +33,8 @@ declare_lint! { declare_lint! { OVERFLOWING_LITERALS, - Warn, - "literal out of range for its type", - Edition::Edition2018 => Deny + Deny, + "literal out of range for its type" } declare_lint! { @@ -53,19 +46,14 @@ declare_lint! 
{ #[derive(Copy, Clone)] pub struct TypeLimits { /// Id of the last visited negated expression - negated_expr_id: ast::NodeId, + negated_expr_id: hir::HirId, } +impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]); + impl TypeLimits { pub fn new() -> TypeLimits { - TypeLimits { negated_expr_id: ast::DUMMY_NODE_ID } - } -} - -impl LintPass for TypeLimits { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_COMPARISONS, - OVERFLOWING_LITERALS) + TypeLimits { negated_expr_id: hir::DUMMY_HIR_ID } } } @@ -74,8 +62,8 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { match e.node { hir::ExprKind::Unary(hir::UnNeg, ref expr) => { // propagate negation, if the negation itself isn't negated - if self.negated_expr_id != e.id { - self.negated_expr_id = expr.id; + if self.negated_expr_id != e.hir_id { + self.negated_expr_id = expr.hir_id; } } hir::ExprKind::Binary(binop, ref l, ref r) => { @@ -86,7 +74,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } hir::ExprKind::Lit(ref lit) => { - match cx.tables.node_id_to_type(e.hir_id).sty { + match cx.tables.node_type(e.hir_id).sty { ty::Int(t) => { match lit.node { ast::LitKind::Int(v, ast::LitIntType::Signed(_)) | @@ -98,7 +86,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { }; let (_, max) = int_ty_range(int_type); let max = max as u128; - let negative = self.negated_expr_id == e.id; + let negative = self.negated_expr_id == e.hir_id; // Detect literal value out of range [min, max] inclusive // avoiding use of -min to prevent overflow/panic @@ -107,7 +95,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { report_bin_hex_error( cx, e, - ty::Int(t), + attr::IntType::SignedInt(t), repr_str, v, negative, @@ -139,15 +127,15 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { _ => bug!(), }; if lit_val < min || lit_val > max { - let parent_id = cx.tcx.hir().get_parent_node(e.id); - if let Node::Expr(parent_expr) = cx.tcx.hir().get(parent_id) { + let parent_id = cx.tcx.hir().get_parent_node_by_hir_id(e.hir_id); + if let Node::Expr(parent_expr) = cx.tcx.hir().get_by_hir_id(parent_id) { if let hir::ExprKind::Cast(..) 
= parent_expr.node { if let ty::Char = cx.tables.expr_ty(parent_expr).sty { let mut err = cx.struct_span_lint( OVERFLOWING_LITERALS, parent_expr.span, "only u8 can be cast into char"); - err.span_suggestion_with_applicability( + err.span_suggestion( parent_expr.span, &"use a char literal instead", format!("'\\u{{{:X}}}'", lit_val), @@ -162,7 +150,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { report_bin_hex_error( cx, e, - ty::Uint(t), + attr::IntType::UnsignedInt(t), repr_str, lit_val, false, @@ -245,7 +233,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } - fn check_limits(cx: &LateContext, + fn check_limits(cx: &LateContext<'_, '_>, binop: hir::BinOp, l: &hir::Expr, r: &hir::Expr) @@ -258,7 +246,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { // Normalize the binop so that the literal is always on the RHS in // the comparison let norm_binop = if swap { rev_binop(binop) } else { binop }; - match cx.tables.node_id_to_type(expr.hir_id).sty { + match cx.tables.node_type(expr.hir_id).sty { ty::Int(int_ty) => { let (min, max) = int_ty_range(int_ty); let lit_val: i128 = match lit.node { @@ -302,7 +290,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } - fn get_bin_hex_repr(cx: &LateContext, lit: &ast::Lit) -> Option { + fn get_bin_hex_repr(cx: &LateContext<'_, '_>, lit: &ast::Lit) -> Option { let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?; let firstch = src.chars().next()?; @@ -324,7 +312,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { // // No suggestion for: `isize`, `usize`. fn get_type_suggestion<'a>( - t: &ty::TyKind, + t: Ty<'_>, val: u128, negative: bool, ) -> Option { @@ -350,14 +338,14 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } } } - match t { - &ty::Int(i) => find_fit!(i, val, negative, + match t.sty { + ty::Int(i) => find_fit!(i, val, negative, I8 => [U8] => [I16, I32, I64, I128], I16 => [U16] => [I32, I64, I128], I32 => [U32] => [I64, I128], I64 => [U64] => [I128], I128 => [U128] => []), - &ty::Uint(u) => find_fit!(u, val, negative, + ty::Uint(u) => find_fit!(u, val, negative, U8 => [U8, U16, U32, U64, U128] => [], U16 => [U16, U32, U64, U128] => [], U32 => [U32, U64, U128] => [], @@ -368,27 +356,23 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { } fn report_bin_hex_error( - cx: &LateContext, + cx: &LateContext<'_, '_>, expr: &hir::Expr, - ty: ty::TyKind, + ty: attr::IntType, repr_str: String, val: u128, negative: bool, ) { + let size = layout::Integer::from_attr(&cx.tcx, ty).size(); let (t, actually) = match ty { - ty::Int(t) => { - let ity = attr::IntType::SignedInt(t); - let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits(); - let actually = (val << (128 - bits)) as i128 >> (128 - bits); + attr::IntType::SignedInt(t) => { + let actually = sign_extend(val, size) as i128; (format!("{:?}", t), actually.to_string()) } - ty::Uint(t) => { - let ity = attr::IntType::UnsignedInt(t); - let bits = layout::Integer::from_attr(&cx.tcx, ity).size().bits(); - let actually = (val << (128 - bits)) >> (128 - bits); + attr::IntType::UnsignedInt(t) => { + let actually = truncate(val, size); (format!("{:?}", t), actually.to_string()) } - _ => bug!(), }; let mut err = cx.struct_span_lint( OVERFLOWING_LITERALS, @@ -401,11 +385,11 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for TypeLimits { repr_str, val, t, actually, t )); if let Some(sugg_ty) = - get_type_suggestion(&cx.tables.node_id_to_type(expr.hir_id).sty, val, negative) + get_type_suggestion(&cx.tables.node_type(expr.hir_id), 
val, negative) { if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') { let (sans_suffix, _) = repr_str.split_at(pos); - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("consider using `{}` instead", sugg_ty), format!("{}{}", sans_suffix, sugg_ty), @@ -427,6 +411,8 @@ declare_lint! { "proper use of libc types in foreign modules" } +declare_lint_pass!(ImproperCTypes => [IMPROPER_CTYPES]); + struct ImproperCTypesVisitor<'a, 'tcx: 'a> { cx: &'a LateContext<'a, 'tcx>, } @@ -448,7 +434,7 @@ enum FfiResult<'tcx> { /// FIXME: This duplicates code in codegen. fn is_repr_nullable_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def: &'tcx ty::AdtDef, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> bool { if def.variants.len() == 2 { let data_idx; @@ -480,12 +466,12 @@ fn is_repr_nullable_ptr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { - /// Check if the given type is "ffi-safe" (has a stable, well-defined + /// Checks if the given type is "ffi-safe" (has a stable, well-defined /// representation which can be exported to C code). fn check_type_for_ffi(&self, cache: &mut FxHashSet>, ty: Ty<'tcx>) -> FfiResult<'tcx> { - use self::FfiResult::*; + use FfiResult::*; let cx = self.cx.tcx; @@ -765,12 +751,19 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - fn check_foreign_fn(&mut self, id: ast::NodeId, decl: &hir::FnDecl) { - let def_id = self.cx.tcx.hir().local_def_id(id); + fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl) { + let def_id = self.cx.tcx.hir().local_def_id_from_hir_id(id); let sig = self.cx.tcx.fn_sig(def_id); let sig = self.cx.tcx.erase_late_bound_regions(&sig); + let inputs = if sig.c_variadic { + // Don't include the spoofed `VaList` in the functions list + // of inputs. + &sig.inputs()[..sig.inputs().len() - 1] + } else { + &sig.inputs()[..] 
+ }; - for (input_ty, input_hir) in sig.inputs().iter().zip(&decl.inputs) { + for (input_ty, input_hir) in inputs.iter().zip(&decl.inputs) { self.check_type_for_ffi_and_report_errors(input_hir.span, input_ty); } @@ -782,33 +775,24 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - fn check_foreign_static(&mut self, id: ast::NodeId, span: Span) { - let def_id = self.cx.tcx.hir().local_def_id(id); + fn check_foreign_static(&mut self, id: hir::HirId, span: Span) { + let def_id = self.cx.tcx.hir().local_def_id_from_hir_id(id); let ty = self.cx.tcx.type_of(def_id); self.check_type_for_ffi_and_report_errors(span, ty); } } -#[derive(Copy, Clone)] -pub struct ImproperCTypes; - -impl LintPass for ImproperCTypes { - fn get_lints(&self) -> LintArray { - lint_array!(IMPROPER_CTYPES) - } -} - impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImproperCTypes { - fn check_foreign_item(&mut self, cx: &LateContext, it: &hir::ForeignItem) { + fn check_foreign_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::ForeignItem) { let mut vis = ImproperCTypesVisitor { cx }; - let abi = cx.tcx.hir().get_foreign_abi(it.id); + let abi = cx.tcx.hir().get_foreign_abi_by_hir_id(it.hir_id); if abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic { match it.node { hir::ForeignItemKind::Fn(ref decl, _, _) => { - vis.check_foreign_fn(it.id, decl); + vis.check_foreign_fn(it.hir_id, decl); } hir::ForeignItemKind::Static(ref ty, _) => { - vis.check_foreign_static(it.id, ty.span); + vis.check_foreign_static(it.hir_id, ty.span); } hir::ForeignItemKind::Type => () } @@ -816,65 +800,65 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for ImproperCTypes { } } -pub struct VariantSizeDifferences; - -impl LintPass for VariantSizeDifferences { - fn get_lints(&self) -> LintArray { - lint_array!(VARIANT_SIZE_DIFFERENCES) - } -} +declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences { - fn check_item(&mut self, cx: &LateContext, it: &hir::Item) { + fn check_item(&mut self, cx: &LateContext<'_, '_>, it: &hir::Item) { if let hir::ItemKind::Enum(ref enum_definition, _) = it.node { - let item_def_id = cx.tcx.hir().local_def_id(it.id); + let item_def_id = cx.tcx.hir().local_def_id_from_hir_id(it.hir_id); let t = cx.tcx.type_of(item_def_id); let ty = cx.tcx.erase_regions(&t); - match cx.layout_of(ty) { - Ok(layout) => { - let variants = &layout.variants; - if let layout::Variants::Tagged { ref variants, ref tag, .. } = variants { - let discr_size = tag.value.size(&cx.tcx).bytes(); - - debug!("enum `{}` is {} bytes large with layout:\n{:#?}", - t, layout.size.bytes(), layout); - - let (largest, slargest, largest_index) = enum_definition.variants - .iter() - .zip(variants) - .map(|(variant, variant_layout)| { - // Subtract the size of the enum discriminant. - let bytes = variant_layout.size.bytes().saturating_sub(discr_size); - - debug!("- variant `{}` is {} bytes large", - variant.node.name, - bytes); - bytes - }) - .enumerate() - .fold((0, 0, 0), |(l, s, li), (idx, size)| if size > l { - (size, l, idx) - } else if size > s { - (l, size, li) - } else { - (l, s, li) - }); - - // We only warn if the largest variant is at least thrice as large as - // the second-largest. 
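As a concrete illustration of the threshold this rewrite keeps (a sketch; the enum is made up):

    enum Example {
        Small(u64),     // 8-byte payload
        Big([u64; 4]),  // 32-byte payload, more than three times the next
    }                   // largest, so `Big` is what variant_size_differences flags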
- if largest > slargest * 3 && slargest > 0 { - cx.span_lint(VARIANT_SIZE_DIFFERENCES, - enum_definition.variants[largest_index].span, - &format!("enum variant is more than three times \ - larger ({} bytes) than the next largest", - largest)); - } - } - } + let layout = match cx.layout_of(ty) { + Ok(layout) => layout, Err(ty::layout::LayoutError::Unknown(_)) => return, Err(err @ ty::layout::LayoutError::SizeOverflow(_)) => { bug!("failed to get layout for `{}`: {}", t, err); } + }; + let (variants, tag) = match layout.variants { + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + ref discr, + ref variants, + .. + } => (variants, discr), + _ => return, + }; + + let discr_size = tag.value.size(&cx.tcx).bytes(); + + debug!("enum `{}` is {} bytes large with layout:\n{:#?}", + t, layout.size.bytes(), layout); + + let (largest, slargest, largest_index) = enum_definition.variants + .iter() + .zip(variants) + .map(|(variant, variant_layout)| { + // Subtract the size of the enum discriminant. + let bytes = variant_layout.size.bytes().saturating_sub(discr_size); + + debug!("- variant `{}` is {} bytes large", + variant.node.ident, + bytes); + bytes + }) + .enumerate() + .fold((0, 0, 0), |(l, s, li), (idx, size)| if size > l { + (size, l, idx) + } else if size > s { + (l, size, li) + } else { + (l, s, li) + }); + + // We only warn if the largest variant is at least thrice as large as + // the second-largest. + if largest > slargest * 3 && slargest > 0 { + cx.span_lint(VARIANT_SIZE_DIFFERENCES, + enum_definition.variants[largest_index].span, + &format!("enum variant is more than three times \ + larger ({} bytes) than the next largest", + largest)); } } } diff --git a/src/librustc_lint/unused.rs b/src/librustc_lint/unused.rs index d3e1af70c21da..d41d97f58bcbe 100644 --- a/src/librustc_lint/unused.rs +++ b/src/librustc_lint/unused.rs @@ -1,15 +1,6 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::def::Def; use rustc::hir::def_id::DefId; +use rustc::lint; use rustc::ty; use rustc::ty::adjustment; use lint::{LateContext, EarlyContext, LintContext, LintArray}; @@ -26,6 +17,8 @@ use syntax_pos::Span; use rustc::hir; +use log::debug; + declare_lint! { pub UNUSED_MUST_USE, Warn, @@ -39,19 +32,12 @@ declare_lint! 
{ "unused result of an expression in a statement" } -#[derive(Copy, Clone)] -pub struct UnusedResults; - -impl LintPass for UnusedResults { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_MUST_USE, UNUSED_RESULTS) - } -} +declare_lint_pass!(UnusedResults => [UNUSED_MUST_USE, UNUSED_RESULTS]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { - fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) { + fn check_stmt(&mut self, cx: &LateContext<'_, '_>, s: &hir::Stmt) { let expr = match s.node { - hir::StmtKind::Semi(ref expr, _) => &**expr, + hir::StmtKind::Semi(ref expr) => &**expr, _ => return, }; @@ -61,7 +47,9 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { let t = cx.tables.expr_ty(&expr); let type_permits_lack_of_use = if t.is_unit() - || cx.tcx.is_ty_uninhabited_from(cx.tcx.hir().get_module_parent(expr.id), t) { + || cx.tcx.is_ty_uninhabited_from( + cx.tcx.hir().get_module_parent_by_hir_id(expr.hir_id), t) + { true } else { match t.sty { @@ -113,7 +101,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { } }, hir::ExprKind::MethodCall(..) => { - cx.tables.type_dependent_defs().get(expr.hir_id).cloned() + cx.tables.type_dependent_def(expr.hir_id) }, _ => None }; @@ -174,7 +162,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { } fn check_must_use( - cx: &LateContext, + cx: &LateContext<'_, '_>, def_id: DefId, sp: Span, descr_pre_path: &str, @@ -183,7 +171,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedResults { for attr in cx.tcx.get_attrs(def_id).iter() { if attr.check_name("must_use") { let msg = format!("unused {}`{}`{} that must be used", - descr_pre_path, cx.tcx.item_path_str(def_id), descr_post_path); + descr_pre_path, cx.tcx.def_path_str(def_id), descr_post_path); let mut err = cx.struct_span_lint(UNUSED_MUST_USE, sp, &msg); // check for #[must_use = "..."] if let Some(note) = attr.value_str() { @@ -204,18 +192,11 @@ declare_lint! { "path statements with no effect" } -#[derive(Copy, Clone)] -pub struct PathStatements; - -impl LintPass for PathStatements { - fn get_lints(&self) -> LintArray { - lint_array!(PATH_STATEMENTS) - } -} +declare_lint_pass!(PathStatements => [PATH_STATEMENTS]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for PathStatements { - fn check_stmt(&mut self, cx: &LateContext, s: &hir::Stmt) { - if let hir::StmtKind::Semi(ref expr, _) = s.node { + fn check_stmt(&mut self, cx: &LateContext<'_, '_>, s: &hir::Stmt) { + if let hir::StmtKind::Semi(ref expr) = s.node { if let hir::ExprKind::Path(_) = expr.node { cx.span_lint(PATH_STATEMENTS, s.span, "path statement with no effect"); } @@ -229,20 +210,13 @@ declare_lint! { "detects attributes that were not used by the compiler" } -#[derive(Copy, Clone)] -pub struct UnusedAttributes; - -impl LintPass for UnusedAttributes { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_ATTRIBUTES) - } -} +declare_lint_pass!(UnusedAttributes => [UNUSED_ATTRIBUTES]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAttributes { - fn check_attribute(&mut self, cx: &LateContext, attr: &ast::Attribute) { + fn check_attribute(&mut self, cx: &LateContext<'_, '_>, attr: &ast::Attribute) { debug!("checking attribute: {:?}", attr); // Note that check_name() marks the attribute as used if it matches. - for &(ref name, ty, _) in BUILTIN_ATTRIBUTES { + for &(name, ty, ..) 
in BUILTIN_ATTRIBUTES { match ty { AttributeType::Whitelisted if attr.check_name(name) => { debug!("{:?} is Whitelisted", name); @@ -260,19 +234,21 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAttributes { } } - let name = attr.name(); + let name = attr.name_or_empty(); if !attr::is_used(attr) { debug!("Emitting warning for: {:?}", attr); cx.span_lint(UNUSED_ATTRIBUTES, attr.span, "unused attribute"); // Is it a builtin attribute that must be used at the crate level? let known_crate = BUILTIN_ATTRIBUTES.iter() - .find(|&&(builtin, ty, _)| name == builtin && ty == AttributeType::CrateLevel) + .find(|&&(builtin, ty, ..)| { + name == builtin && ty == AttributeType::CrateLevel + }) .is_some(); // Has a plugin registered this attribute as one that must be used at // the crate level? let plugin_crate = plugin_attributes.iter() - .find(|&&(ref x, t)| name == &**x && AttributeType::CrateLevel == t) + .find(|&&(ref x, t)| name == x.as_str() && AttributeType::CrateLevel == t) .is_some(); if known_crate || plugin_crate { let msg = match attr.style { @@ -296,12 +272,11 @@ declare_lint! { "`if`, `match`, `while` and `return` do not need parentheses" } -#[derive(Copy, Clone)] -pub struct UnusedParens; +declare_lint_pass!(UnusedParens => [UNUSED_PARENS]); impl UnusedParens { fn check_unused_parens_expr(&self, - cx: &EarlyContext, + cx: &EarlyContext<'_>, value: &ast::Expr, msg: &str, followed_by_block: bool) { @@ -323,7 +298,7 @@ impl UnusedParens { } fn check_unused_parens_pat(&self, - cx: &EarlyContext, + cx: &EarlyContext<'_>, value: &ast::Pat, msg: &str) { if let ast::PatKind::Paren(_) = value.node { @@ -337,7 +312,7 @@ impl UnusedParens { } } - fn remove_outer_parens(cx: &EarlyContext, span: Span, pattern: &str, msg: &str) { + fn remove_outer_parens(cx: &EarlyContext<'_>, span: Span, pattern: &str, msg: &str) { let span_msg = format!("unnecessary parentheses around {}", msg); let mut err = cx.struct_span_lint(UNUSED_PARENS, span, &span_msg); let mut ate_left_paren = false; @@ -364,24 +339,18 @@ impl UnusedParens { _ => false, } }).to_owned(); - err.span_suggestion_short_with_applicability( - span, - "remove these parentheses", - parens_removed, - Applicability::MachineApplicable - ); + err.span_suggestion_short( + span, + "remove these parentheses", + parens_removed, + Applicability::MachineApplicable, + ); err.emit(); } } -impl LintPass for UnusedParens { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_PARENS) - } -} - impl EarlyLintPass for UnusedParens { - fn check_expr(&mut self, cx: &EarlyContext, e: &ast::Expr) { + fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) { use syntax::ast::ExprKind::*; let (value, msg, followed_by_block) = match e.node { If(ref cond, ..) => (cond, "`if` condition", true), @@ -423,7 +392,7 @@ impl EarlyLintPass for UnusedParens { self.check_unused_parens_expr(cx, &value, msg, followed_by_block); } - fn check_pat(&mut self, cx: &EarlyContext, p: &ast::Pat, _: &mut bool) { + fn check_pat(&mut self, cx: &EarlyContext<'_>, p: &ast::Pat, _: &mut bool) { use ast::PatKind::{Paren, Range}; // The lint visitor will visit each subpattern of `p`. We do not want to lint any range // pattern no matter where it occurs in the pattern. 
For something like `&(a..=b)`, there @@ -437,7 +406,7 @@ impl EarlyLintPass for UnusedParens { } } - fn check_stmt(&mut self, cx: &EarlyContext, s: &ast::Stmt) { + fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) { if let ast::StmtKind::Local(ref local) = s.node { if let Some(ref value) = local.init { self.check_unused_parens_expr(cx, &value, "assigned value", false); @@ -452,11 +421,10 @@ declare_lint! { "unnecessary braces around an imported item" } -#[derive(Copy, Clone)] -pub struct UnusedImportBraces; +declare_lint_pass!(UnusedImportBraces => [UNUSED_IMPORT_BRACES]); impl UnusedImportBraces { - fn check_use_tree(&self, cx: &EarlyContext, use_tree: &ast::UseTree, item: &ast::Item) { + fn check_use_tree(&self, cx: &EarlyContext<'_>, use_tree: &ast::UseTree, item: &ast::Item) { if let ast::UseTreeKind::Nested(ref items) = use_tree.kind { // Recursively check nested UseTrees for &(ref tree, _) in items { @@ -492,14 +460,8 @@ impl UnusedImportBraces { } } -impl LintPass for UnusedImportBraces { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_IMPORT_BRACES) - } -} - impl EarlyLintPass for UnusedImportBraces { - fn check_item(&mut self, cx: &EarlyContext, item: &ast::Item) { + fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) { if let ast::ItemKind::Use(ref use_tree) = item.node { self.check_use_tree(cx, use_tree, item); } @@ -512,17 +474,10 @@ declare_lint! { "detects unnecessary allocations that can be eliminated" } -#[derive(Copy, Clone)] -pub struct UnusedAllocation; - -impl LintPass for UnusedAllocation { - fn get_lints(&self) -> LintArray { - lint_array!(UNUSED_ALLOCATION) - } -} +declare_lint_pass!(UnusedAllocation => [UNUSED_ALLOCATION]); impl<'a, 'tcx> LateLintPass<'a, 'tcx> for UnusedAllocation { - fn check_expr(&mut self, cx: &LateContext, e: &hir::Expr) { + fn check_expr(&mut self, cx: &LateContext<'_, '_>, e: &hir::Expr) { match e.node { hir::ExprKind::Box(_) => {} _ => return, diff --git a/src/librustc_llvm/Cargo.toml b/src/librustc_llvm/Cargo.toml index 013badb71cc5a..0fe327d5deeeb 100644 --- a/src/librustc_llvm/Cargo.toml +++ b/src/librustc_llvm/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] name = "rustc_llvm" version = "0.0.0" build = "build.rs" +edition = "2018" [lib] name = "rustc_llvm" diff --git a/src/librustc_llvm/build.rs b/src/librustc_llvm/build.rs index ce482087bbae5..dd7237948f6d0 100644 --- a/src/librustc_llvm/build.rs +++ b/src/librustc_llvm/build.rs @@ -1,16 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
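A quick usage sketch for the `unused_parens` code covered above (illustrative only):

    fn main() {
        let x = 1;
        if (x == 1) {}
        // warning: unnecessary parentheses around `if` condition
        // suggestion (MachineApplicable): remove these parentheses
    }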
- -extern crate cc; -extern crate build_helper; - use std::process::Command; use std::env; use std::path::{PathBuf, Path}; @@ -34,6 +21,8 @@ fn main() { return; } + build_helper::restore_library_path(); + let target = env::var("TARGET").expect("TARGET was not set"); let llvm_config = env::var_os("LLVM_CONFIG") .map(PathBuf::from) @@ -81,7 +70,7 @@ fn main() { let is_crossed = target != host; let mut optional_components = - vec!["x86", "arm", "aarch64", "amdgpu", "mips", "powerpc", + vec!["x86", "arm", "aarch64", "amdgpu", "avr", "mips", "powerpc", "systemz", "jsbackend", "webassembly", "msp430", "sparc", "nvptx"]; let mut version_cmd = Command::new(&llvm_config); @@ -142,6 +131,10 @@ fn main() { continue; } + if flag.starts_with("-flto") { + continue; + } + // -Wdate-time is not supported by the netbsd cross compiler if is_crossed && target.contains("netbsd") && flag.contains("date-time") { continue; @@ -242,15 +235,20 @@ fn main() { } let llvm_static_stdcpp = env::var_os("LLVM_STATIC_STDCPP"); + let llvm_use_libcxx = env::var_os("LLVM_USE_LIBCXX"); let stdcppname = if target.contains("openbsd") { // llvm-config on OpenBSD doesn't mention stdlib=libc++ "c++" } else if target.contains("freebsd") { "c++" + } else if target.contains("darwin") { + "c++" } else if target.contains("netbsd") && llvm_static_stdcpp.is_some() { // NetBSD uses a separate library when relocation is required "stdc++_pic" + } else if llvm_use_libcxx.is_some() { + "c++" } else { "stdc++" }; diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs index 10f35e5598b2b..4b26bca109363 100644 --- a/src/librustc_llvm/lib.rs +++ b/src/librustc_llvm/lib.rs @@ -1,19 +1,8 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +#![deny(rust_2018_idioms)] #![feature(nll)] #![feature(static_nobundle)] -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] // See librustc_cratesio_shim/Cargo.toml for a comment explaining this. 
#[allow(unused_extern_crates)] @@ -52,6 +41,12 @@ pub fn initialize_available_targets() { LLVMInitializeARMTargetMC, LLVMInitializeARMAsmPrinter, LLVMInitializeARMAsmParser); + init_target!(llvm_component = "avr", + LLVMInitializeAVRTargetInfo, + LLVMInitializeAVRTarget, + LLVMInitializeAVRTargetMC, + LLVMInitializeAVRAsmPrinter, + LLVMInitializeAVRAsmParser); init_target!(llvm_component = "aarch64", LLVMInitializeAArch64TargetInfo, LLVMInitializeAArch64Target, diff --git a/src/librustc_lsan/Cargo.toml b/src/librustc_lsan/Cargo.toml index a8e11df7670cf..9a24361f44e64 100644 --- a/src/librustc_lsan/Cargo.toml +++ b/src/librustc_lsan/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] build = "build.rs" name = "rustc_lsan" version = "0.0.0" +edition = "2018" [lib] name = "rustc_lsan" @@ -11,7 +12,7 @@ test = false [build-dependencies] build_helper = { path = "../build_helper" } -cmake = "0.1.18" +cmake = "0.1.38" [dependencies] alloc = { path = "../liballoc" } diff --git a/src/librustc_lsan/build.rs b/src/librustc_lsan/build.rs index 05e40cdceddda..b8c7b7c2d5537 100644 --- a/src/librustc_lsan/build.rs +++ b/src/librustc_lsan/build.rs @@ -1,16 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern crate build_helper; -extern crate cmake; - use std::env; use build_helper::sanitizer_lib_boilerplate; @@ -18,6 +5,8 @@ use cmake::Config; fn main() { if let Some(llvm_config) = env::var_os("LLVM_CONFIG") { + build_helper::restore_library_path(); + let (native, target) = match sanitizer_lib_boilerplate("lsan") { Ok(native) => native, _ => return, diff --git a/src/librustc_lsan/lib.rs b/src/librustc_lsan/lib.rs index 47f917e40c1ff..3bdb86d313dcb 100644 --- a/src/librustc_lsan/lib.rs +++ b/src/librustc_lsan/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- #![sanitizer_runtime] #![feature(nll)] #![feature(sanitizer_runtime)] @@ -16,3 +6,5 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] + +#![deny(rust_2018_idioms)] diff --git a/src/librustc_macros/Cargo.toml b/src/librustc_macros/Cargo.toml new file mode 100644 index 0000000000000..6e32a53c364a6 --- /dev/null +++ b/src/librustc_macros/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "rustc_macros" +version = "0.1.0" +authors = ["The Rust Project Developers"] +edition = "2018" + +[lib] +proc-macro = true + +[dependencies] +synstructure = "0.10.1" +syn = { version = "0.15.22", features = ["full"] } +proc-macro2 = "0.4.24" +quote = "0.6.10" +itertools = "0.8" diff --git a/src/librustc_macros/src/hash_stable.rs b/src/librustc_macros/src/hash_stable.rs new file mode 100644 index 0000000000000..6d7590c7d1cd3 --- /dev/null +++ b/src/librustc_macros/src/hash_stable.rs @@ -0,0 +1,87 @@ +use synstructure; +use syn::{self, Meta, NestedMeta, parse_quote}; +use proc_macro2::{self, Ident}; +use quote::quote; + +struct Attributes { + ignore: bool, + project: Option, +} + +fn parse_attributes(field: &syn::Field) -> Attributes { + let mut attrs = Attributes { + ignore: false, + project: None, + }; + for attr in &field.attrs { + if let Ok(meta) = attr.parse_meta() { + if &meta.name().to_string() != "stable_hasher" { + continue; + } + let mut any_attr = false; + if let Meta::List(list) = meta { + for nested in list.nested.iter() { + if let NestedMeta::Meta(meta) = nested { + if &meta.name().to_string() == "ignore" { + attrs.ignore = true; + any_attr = true; + } + if &meta.name().to_string() == "project" { + if let Meta::List(list) = meta { + if let Some(nested) = list.nested.iter().next() { + if let NestedMeta::Meta(meta) = nested { + attrs.project = Some(meta.name()); + any_attr = true; + } + } + } + } + } + } + } + if !any_attr { + panic!("error parsing stable_hasher"); + } + } + } + attrs +} + +pub fn hash_stable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream { + let generic: syn::GenericParam = parse_quote!('__ctx); + s.add_bounds(synstructure::AddBounds::Generics); + s.add_impl_generic(generic); + let body = s.each(|bi| { + let attrs = parse_attributes(bi.ast()); + if attrs.ignore { + quote!{} + } else if let Some(project) = attrs.project { + quote!{ + &#bi.#project.hash_stable(__hcx, __hasher); + } + } else { + quote!{ + #bi.hash_stable(__hcx, __hasher); + } + } + }); + + let discriminant = match s.ast().data { + syn::Data::Enum(_) => quote! { + ::std::mem::discriminant(self).hash_stable(__hcx, __hasher); + }, + syn::Data::Struct(_) => quote! 
{}, + syn::Data::Union(_) => panic!("cannot derive on union"), + }; + + s.bound_impl(quote!(::rustc_data_structures::stable_hasher::HashStable + <::rustc::ich::StableHashingContext<'__ctx>>), quote!{ + fn hash_stable<__W: ::rustc_data_structures::stable_hasher::StableHasherResult>( + &self, + __hcx: &mut ::rustc::ich::StableHashingContext<'__ctx>, + __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher<__W>) { + #discriminant + match *self { #body } + } + }) +} diff --git a/src/librustc_macros/src/lib.rs b/src/librustc_macros/src/lib.rs new file mode 100644 index 0000000000000..e99ceb1b0c79b --- /dev/null +++ b/src/librustc_macros/src/lib.rs @@ -0,0 +1,18 @@ +#![feature(proc_macro_hygiene)] +#![deny(rust_2018_idioms)] + +extern crate proc_macro; + +use synstructure::decl_derive; + +use proc_macro::TokenStream; + +mod hash_stable; +mod query; + +#[proc_macro] +pub fn rustc_queries(input: TokenStream) -> TokenStream { + query::rustc_queries(input) +} + +decl_derive!([HashStable, attributes(stable_hasher)] => hash_stable::hash_stable_derive); diff --git a/src/librustc_macros/src/query.rs b/src/librustc_macros/src/query.rs new file mode 100644 index 0000000000000..e4a6dfcd4e85e --- /dev/null +++ b/src/librustc_macros/src/query.rs @@ -0,0 +1,524 @@ +use proc_macro::TokenStream; +use syn::{ + Token, Ident, Type, Attribute, ReturnType, Expr, Block, Error, + braced, parenthesized, parse_macro_input, +}; +use syn::spanned::Spanned; +use syn::parse::{Result, Parse, ParseStream}; +use syn::punctuated::Punctuated; +use syn; +use quote::quote; +use itertools::Itertools; + +#[allow(non_camel_case_types)] +mod kw { + syn::custom_keyword!(query); +} + +/// Ident or a wildcard `_`. +struct IdentOrWild(Ident); + +impl Parse for IdentOrWild { + fn parse(input: ParseStream<'_>) -> Result { + Ok(if input.peek(Token![_]) { + let underscore = input.parse::()?; + IdentOrWild(Ident::new("_", underscore.span())) + } else { + IdentOrWild(input.parse()?) + }) + } +} + +/// A modifier for a query +enum QueryModifier { + /// The description of the query. + Desc(Option, Punctuated), + + /// Cache the query to disk if the `Expr` returns true. + Cache(Option, Expr), + + /// Custom code to load the query from disk. + LoadCached(Ident, Ident, Block), + + /// A cycle error for this query aborting the compilation with a fatal error. 
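// An illustrative sketch of how a compiler type could opt into the `HashStable`
// derive implemented in hash_stable.rs and registered above via
// `decl_derive!([HashStable, attributes(stable_hasher)] ...)`. The struct and its
// fields are assumptions made for this example only; the `#[stable_hasher(...)]`
// attributes are the ones handled by `parse_attributes`: `ignore` skips a field,
// `project(...)` hashes just the named sub-field.
#[derive(HashStable)]
struct ExampleEntry<'tcx> {
    ty: Ty<'tcx>,
    #[stable_hasher(project(name))]
    ident: Ident,
    #[stable_hasher(ignore)]
    hir_id: hir::HirId,
}
// The derive expands to an `impl rustc_data_structures::stable_hasher::HashStable`
// for `StableHashingContext<'__ctx>` whose `hash_stable` hashes the discriminant
// (for enums) and then every field not marked `ignore`.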
+ FatalCycle, + + /// A cycle error results in a delay_bug call + CycleDelayBug, + + /// Don't hash the result, instead just mark a query red if it runs + NoHash, + + /// Don't force the query + NoForce, + + /// Generate a dep node based on the dependencies of the query + Anon, + + // Always evaluate the query, ignoring its depdendencies + EvalAlways, +} + +impl Parse for QueryModifier { + fn parse(input: ParseStream<'_>) -> Result { + let modifier: Ident = input.parse()?; + if modifier == "desc" { + // Parse a description modifier like: + // `desc { |tcx| "foo {}", tcx.item_path(key) }` + let attr_content; + braced!(attr_content in input); + let tcx = if attr_content.peek(Token![|]) { + attr_content.parse::()?; + let tcx = attr_content.parse()?; + attr_content.parse::()?; + Some(tcx) + } else { + None + }; + let desc = attr_content.parse_terminated(Expr::parse)?; + Ok(QueryModifier::Desc(tcx, desc)) + } else if modifier == "cache" { + // Parse a cache modifier like: + // `cache { |tcx| key.is_local() }` + let attr_content; + braced!(attr_content in input); + let tcx = if attr_content.peek(Token![|]) { + attr_content.parse::()?; + let tcx = attr_content.parse()?; + attr_content.parse::()?; + Some(tcx) + } else { + None + }; + let expr = attr_content.parse()?; + Ok(QueryModifier::Cache(tcx, expr)) + } else if modifier == "load_cached" { + // Parse a load_cached modifier like: + // `load_cached(tcx, id) { tcx.queries.on_disk_cache.try_load_query_result(tcx, id) }` + let args; + parenthesized!(args in input); + let tcx = args.parse()?; + args.parse::()?; + let id = args.parse()?; + let block = input.parse()?; + Ok(QueryModifier::LoadCached(tcx, id, block)) + } else if modifier == "fatal_cycle" { + Ok(QueryModifier::FatalCycle) + } else if modifier == "cycle_delay_bug" { + Ok(QueryModifier::CycleDelayBug) + } else if modifier == "no_hash" { + Ok(QueryModifier::NoHash) + } else if modifier == "no_force" { + Ok(QueryModifier::NoForce) + } else if modifier == "anon" { + Ok(QueryModifier::Anon) + } else if modifier == "eval_always" { + Ok(QueryModifier::EvalAlways) + } else { + Err(Error::new(modifier.span(), "unknown query modifier")) + } + } +} + +/// Ensures only doc comment attributes are used +fn check_attributes(attrs: Vec) -> Result<()> { + for attr in attrs { + if !attr.path.is_ident("doc") { + return Err(Error::new(attr.span(), "attributes not supported on queries")); + } + } + Ok(()) +} + +/// A compiler query. `query ... { ... }` +struct Query { + modifiers: List, + name: Ident, + key: IdentOrWild, + arg: Type, + result: ReturnType, +} + +impl Parse for Query { + fn parse(input: ParseStream<'_>) -> Result { + check_attributes(input.call(Attribute::parse_outer)?)?; + + // Parse the query declaration. Like `query type_of(key: DefId) -> Ty<'tcx>` + input.parse::()?; + let name: Ident = input.parse()?; + let arg_content; + parenthesized!(arg_content in input); + let key = arg_content.parse()?; + arg_content.parse::()?; + let arg = arg_content.parse()?; + let result = input.parse()?; + + // Parse the query modifiers + let content; + braced!(content in input); + let modifiers = content.parse()?; + + Ok(Query { + modifiers, + name, + key, + arg, + result, + }) + } +} + +/// A type used to greedily parse another type until the input is empty. +struct List(Vec); + +impl Parse for List { + fn parse(input: ParseStream<'_>) -> Result { + let mut list = Vec::new(); + while !input.is_empty() { + list.push(input.parse()?); + } + Ok(List(list)) + } +} + +/// A named group containing queries. 
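// An illustrative sketch of the concrete syntax the parsers in this file accept
// as input to the `rustc_queries!` macro exported from lib.rs: named groups of
// `query` declarations, each followed by a braced block of the modifiers parsed
// above. The group name, query names and the `desc`/`cache` bodies are invented
// for the example; only the shape follows `Query::parse` and `QueryModifier::parse`.
rustc_queries! {
    TypeChecking {
        query type_of(key: DefId) -> Ty<'tcx> {
            cache { key.is_local() }
            desc { |tcx| "computing the type of `{}`", tcx.item_path(key) }
        }

        query is_panic_runtime(_: CrateNum) -> bool {
            fatal_cycle
            desc { "checking if a crate is `#![panic_runtime]`" }
        }
    }
}
// `rustc_queries!` turns such input into the `rustc_query_append!`,
// `rustc_dep_node_append!` and `rustc_dep_node_force!` helper macros assembled at
// the bottom of this file.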
+struct Group { + name: Ident, + queries: List, +} + +impl Parse for Group { + fn parse(input: ParseStream<'_>) -> Result { + let name: Ident = input.parse()?; + let content; + braced!(content in input); + Ok(Group { + name, + queries: content.parse()?, + }) + } +} + +struct QueryModifiers { + /// The description of the query. + desc: Option<(Option, Punctuated)>, + + /// Cache the query to disk if the `Expr` returns true. + cache: Option<(Option, Expr)>, + + /// Custom code to load the query from disk. + load_cached: Option<(Ident, Ident, Block)>, + + /// A cycle error for this query aborting the compilation with a fatal error. + fatal_cycle: bool, + + /// A cycle error results in a delay_bug call + cycle_delay_bug: bool, + + /// Don't hash the result, instead just mark a query red if it runs + no_hash: bool, + + /// Don't force the query + no_force: bool, + + /// Generate a dep node based on the dependencies of the query + anon: bool, + + // Always evaluate the query, ignoring its depdendencies + eval_always: bool, +} + +/// Process query modifiers into a struct, erroring on duplicates +fn process_modifiers(query: &mut Query) -> QueryModifiers { + let mut load_cached = None; + let mut cache = None; + let mut desc = None; + let mut fatal_cycle = false; + let mut cycle_delay_bug = false; + let mut no_hash = false; + let mut no_force = false; + let mut anon = false; + let mut eval_always = false; + for modifier in query.modifiers.0.drain(..) { + match modifier { + QueryModifier::LoadCached(tcx, id, block) => { + if load_cached.is_some() { + panic!("duplicate modifier `load_cached` for query `{}`", query.name); + } + load_cached = Some((tcx, id, block)); + } + QueryModifier::Cache(tcx, expr) => { + if cache.is_some() { + panic!("duplicate modifier `cache` for query `{}`", query.name); + } + cache = Some((tcx, expr)); + } + QueryModifier::Desc(tcx, list) => { + if desc.is_some() { + panic!("duplicate modifier `desc` for query `{}`", query.name); + } + desc = Some((tcx, list)); + } + QueryModifier::FatalCycle => { + if fatal_cycle { + panic!("duplicate modifier `fatal_cycle` for query `{}`", query.name); + } + fatal_cycle = true; + } + QueryModifier::CycleDelayBug => { + if cycle_delay_bug { + panic!("duplicate modifier `cycle_delay_bug` for query `{}`", query.name); + } + cycle_delay_bug = true; + } + QueryModifier::NoHash => { + if no_hash { + panic!("duplicate modifier `no_hash` for query `{}`", query.name); + } + no_hash = true; + } + QueryModifier::NoForce => { + if no_force { + panic!("duplicate modifier `no_force` for query `{}`", query.name); + } + no_force = true; + } + QueryModifier::Anon => { + if anon { + panic!("duplicate modifier `anon` for query `{}`", query.name); + } + anon = true; + } + QueryModifier::EvalAlways => { + if eval_always { + panic!("duplicate modifier `eval_always` for query `{}`", query.name); + } + eval_always = true; + } + } + } + QueryModifiers { + load_cached, + cache, + desc, + fatal_cycle, + cycle_delay_bug, + no_hash, + no_force, + anon, + eval_always, + } +} + +/// Add the impl of QueryDescription for the query to `impls` if one is requested +fn add_query_description_impl( + query: &Query, + modifiers: QueryModifiers, + impls: &mut proc_macro2::TokenStream +) { + let name = &query.name; + let arg = &query.arg; + let key = &query.key.0; + + // Find out if we should cache the query on disk + let cache = modifiers.cache.as_ref().map(|(tcx, expr)| { + let try_load_from_disk = if let Some((tcx, id, block)) = modifiers.load_cached.as_ref() { + // Use custom 
code to load the query from disk + quote! { + #[inline] + fn try_load_from_disk( + #tcx: TyCtxt<'_, 'tcx, 'tcx>, + #id: SerializedDepNodeIndex + ) -> Option { + #block + } + } + } else { + // Use the default code to load the query from disk + quote! { + #[inline] + fn try_load_from_disk( + tcx: TyCtxt<'_, 'tcx, 'tcx>, + id: SerializedDepNodeIndex + ) -> Option { + tcx.queries.on_disk_cache.try_load_query_result(tcx, id) + } + } + }; + + let tcx = tcx.as_ref().map(|t| quote! { #t }).unwrap_or(quote! { _ }); + quote! { + #[inline] + #[allow(unused_variables)] + fn cache_on_disk(#tcx: TyCtxt<'_, 'tcx, 'tcx>, #key: Self::Key) -> bool { + #expr + } + + #try_load_from_disk + } + }); + + if cache.is_none() && modifiers.load_cached.is_some() { + panic!("load_cached modifier on query `{}` without a cache modifier", name); + } + + let desc = modifiers.desc.as_ref().map(|(tcx, desc)| { + let tcx = tcx.as_ref().map(|t| quote! { #t }).unwrap_or(quote! { _ }); + quote! { + #[allow(unused_variables)] + fn describe( + #tcx: TyCtxt<'_, '_, '_>, + #key: #arg, + ) -> Cow<'static, str> { + format!(#desc).into() + } + } + }); + + if desc.is_some() || cache.is_some() { + let cache = cache.unwrap_or(quote! {}); + let desc = desc.unwrap_or(quote! {}); + + impls.extend(quote! { + impl<'tcx> QueryDescription<'tcx> for queries::#name<'tcx> { + #desc + #cache + } + }); + } +} + +pub fn rustc_queries(input: TokenStream) -> TokenStream { + let groups = parse_macro_input!(input as List); + + let mut query_stream = quote! {}; + let mut query_description_stream = quote! {}; + let mut dep_node_def_stream = quote! {}; + let mut dep_node_force_stream = quote! {}; + let mut no_force_queries = Vec::new(); + + for group in groups.0 { + let mut group_stream = quote! {}; + for mut query in group.queries.0 { + let modifiers = process_modifiers(&mut query); + let name = &query.name; + let arg = &query.arg; + let result_full = &query.result; + let result = match query.result { + ReturnType::Default => quote! { -> () }, + _ => quote! { #result_full }, + }; + + let mut attributes = Vec::new(); + + // Pass on the fatal_cycle modifier + if modifiers.fatal_cycle { + attributes.push(quote! { fatal_cycle }); + }; + // Pass on the cycle_delay_bug modifier + if modifiers.cycle_delay_bug { + attributes.push(quote! { cycle_delay_bug }); + }; + // Pass on the no_hash modifier + if modifiers.no_hash { + attributes.push(quote! { no_hash }); + }; + + let mut attribute_stream = quote! {}; + + for e in attributes.into_iter().intersperse(quote! {,}) { + attribute_stream.extend(e); + } + + // Add the query to the group + group_stream.extend(quote! { + [#attribute_stream] fn #name: #name(#arg) #result, + }); + + let mut attributes = Vec::new(); + + // Pass on the anon modifier + if modifiers.anon { + attributes.push(quote! { anon }); + }; + // Pass on the eval_always modifier + if modifiers.eval_always { + attributes.push(quote! { eval_always }); + }; + + let mut attribute_stream = quote! {}; + for e in attributes.into_iter().intersperse(quote! {,}) { + attribute_stream.extend(e); + } + // Create a dep node for the query + dep_node_def_stream.extend(quote! { + [#attribute_stream] #name(#arg), + }); + + if modifiers.no_force { + no_force_queries.push(name.clone()); + } else { + // Add a match arm to force the query given the dep node + dep_node_force_stream.extend(quote! 
{ + DepKind::#name => { + if let Some(key) = RecoverKey::recover($tcx, $dep_node) { + force_ex!($tcx, #name, key); + } else { + return false; + } + } + }); + } + + add_query_description_impl(&query, modifiers, &mut query_description_stream); + } + let name = &group.name; + query_stream.extend(quote! { + #name { #group_stream }, + }); + } + + // Add an arm for the no force queries to panic when trying to force them + for query in no_force_queries { + dep_node_force_stream.extend(quote! { + DepKind::#query | + }); + } + dep_node_force_stream.extend(quote! { + DepKind::Null => { + bug!("Cannot force dep node: {:?}", $dep_node) + } + }); + + TokenStream::from(quote! { + macro_rules! rustc_query_append { + ([$($macro:tt)*][$($other:tt)*]) => { + $($macro)* { + $($other)* + + #query_stream + + } + } + } + macro_rules! rustc_dep_node_append { + ([$($macro:tt)*][$($other:tt)*]) => { + $($macro)*( + $($other)* + + #dep_node_def_stream + ); + } + } + macro_rules! rustc_dep_node_force { + ([$dep_node:expr, $tcx:expr] $($other:tt)*) => { + match $dep_node.kind { + $($other)* + + #dep_node_force_stream + } + } + } + #query_description_stream + }) +} diff --git a/src/librustc_metadata/Cargo.toml b/src/librustc_metadata/Cargo.toml index 337c87c24ba2b..e234f4f880703 100644 --- a/src/librustc_metadata/Cargo.toml +++ b/src/librustc_metadata/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_metadata" version = "0.0.0" +edition = "2018" [lib] name = "rustc_metadata" @@ -14,7 +15,7 @@ log = "0.4" memmap = "0.6" rustc = { path = "../librustc" } rustc_data_structures = { path = "../librustc_data_structures" } -rustc_errors = { path = "../librustc_errors" } +errors = { path = "../librustc_errors", package = "rustc_errors" } rustc_target = { path = "../librustc_target" } serialize = { path = "../libserialize" } stable_deref_trait = "1.0.0" diff --git a/src/librustc_metadata/build.rs b/src/librustc_metadata/build.rs index f18a3f9b94016..d230ba91039ad 100644 --- a/src/librustc_metadata/build.rs +++ b/src/librustc_metadata/build.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-env-changed=CFG_VERSION"); diff --git a/src/librustc_metadata/creader.rs b/src/librustc_metadata/creader.rs index 4ff29f5c04d41..36d9bf9f50dd5 100644 --- a/src/librustc_metadata/creader.rs +++ b/src/librustc_metadata/creader.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
Validates all used crates and extern libraries and loads their metadata -use cstore::{self, CStore, CrateSource, MetadataBlob}; -use locator::{self, CratePaths}; -use decoder::proc_macro_def_path_table; -use schema::CrateRoot; +use crate::cstore::{self, CStore, CrateSource, MetadataBlob}; +use crate::locator::{self, CratePaths}; +use crate::decoder::proc_macro_def_path_table; +use crate::schema::CrateRoot; use rustc_data_structures::sync::{Lrc, RwLock, Lock}; use rustc::hir::def_id::CrateNum; @@ -39,8 +29,9 @@ use syntax::attr; use syntax::ext::base::SyntaxExtension; use syntax::symbol::Symbol; use syntax::visit; +use syntax::{span_err, span_fatal}; use syntax_pos::{Span, DUMMY_SP}; -use log; +use log::{debug, info, log_enabled}; pub struct Library { pub dylib: Option<(PathBuf, PathKind)>, @@ -197,13 +188,15 @@ impl<'a> CrateLoader<'a> { }); } - fn register_crate(&mut self, - root: &Option, - ident: Symbol, - span: Span, - lib: Library, - dep_kind: DepKind) - -> (CrateNum, Lrc) { + fn register_crate( + &mut self, + host_lib: Option, + root: &Option, + ident: Symbol, + span: Span, + lib: Library, + dep_kind: DepKind + ) -> (CrateNum, Lrc) { let crate_root = lib.metadata.get_root(); info!("register crate `extern crate {} as {}`", crate_root.name, ident); self.verify_no_symbol_conflicts(span, &crate_root); @@ -231,7 +224,16 @@ impl<'a> CrateLoader<'a> { let dependencies: Vec = cnum_map.iter().cloned().collect(); let proc_macros = crate_root.proc_macro_decls_static.map(|_| { - self.load_derive_macros(&crate_root, dylib.clone().map(|p| p.0), span) + if self.sess.opts.debugging_opts.dual_proc_macros { + let host_lib = host_lib.unwrap(); + self.load_derive_macros( + &host_lib.metadata.get_root(), + host_lib.dylib.clone().map(|p| p.0), + span + ) + } else { + self.load_derive_macros(&crate_root, dylib.clone().map(|p| p.0), span) + } }); let def_path_table = record_time(&self.sess.perf_stats.decode_def_path_tables_time, || { @@ -278,6 +280,61 @@ impl<'a> CrateLoader<'a> { (cnum, cmeta) } + fn load_proc_macro<'b> ( + &mut self, + locate_ctxt: &mut locator::Context<'b>, + path_kind: PathKind, + ) -> Option<(LoadResult, Option)> + where + 'a: 'b + { + // Use a new locator Context so trying to load a proc macro doesn't affect the error + // message we emit + let mut proc_macro_locator = locate_ctxt.clone(); + + // Try to load a proc macro + proc_macro_locator.is_proc_macro = Some(true); + + // Load the proc macro crate for the target + let (locator, target_result) = if self.sess.opts.debugging_opts.dual_proc_macros { + proc_macro_locator.reset(); + let result = match self.load(&mut proc_macro_locator)? { + LoadResult::Previous(cnum) => return Some((LoadResult::Previous(cnum), None)), + LoadResult::Loaded(library) => Some(LoadResult::Loaded(library)) + }; + // Don't look for a matching hash when looking for the host crate. 
+ // It won't be the same as the target crate hash + locate_ctxt.hash = None; + // Use the locate_ctxt when looking for the host proc macro crate, as that is required + // so we want it to affect the error message + (locate_ctxt, result) + } else { + (&mut proc_macro_locator, None) + }; + + // Load the proc macro crate for the host + + locator.reset(); + locator.is_proc_macro = Some(true); + locator.target = &self.sess.host; + locator.triple = TargetTriple::from_triple(config::host_triple()); + locator.filesearch = self.sess.host_filesearch(path_kind); + + let host_result = self.load(locator)?; + + Some(if self.sess.opts.debugging_opts.dual_proc_macros { + let host_result = match host_result { + LoadResult::Previous(..) => { + panic!("host and target proc macros must be loaded in lock-step") + } + LoadResult::Loaded(library) => library + }; + (target_result.unwrap(), Some(host_result)) + } else { + (host_result, None) + }) + } + fn resolve_crate<'b>( &'b mut self, root: &'b Option, @@ -291,7 +348,7 @@ impl<'a> CrateLoader<'a> { ) -> Result<(CrateNum, Lrc), LoadError<'b>> { info!("resolving crate `extern crate {} as {}`", name, ident); let result = if let Some(cnum) = self.existing_match(name, hash, path_kind) { - LoadResult::Previous(cnum) + (LoadResult::Previous(cnum), None) } else { info!("falling back to a load"); let mut locate_ctxt = locator::Context { @@ -303,7 +360,7 @@ impl<'a> CrateLoader<'a> { extra_filename: extra_filename, filesearch: self.sess.target_filesearch(path_kind), target: &self.sess.target.target, - triple: &self.sess.opts.target_triple, + triple: self.sess.opts.target_triple.clone(), root, rejected_via_hash: vec![], rejected_via_triple: vec![], @@ -315,28 +372,14 @@ impl<'a> CrateLoader<'a> { metadata_loader: &*self.cstore.metadata_loader, }; - self.load(&mut locate_ctxt).or_else(|| { + self.load(&mut locate_ctxt).map(|r| (r, None)).or_else(|| { dep_kind = DepKind::UnexportedMacrosOnly; - - let mut proc_macro_locator = locator::Context { - target: &self.sess.host, - triple: &TargetTriple::from_triple(config::host_triple()), - filesearch: self.sess.host_filesearch(path_kind), - rejected_via_hash: vec![], - rejected_via_triple: vec![], - rejected_via_kind: vec![], - rejected_via_version: vec![], - rejected_via_filename: vec![], - is_proc_macro: Some(true), - ..locate_ctxt - }; - - self.load(&mut proc_macro_locator) + self.load_proc_macro(&mut locate_ctxt, path_kind) }).ok_or_else(move || LoadError::LocatorError(locate_ctxt))? }; match result { - LoadResult::Previous(cnum) => { + (LoadResult::Previous(cnum), None) => { let data = self.cstore.get_crate_data(cnum); if data.root.proc_macro_decls_static.is_some() { dep_kind = DepKind::UnexportedMacrosOnly; @@ -346,13 +389,14 @@ impl<'a> CrateLoader<'a> { }); Ok((cnum, data)) } - LoadResult::Loaded(library) => { - Ok(self.register_crate(root, ident, span, library, dep_kind)) + (LoadResult::Loaded(library), host_library) => { + Ok(self.register_crate(host_library, root, ident, span, library, dep_kind)) } + _ => panic!() } } - fn load(&mut self, locate_ctxt: &mut locator::Context) -> Option { + fn load(&mut self, locate_ctxt: &mut locator::Context<'_>) -> Option { let library = locate_ctxt.maybe_load_library_crate()?; // In the case that we're loading a crate, but not matching @@ -364,7 +408,7 @@ impl<'a> CrateLoader<'a> { // don't want to match a host crate against an equivalent target one // already loaded. 
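// Taken together, the proc-macro loading path added above works as follows:
// without `debugging_opts.dual_proc_macros` (the `-Z dual-proc-macros` debugging
// flag, spelling inferred from the option name), `load_proc_macro` locates a
// single host-triple artifact and returns `(host_result, None)`, as before. With
// the flag enabled it first loads the target-triple proc-macro crate, then clears
// the hash and retargets the locator at the host triple to find the matching host
// artifact, returning `(target_result, Some(host_library))`; `register_crate`
// then reads the derive macros out of that host library rather than the target one.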
let root = library.metadata.get_root(); - if locate_ctxt.triple == &self.sess.opts.target_triple { + if locate_ctxt.triple == self.sess.opts.target_triple { let mut result = LoadResult::Loaded(library); self.cstore.iter_crate_data(|cnum, data| { if data.root.name == root.name && root.hash == data.root.hash { @@ -437,7 +481,7 @@ impl<'a> CrateLoader<'a> { // The map from crate numbers in the crate we're resolving to local crate numbers. // We map 0 and all other holes in the map to our parent crate. The "additional" // self-dependencies should be harmless. - ::std::iter::once(krate).chain(crate_root.crate_deps + std::iter::once(krate).chain(crate_root.crate_deps .decode(metadata) .map(|dep| { info!("resolving dep crate {} hash: `{}` extra filename: `{}`", dep.name, dep.hash, @@ -460,9 +504,9 @@ impl<'a> CrateLoader<'a> { fn read_extension_crate(&mut self, span: Span, orig_name: Symbol, rename: Symbol) -> ExtensionCrate { info!("read extension crate `extern crate {} as {}`", orig_name, rename); - let target_triple = &self.sess.opts.target_triple; + let target_triple = self.sess.opts.target_triple.clone(); let host_triple = TargetTriple::from_triple(config::host_triple()); - let is_cross = target_triple != &host_triple; + let is_cross = target_triple != host_triple; let mut target_only = false; let mut locate_ctxt = locator::Context { sess: self.sess, @@ -473,7 +517,7 @@ impl<'a> CrateLoader<'a> { extra_filename: None, filesearch: self.sess.host_filesearch(PathKind::Crate), target: &self.sess.host, - triple: &host_triple, + triple: host_triple, root: &None, rejected_via_hash: vec![], rejected_via_triple: vec![], @@ -522,7 +566,7 @@ impl<'a> CrateLoader<'a> { } } - /// Load custom derive macros. + /// Loads custom derive macros. /// /// Note that this is intentionally similar to how we load plugins today, /// but also intentionally separate. Plugins are likely always going to be @@ -532,7 +576,7 @@ impl<'a> CrateLoader<'a> { fn load_derive_macros(&mut self, root: &CrateRoot, dylib: Option, span: Span) -> Vec<(ast::Name, Lrc)> { use std::{env, mem}; - use dynamic_lib::DynamicLibrary; + use crate::dynamic_lib::DynamicLibrary; use proc_macro::bridge::client::ProcMacro; use syntax_ext::deriving::custom::ProcMacroDerive; use syntax_ext::proc_macro_impl::{AttrProcMacro, BangProcMacro}; @@ -579,7 +623,7 @@ impl<'a> CrateLoader<'a> { ProcMacro::Bang { name, client } => { (name, SyntaxExtension::ProcMacro { expander: Box::new(BangProcMacro { client }), - allow_internal_unstable: false, + allow_internal_unstable: None, edition: root.edition, }) } @@ -1006,7 +1050,7 @@ impl<'a> CrateLoader<'a> { item.ident, orig_name); let orig_name = match orig_name { Some(orig_name) => { - ::validate_crate_name(Some(self.sess), &orig_name.as_str(), + crate::validate_crate_name(Some(self.sess), &orig_name.as_str(), Some(item.span)); orig_name } diff --git a/src/librustc_metadata/cstore.rs b/src/librustc_metadata/cstore.rs index f650db5aafa01..d646879b4d45d 100644 --- a/src/librustc_metadata/cstore.rs +++ b/src/librustc_metadata/cstore.rs @@ -1,17 +1,7 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- // The crate store - a central repo for information collected about external // crates and libraries -use schema; +use crate::schema; use rustc::hir::def_id::{CrateNum, DefIndex}; use rustc::hir::map::definitions::DefPathTable; use rustc::middle::cstore::{DepKind, ExternCrate, MetadataLoader}; @@ -29,7 +19,7 @@ pub use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind, LinkagePrefere pub use rustc::middle::cstore::NativeLibraryKind::*; pub use rustc::middle::cstore::{CrateSource, LibSource, ForeignModule}; -pub use cstore_impl::{provide, provide_extern}; +pub use crate::cstore_impl::{provide, provide_extern}; // A map from external crate numbers (as decoded from some crate file) to // local crate numbers (as generated during this session). Each external @@ -56,7 +46,7 @@ pub struct CrateMetadata { /// Original name of the crate. pub name: Symbol, - /// Name of the crate as imported. I.e., if imported with + /// Name of the crate as imported. I.e., if imported with /// `extern crate foo as bar;` this will be `bar`. pub imported_name: Symbol, @@ -76,9 +66,9 @@ pub struct CrateMetadata { pub root: schema::CrateRoot, - /// For each public item in this crate, we encode a key. When the + /// For each public item in this crate, we encode a key. When the /// crate is loaded, we read all the keys and put them in this - /// hashmap, which gives the reverse mapping. This allows us to + /// hashmap, which gives the reverse mapping. This allows us to /// quickly retrace a `DefPath`, which is needed for incremental /// compilation support. pub def_path_table: Lrc, diff --git a/src/librustc_metadata/cstore_impl.rs b/src/librustc_metadata/cstore_impl.rs index da2ba392c5eb1..995532a00cd6e 100644 --- a/src/librustc_metadata/cstore_impl.rs +++ b/src/librustc_metadata/cstore_impl.rs @@ -1,19 +1,9 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use cstore::{self, LoadedMacro}; -use encoder; -use link_args; -use native_libs; -use foreign_modules; -use schema; +use crate::cstore::{self, LoadedMacro}; +use crate::encoder; +use crate::link_args; +use crate::native_libs; +use crate::foreign_modules; +use crate::schema; use rustc::ty::query::QueryConfig; use rustc::middle::cstore::{CrateStore, DepKind, @@ -39,6 +29,7 @@ use syntax::attr; use syntax::source_map; use syntax::edition::Edition; use syntax::parse::source_file_to_stream; +use syntax::parse::parser::emit_unclosed_delims; use syntax::symbol::Symbol; use syntax_pos::{Span, NO_EXPANSION, FileName}; use rustc_data_structures::bit_set::BitSet; @@ -61,7 +52,7 @@ macro_rules! provide { index: CRATE_DEF_INDEX }); let dep_node = def_path_hash - .to_dep_node(::rustc::dep_graph::DepKind::CrateMetadata); + .to_dep_node(rustc::dep_graph::DepKind::CrateMetadata); // The DepNodeIndex of the DepNode::CrateMetadata should be // cached somewhere, so that we can use read_index(). 
$tcx.dep_graph.read(dep_node); @@ -416,6 +407,14 @@ impl cstore::CStore { self.get_crate_data(def.krate).get_struct_field_names(def.index) } + pub fn ctor_kind_untracked(&self, def: DefId) -> def::CtorKind { + self.get_crate_data(def.krate).get_ctor_kind(def.index) + } + + pub fn item_attrs_untracked(&self, def: DefId, sess: &Session) -> Lrc<[ast::Attribute]> { + self.get_crate_data(def.krate).get_item_attrs(def.index, sess) + } + pub fn item_children_untracked(&self, def_id: DefId, sess: &Session) -> Vec { let mut result = vec![]; self.get_crate_data(def_id.krate) @@ -431,10 +430,12 @@ impl cstore::CStore { use syntax::ext::base::SyntaxExtension; use syntax_ext::proc_macro_impl::BangProcMacro; - let client = ::proc_macro::bridge::client::Client::expand1(::proc_macro::quote); + let client = proc_macro::bridge::client::Client::expand1(proc_macro::quote); let ext = SyntaxExtension::ProcMacro { expander: Box::new(BangProcMacro { client }), - allow_internal_unstable: true, + allow_internal_unstable: Some(vec![ + Symbol::intern("proc_macro_def_site"), + ].into()), edition: data.root.edition, }; return LoadedMacro::ProcMacro(Lrc::new(ext)); @@ -446,7 +447,8 @@ impl cstore::CStore { let source_file = sess.parse_sess.source_map().new_source_file(source_name, def.body); let local_span = Span::new(source_file.start_pos, source_file.end_pos, NO_EXPANSION); - let body = source_file_to_stream(&sess.parse_sess, source_file, None); + let (body, mut errors) = source_file_to_stream(&sess.parse_sess, source_file, None); + emit_unclosed_delims(&mut errors, &sess.diagnostic()); // Mark the attrs as used let attrs = data.get_item_attrs(id.index, sess); diff --git a/src/librustc_metadata/decoder.rs b/src/librustc_metadata/decoder.rs index 1c7e3c95d1470..f456a5c1619c5 100644 --- a/src/librustc_metadata/decoder.rs +++ b/src/librustc_metadata/decoder.rs @@ -1,24 +1,14 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- // Decoding metadata from a single crate's metadata -use cstore::{self, CrateMetadata, MetadataBlob, NativeLibrary, ForeignModule}; -use schema::*; +use crate::cstore::{self, CrateMetadata, MetadataBlob, NativeLibrary, ForeignModule}; +use crate::schema::*; use rustc_data_structures::sync::{Lrc, ReadGuard}; use rustc::hir::map::{DefKey, DefPath, DefPathData, DefPathHash, Definitions}; use rustc::hir; use rustc::middle::cstore::LinkagePreference; use rustc::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel}; -use rustc::hir::def::{self, Def, CtorKind}; +use rustc::hir::def::{self, Def, CtorOf, CtorKind}; use rustc::hir::def_id::{CrateNum, DefId, DefIndex, DefIndexAddressSpace, CRATE_DEF_INDEX, LOCAL_CRATE, LocalDefId}; use rustc::hir::map::definitions::DefPathTable; @@ -44,6 +34,7 @@ use syntax::symbol::InternedString; use syntax::ext::base::{MacroKind, SyntaxExtension}; use syntax::ext::hygiene::Mark; use syntax_pos::{self, Span, BytePos, Pos, DUMMY_SP, NO_EXPANSION}; +use log::debug; pub struct DecodeContext<'a, 'tcx: 'a> { opaque: opaque::Decoder<'a>, @@ -422,12 +413,15 @@ impl<'tcx> EntryKind<'tcx> { EntryKind::ForeignFn(_) => Def::Fn(did), EntryKind::Method(_) => Def::Method(did), EntryKind::Type => Def::TyAlias(did), + EntryKind::TypeParam => Def::TyParam(did), + EntryKind::ConstParam => Def::ConstParam(did), EntryKind::Existential => Def::Existential(did), EntryKind::AssociatedType(_) => Def::AssociatedTy(did), EntryKind::AssociatedExistential(_) => Def::AssociatedExistential(did), EntryKind::Mod(_) => Def::Mod(did), EntryKind::Variant(_) => Def::Variant(did), EntryKind::Trait(_) => Def::Trait(did), + EntryKind::TraitAlias(_) => Def::TraitAlias(did), EntryKind::Enum(..) => Def::Enum(did), EntryKind::MacroDef(_) => Def::Macro(did, MacroKind::Bang), EntryKind::ForeignType => Def::ForeignTy(did), @@ -442,7 +436,7 @@ impl<'tcx> EntryKind<'tcx> { } } -/// Create the "fake" DefPathTable for a given proc macro crate. +/// Creates the "fake" DefPathTable for a given proc macro crate. 
/// /// The DefPathTable is as follows: /// @@ -530,26 +524,36 @@ impl<'a, 'tcx> CrateMetadata { } pub fn get_trait_def(&self, item_id: DefIndex, sess: &Session) -> ty::TraitDef { - let data = match self.entry(item_id).kind { - EntryKind::Trait(data) => data.decode((self, sess)), - _ => bug!(), - }; - - ty::TraitDef::new(self.local_def_id(item_id), - data.unsafety, - data.paren_sugar, - data.has_auto_impl, - data.is_marker, - self.def_path_table.def_path_hash(item_id)) + match self.entry(item_id).kind { + EntryKind::Trait(data) => { + let data = data.decode((self, sess)); + ty::TraitDef::new(self.local_def_id(item_id), + data.unsafety, + data.paren_sugar, + data.has_auto_impl, + data.is_marker, + self.def_path_table.def_path_hash(item_id)) + }, + EntryKind::TraitAlias(_) => { + ty::TraitDef::new(self.local_def_id(item_id), + hir::Unsafety::Normal, + false, + false, + false, + self.def_path_table.def_path_hash(item_id)) + }, + _ => bug!("def-index does not refer to trait or trait alias"), + } } - fn get_variant(&self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - item: &Entry, - index: DefIndex, - adt_kind: ty::AdtKind) - -> ty::VariantDef - { + fn get_variant( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + item: &Entry<'_>, + index: DefIndex, + parent_did: DefId, + adt_kind: ty::AdtKind + ) -> ty::VariantDef { let data = match item.kind { EntryKind::Variant(data) | EntryKind::Struct(data, _) | @@ -557,13 +561,18 @@ impl<'a, 'tcx> CrateMetadata { _ => bug!(), }; - let def_id = self.local_def_id(data.struct_ctor.unwrap_or(index)); - let attribute_def_id = self.local_def_id(index); + let variant_did = if adt_kind == ty::AdtKind::Enum { + Some(self.local_def_id(index)) + } else { + None + }; + let ctor_did = data.ctor.map(|index| self.local_def_id(index)); ty::VariantDef::new( tcx, - def_id, - self.item_name(index).as_symbol(), + Ident::from_interned_str(self.item_name(index)), + variant_did, + ctor_did, data.discr, item.children.decode(self).map(|index| { let f = self.entry(index); @@ -573,9 +582,10 @@ impl<'a, 'tcx> CrateMetadata { vis: f.visibility.decode(self) } }).collect(), - adt_kind, data.ctor_kind, - attribute_def_id + adt_kind, + parent_did, + false, ) } @@ -597,11 +607,11 @@ impl<'a, 'tcx> CrateMetadata { item.children .decode(self) .map(|index| { - self.get_variant(tcx, &self.entry(index), index, kind) + self.get_variant(tcx, &self.entry(index), index, did, kind) }) .collect() } else { - std::iter::once(self.get_variant(tcx, &item, item_id, kind)).collect() + std::iter::once(self.get_variant(tcx, &item, item_id, did, kind)).collect() }; tcx.alloc_adt_def(did, kind, variants, repr) @@ -625,10 +635,13 @@ impl<'a, 'tcx> CrateMetadata { item_id: DefIndex, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> ty::GenericPredicates<'tcx> { - match self.entry(item_id).kind { - EntryKind::Trait(data) => data.decode(self).super_predicates.decode((self, tcx)), - _ => bug!(), - } + let super_predicates = match self.entry(item_id).kind { + EntryKind::Trait(data) => data.decode(self).super_predicates, + EntryKind::TraitAlias(data) => data.decode(self).super_predicates, + _ => bug!("def-index does not refer to trait or trait alias"), + }; + + super_predicates.decode((self, tcx)) } pub fn get_generics(&self, @@ -644,7 +657,7 @@ impl<'a, 'tcx> CrateMetadata { pub fn get_stability(&self, id: DefIndex) -> Option { match self.is_proc_macro(id) { - true => None, + true => self.root.proc_macro_stability.clone(), false => self.entry(id).stability.map(|stab| stab.decode(self)), } } @@ -803,22 +816,22 @@ impl<'a, 'tcx> CrateMetadata { // 
Re-export lists automatically contain constructors when necessary. match def { Def::Struct(..) => { - if let Some(ctor_def_id) = self.get_struct_ctor_def_id(child_index) { + if let Some(ctor_def_id) = self.get_ctor_def_id(child_index) { let ctor_kind = self.get_ctor_kind(child_index); - let ctor_def = Def::StructCtor(ctor_def_id, ctor_kind); - callback(def::Export { - def: ctor_def, - vis: self.get_visibility(ctor_def_id.index), - ident, span, - }); + let ctor_def = Def::Ctor(ctor_def_id, CtorOf::Struct, ctor_kind); + let vis = self.get_visibility(ctor_def_id.index); + callback(def::Export { def: ctor_def, vis, ident, span }); } } Def::Variant(def_id) => { // Braced variants, unlike structs, generate unusable names in // value namespace, they are reserved for possible future use. + // It's ok to use the variant's id as a ctor id since an + // error will be reported on any use of such resolution anyway. + let ctor_def_id = self.get_ctor_def_id(child_index).unwrap_or(def_id); let ctor_kind = self.get_ctor_kind(child_index); - let ctor_def = Def::VariantCtor(def_id, ctor_kind); - let vis = self.get_visibility(child_index); + let ctor_def = Def::Ctor(ctor_def_id, CtorOf::Variant, ctor_kind); + let vis = self.get_visibility(ctor_def_id.index); callback(def::Export { def: ctor_def, ident, vis, span }); } _ => {} @@ -890,6 +903,9 @@ impl<'a, 'tcx> CrateMetadata { EntryKind::AssociatedType(container) => { (ty::AssociatedKind::Type, container, false) } + EntryKind::AssociatedExistential(container) => { + (ty::AssociatedKind::Existential, container, false) + } _ => bug!("cannot get associated-item of `{:?}`", def_key) }; @@ -917,10 +933,13 @@ impl<'a, 'tcx> CrateMetadata { } } - pub fn get_struct_ctor_def_id(&self, node_id: DefIndex) -> Option { + pub fn get_ctor_def_id(&self, node_id: DefIndex) -> Option { match self.entry(node_id).kind { EntryKind::Struct(data, _) => { - data.decode(self).struct_ctor.map(|index| self.local_def_id(index)) + data.decode(self).ctor.map(|index| self.local_def_id(index)) + } + EntryKind::Variant(data) => { + data.decode(self).ctor.map(|index| self.local_def_id(index)) } _ => None, } @@ -931,11 +950,11 @@ impl<'a, 'tcx> CrateMetadata { return Lrc::new([]); } - // The attributes for a tuple struct are attached to the definition, not the ctor; + // The attributes for a tuple struct/variant are attached to the definition, not the ctor; // we assume that someone passing in a tuple struct ctor is actually wanting to // look at the definition let def_key = self.def_key(node_id); - let item_id = if def_key.disambiguated_data.data == DefPathData::StructCtor { + let item_id = if def_key.disambiguated_data.data == DefPathData::Ctor { def_key.parent.unwrap() } else { node_id @@ -1024,7 +1043,8 @@ impl<'a, 'tcx> CrateMetadata { } def_key.parent.and_then(|parent_index| { match self.entry(parent_index).kind { - EntryKind::Trait(_) => Some(self.local_def_id(parent_index)), + EntryKind::Trait(_) | + EntryKind::TraitAlias(_) => Some(self.local_def_id(parent_index)), _ => None, } }) diff --git a/src/librustc_metadata/diagnostics.rs b/src/librustc_metadata/diagnostics.rs index b38c12355737b..9ac582ebc42da 100644 --- a/src/librustc_metadata/diagnostics.rs +++ b/src/librustc_metadata/diagnostics.rs @@ -1,15 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] +use syntax::{register_diagnostic, register_diagnostics, register_long_diagnostics}; + register_long_diagnostics! { E0454: r##" A link name was given with an empty name. Erroneous code example: @@ -45,7 +37,7 @@ extern {} ``` See more: -https://doc.rust-lang.org/book/first-edition/conditional-compilation.html +https://doc.rust-lang.org/reference/attributes.html#conditional-compilation "##, E0458: r##" diff --git a/src/librustc_metadata/dynamic_lib.rs b/src/librustc_metadata/dynamic_lib.rs index 182a071277ece..9dd160c24c373 100644 --- a/src/librustc_metadata/dynamic_lib.rs +++ b/src/librustc_metadata/dynamic_lib.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Dynamic library facilities. //! //! A simple wrapper over the platform's dynamic library facilities @@ -42,7 +32,7 @@ impl DynamicLibrary { } } - /// Load a dynamic library into the global namespace (RTLD_GLOBAL on Unix) + /// Loads a dynamic library into the global namespace (RTLD_GLOBAL on Unix) /// and do it now (don't use RTLD_LAZY on Unix). pub fn open_global_now(filename: &Path) -> Result { let maybe_library = dl::open_global_now(filename.as_os_str()); @@ -86,7 +76,6 @@ impl DynamicLibrary { #[cfg(test)] mod tests { use super::*; - use libc; use std::mem; #[test] @@ -137,7 +126,6 @@ mod tests { #[cfg(unix)] mod dl { - use libc; use std::ffi::{CStr, OsStr, CString}; use std::os::unix::prelude::*; use std::ptr; diff --git a/src/librustc_metadata/encoder.rs b/src/librustc_metadata/encoder.rs index feed70c61226d..796d2f6a18ba3 100644 --- a/src/librustc_metadata/encoder.rs +++ b/src/librustc_metadata/encoder.rs @@ -1,22 +1,13 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use index::Index; -use index_builder::{FromId, IndexBuilder, Untracked}; -use isolated_encoder::IsolatedEncoder; -use schema::*; +use crate::index::Index; +use crate::index_builder::{FromId, IndexBuilder, Untracked}; +use crate::isolated_encoder::IsolatedEncoder; +use crate::schema::*; use rustc::middle::cstore::{LinkagePreference, NativeLibrary, EncodedMetadata, ForeignModule}; use rustc::hir::def::CtorKind; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, DefIndex, DefId, LocalDefId, LOCAL_CRATE}; +use rustc::hir::GenericParamKind; use rustc::hir::map::definitions::DefPathTable; use rustc_data_structures::fingerprint::Fingerprint; use rustc::middle::dependency_format::Linkage; @@ -39,11 +30,12 @@ use std::hash::Hash; use std::path::Path; use rustc_data_structures::sync::Lrc; use std::u32; -use syntax::ast::{self, CRATE_NODE_ID}; +use syntax::ast; use syntax::attr; use syntax::source_map::Spanned; use syntax::symbol::keywords; use syntax_pos::{self, hygiene, FileName, SourceFile, Span}; +use log::{debug, trace}; use rustc::hir::{self, PatKind}; use rustc::hir::itemlikevisit::ItemLikeVisitor; @@ -323,7 +315,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { let vis = Spanned { span: syntax_pos::DUMMY_SP, node: hir::VisibilityKind::Public }; index.record(DefId::local(CRATE_DEF_INDEX), IsolatedEncoder::encode_info_for_mod, - FromId(CRATE_NODE_ID, (&krate.module, &krate.attrs, &vis))); + FromId(hir::CRATE_HIR_ID, (&krate.module, &krate.attrs, &vis))); let mut visitor = EncodeVisitor { index }; krate.visit_all_item_likes(&mut visitor.as_deep_visitor()); for macro_def in &krate.exported_macros { @@ -492,17 +484,18 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { has_global_allocator: has_global_allocator, has_panic_handler: has_panic_handler, has_default_lib_allocator: has_default_lib_allocator, - plugin_registrar_fn: tcx.sess - .plugin_registrar_fn - .get() - .map(|id| tcx.hir().local_def_id(id).index), + plugin_registrar_fn: tcx.plugin_registrar_fn(LOCAL_CRATE).map(|id| id.index), proc_macro_decls_static: if is_proc_macro { - let id = tcx.sess.proc_macro_decls_static.get().unwrap(); - Some(tcx.hir().local_def_id(id).index) + let id = tcx.proc_macro_decls_static(LOCAL_CRATE).unwrap(); + Some(id.index) + } else { + None + }, + proc_macro_stability: if is_proc_macro { + tcx.lookup_stability(DefId::local(CRATE_DEF_INDEX)).map(|stab| stab.clone()) } else { None }, - compiler_builtins: attr::contains_name(&attrs, "compiler_builtins"), needs_allocator: attr::contains_name(&attrs, "needs_allocator"), needs_panic_runtime: attr::contains_name(&attrs, "needs_panic_runtime"), @@ -580,28 +573,25 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { /// will have to lookup the adt-def by its id, and that gives us /// the right to access any information in the adt-def (including, /// e.g., the length of the various vectors). 
- fn encode_enum_variant_info(&mut self, - (enum_did, Untracked(index)): (DefId, Untracked)) - -> Entry<'tcx> { + fn encode_enum_variant_info( + &mut self, + (enum_did, Untracked(index)): (DefId, Untracked), + ) -> Entry<'tcx> { let tcx = self.tcx; let def = tcx.adt_def(enum_did); let variant = &def.variants[index]; - let def_id = variant.did; + let def_id = variant.def_id; debug!("IsolatedEncoder::encode_enum_variant_info({:?})", def_id); let data = VariantData { ctor_kind: variant.ctor_kind, discr: variant.discr, - struct_ctor: None, - ctor_sig: if variant.ctor_kind == CtorKind::Fn { - Some(self.lazy(&tcx.fn_sig(def_id))) - } else { - None - } + ctor: variant.ctor_def_id.map(|did| did.index), + ctor_sig: None, }; - let enum_id = tcx.hir().as_local_node_id(enum_did).unwrap(); - let enum_vis = &tcx.hir().expect_item(enum_id).vis; + let enum_id = tcx.hir().as_local_hir_id(enum_did).unwrap(); + let enum_vis = &tcx.hir().expect_item_by_hir_id(enum_id).vis; Entry { kind: EntryKind::Variant(self.lazy(&data)), @@ -630,13 +620,69 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } } + /// Encode the constructor for the given variant of the given ADT. See + /// `encode_enum_variant_info` for an explanation about why the index is untracked. + fn encode_enum_variant_ctor( + &mut self, + (enum_did, Untracked(index)): (DefId, Untracked), + ) -> Entry<'tcx> { + let tcx = self.tcx; + let def = tcx.adt_def(enum_did); + let variant = &def.variants[index]; + let def_id = variant.ctor_def_id.unwrap(); + debug!("IsolatedEncoder::encode_enum_variant_ctor({:?})", def_id); + + let data = VariantData { + ctor_kind: variant.ctor_kind, + discr: variant.discr, + ctor: Some(def_id.index), + ctor_sig: if variant.ctor_kind == CtorKind::Fn { + Some(self.lazy(&tcx.fn_sig(def_id))) + } else { + None + } + }; + + // Variant constructors have the same visibility as the parent enums, unless marked as + // non-exhaustive, in which case they are lowered to `pub(crate)`. 
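// An illustrative instance of the visibility rule described just above (the enum
// below is hypothetical, not something defined in this patch):
//
//     pub enum Version {
//         Stable(u32),
//         #[non_exhaustive]
//         Nightly(u32),
//     }
//
// The constructor of `Version::Stable` is encoded with the enum's `pub`
// visibility, while the constructor of `Version::Nightly` is lowered to
// `pub(crate)`, so downstream crates cannot call `Version::Nightly(..)` directly
// even though the variant itself remains visible for matching.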
+ let enum_id = tcx.hir().as_local_hir_id(enum_did).unwrap(); + let enum_vis = &tcx.hir().expect_item_by_hir_id(enum_id).vis; + let mut ctor_vis = ty::Visibility::from_hir(enum_vis, enum_id, tcx); + if variant.is_field_list_non_exhaustive() && ctor_vis == ty::Visibility::Public { + ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)); + } + + Entry { + kind: EntryKind::Variant(self.lazy(&data)), + visibility: self.lazy(&ctor_vis), + span: self.lazy(&tcx.def_span(def_id)), + attributes: LazySeq::empty(), + children: LazySeq::empty(), + stability: self.encode_stability(def_id), + deprecation: self.encode_deprecation(def_id), + + ty: Some(self.encode_item_type(def_id)), + inherent_impls: LazySeq::empty(), + variances: if variant.ctor_kind == CtorKind::Fn { + self.encode_variances_of(def_id) + } else { + LazySeq::empty() + }, + generics: Some(self.encode_generics(def_id)), + predicates: Some(self.encode_predicates(def_id)), + predicates_defined_on: None, + + mir: self.encode_optimized_mir(def_id), + } + } + fn encode_info_for_mod(&mut self, FromId(id, (md, attrs, vis)): FromId<(&hir::Mod, &[ast::Attribute], &hir::Visibility)>) -> Entry<'tcx> { let tcx = self.tcx; - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); debug!("IsolatedEncoder::encode_info_for_mod({:?})", def_id); let data = ModData { @@ -652,7 +698,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { span: self.lazy(&tcx.def_span(def_id)), attributes: self.encode_attributes(attrs), children: self.lazy_seq(md.item_ids.iter().map(|item_id| { - tcx.hir().local_def_id(item_id.id).index + tcx.hir().local_def_id_from_hir_id(item_id.id).index })), stability: self.encode_stability(def_id), deprecation: self.encode_deprecation(def_id), @@ -686,7 +732,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { let def_id = field.did; debug!("IsolatedEncoder::encode_field({:?})", def_id); - let variant_id = tcx.hir().as_local_node_id(variant.did).unwrap(); + let variant_id = tcx.hir().as_local_hir_id(variant.def_id).unwrap(); let variant_data = tcx.hir().expect_variant_data(variant_id); Entry { @@ -718,7 +764,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { let data = VariantData { ctor_kind: variant.ctor_kind, discr: variant.discr, - struct_ctor: Some(def_id.index), + ctor: Some(def_id.index), ctor_sig: if variant.ctor_kind == CtorKind::Fn { Some(self.lazy(&tcx.fn_sig(def_id))) } else { @@ -726,8 +772,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } }; - let struct_id = tcx.hir().as_local_node_id(adt_def_id).unwrap(); - let struct_vis = &tcx.hir().expect_item(struct_id).vis; + let struct_id = tcx.hir().as_local_hir_id(adt_def_id).unwrap(); + let struct_vis = &tcx.hir().expect_item_by_hir_id(struct_id).vis; let mut ctor_vis = ty::Visibility::from_hir(struct_vis, struct_id, tcx); for field in &variant.fields { if ctor_vis.is_at_least(field.vis, tcx) { @@ -791,8 +837,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { debug!("IsolatedEncoder::encode_info_for_trait_item({:?})", def_id); let tcx = self.tcx; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let ast_item = tcx.hir().expect_trait_item(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let ast_item = tcx.hir().expect_trait_item(hir_id); let trait_item = tcx.associated_item(def_id); let container = match trait_item.defaultness { @@ -901,8 +947,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { 
debug!("IsolatedEncoder::encode_info_for_impl_item({:?})", def_id); let tcx = self.tcx; - let node_id = self.tcx.hir().as_local_node_id(def_id).unwrap(); - let ast_item = self.tcx.hir().expect_impl_item(node_id); + let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap(); + let ast_item = self.tcx.hir().expect_impl_item(hir_id); let impl_item = self.tcx.associated_item(def_id); let container = match impl_item.defaultness { @@ -1067,7 +1113,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { EntryKind::Fn(self.lazy(&data)) } hir::ItemKind::Mod(ref m) => { - return self.encode_info_for_mod(FromId(item.id, (m, &item.attrs, &item.vis))); + return self.encode_info_for_mod(FromId(item.hir_id, (m, &item.attrs, &item.vis))); } hir::ItemKind::ForeignMod(_) => EntryKind::ForeignMod, hir::ItemKind::GlobalAsm(..) => EntryKind::GlobalAsm, @@ -1080,18 +1126,15 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { // Encode def_ids for each field and method // for methods, write all the stuff get_trait_method // needs to know - let struct_ctor = if !struct_def.is_struct() { - Some(tcx.hir().local_def_id(struct_def.id()).index) - } else { - None - }; + let ctor = struct_def.ctor_hir_id() + .map(|ctor_hir_id| tcx.hir().local_def_id_from_hir_id(ctor_hir_id).index); let repr_options = get_repr_options(&tcx, def_id); EntryKind::Struct(self.lazy(&VariantData { ctor_kind: variant.ctor_kind, discr: variant.discr, - struct_ctor, + ctor, ctor_sig: None, }), repr_options) } @@ -1102,7 +1145,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { EntryKind::Union(self.lazy(&VariantData { ctor_kind: variant.ctor_kind, discr: variant.discr, - struct_ctor: None, + ctor: None, ctor_sig: None, }), repr_options) } @@ -1141,8 +1184,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { EntryKind::Impl(self.lazy(&data)) } - hir::ItemKind::Trait(..) | - hir::ItemKind::TraitAlias(..) => { + hir::ItemKind::Trait(..) => { let trait_def = tcx.trait_def(def_id); let data = TraitData { unsafety: trait_def.unsafety, @@ -1154,26 +1196,34 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { EntryKind::Trait(self.lazy(&data)) } + hir::ItemKind::TraitAlias(..) => { + let data = TraitAliasData { + super_predicates: self.lazy(&tcx.super_predicates_of(def_id)), + }; + + EntryKind::TraitAlias(self.lazy(&data)) + } hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => bug!("cannot encode info for item {:?}", item), }; Entry { kind, - visibility: self.lazy(&ty::Visibility::from_hir(&item.vis, item.id, tcx)), + visibility: self.lazy(&ty::Visibility::from_hir(&item.vis, item.hir_id, tcx)), span: self.lazy(&item.span), attributes: self.encode_attributes(&item.attrs), children: match item.node { hir::ItemKind::ForeignMod(ref fm) => { self.lazy_seq(fm.items .iter() - .map(|foreign_item| tcx.hir().local_def_id(foreign_item.id).index)) + .map(|foreign_item| tcx.hir().local_def_id_from_hir_id( + foreign_item.hir_id).index)) } hir::ItemKind::Enum(..) => { let def = self.tcx.adt_def(def_id); self.lazy_seq(def.variants.iter().map(|v| { - assert!(v.did.is_local()); - v.did.index + assert!(v.def_id.is_local()); + v.def_id.index })) } hir::ItemKind::Struct(..) | @@ -1227,6 +1277,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { hir::ItemKind::Impl(..) | hir::ItemKind::Existential(..) | hir::ItemKind::Trait(..) => Some(self.encode_generics(def_id)), + hir::ItemKind::TraitAlias(..) 
=> Some(self.encode_generics(def_id)), _ => None, }, predicates: match item.node { @@ -1239,7 +1290,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { hir::ItemKind::Union(..) | hir::ItemKind::Impl(..) | hir::ItemKind::Existential(..) | - hir::ItemKind::Trait(..) => Some(self.encode_predicates(def_id)), + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) => Some(self.encode_predicates(def_id)), _ => None, }, @@ -1249,7 +1301,8 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { // hack. (No reason not to expand it in the future if // necessary.) predicates_defined_on: match item.node { - hir::ItemKind::Trait(..) => Some(self.encode_predicates_defined_on(def_id)), + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) => Some(self.encode_predicates_defined_on(def_id)), _ => None, // not *wrong* for other kinds of items, but not needed }, @@ -1282,7 +1335,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { /// Serialize the text of exported macros fn encode_info_for_macro_def(&mut self, macro_def: &hir::MacroDef) -> Entry<'tcx> { use syntax::print::pprust; - let def_id = self.tcx.hir().local_def_id(macro_def.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(macro_def.hir_id); Entry { kind: EntryKind::MacroDef(self.lazy(&MacroDef { body: pprust::tts_to_string(¯o_def.body.trees().collect::>()), @@ -1305,25 +1358,22 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } } - fn encode_info_for_ty_param(&mut self, - (def_id, Untracked(has_default)): (DefId, Untracked)) - -> Entry<'tcx> { - debug!("IsolatedEncoder::encode_info_for_ty_param({:?})", def_id); + fn encode_info_for_generic_param( + &mut self, + def_id: DefId, + entry_kind: EntryKind<'tcx>, + encode_type: bool, + ) -> Entry<'tcx> { let tcx = self.tcx; Entry { - kind: EntryKind::Type, + kind: entry_kind, visibility: self.lazy(&ty::Visibility::Public), span: self.lazy(&tcx.def_span(def_id)), attributes: LazySeq::empty(), children: LazySeq::empty(), stability: None, deprecation: None, - - ty: if has_default { - Some(self.encode_item_type(def_id)) - } else { - None - }, + ty: if encode_type { Some(self.encode_item_type(def_id)) } else { None }, inherent_impls: LazySeq::empty(), variances: LazySeq::empty(), generics: None, @@ -1334,14 +1384,29 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { } } + fn encode_info_for_ty_param( + &mut self, + (def_id, Untracked(encode_type)): (DefId, Untracked), + ) -> Entry<'tcx> { + debug!("IsolatedEncoder::encode_info_for_ty_param({:?})", def_id); + self.encode_info_for_generic_param(def_id, EntryKind::TypeParam, encode_type) + } + + fn encode_info_for_const_param( + &mut self, + def_id: DefId, + ) -> Entry<'tcx> { + debug!("IsolatedEncoder::encode_info_for_const_param({:?})", def_id); + self.encode_info_for_generic_param(def_id, EntryKind::ConstParam, true) + } + fn encode_info_for_closure(&mut self, def_id: DefId) -> Entry<'tcx> { debug!("IsolatedEncoder::encode_info_for_closure({:?})", def_id); let tcx = self.tcx; let tables = self.tcx.typeck_tables_of(def_id); - let node_id = self.tcx.hir().as_local_node_id(def_id).unwrap(); - let hir_id = self.tcx.hir().node_to_hir_id(node_id); - let kind = match tables.node_id_to_type(hir_id).sty { + let hir_id = self.tcx.hir().as_local_hir_id(def_id).unwrap(); + let kind = match tables.node_type(hir_id).sty { ty::Generator(def_id, ..) 
=> { let layout = self.tcx.generator_layout(def_id); let data = GeneratorData { @@ -1382,7 +1447,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { fn encode_info_for_anon_const(&mut self, def_id: DefId) -> Entry<'tcx> { debug!("IsolatedEncoder::encode_info_for_anon_const({:?})", def_id); let tcx = self.tcx; - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); let body_id = tcx.hir().body_owned_by(id); let const_data = self.encode_rendered_const_for_body(body_id); let mir = tcx.mir_const_qualif(def_id).0; @@ -1525,7 +1590,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { // symbol associated with them (they weren't translated) or if they're an FFI // definition (as that's not defined in this crate). fn encode_exported_symbols(&mut self, - exported_symbols: &[(ExportedSymbol, SymbolExportLevel)]) + exported_symbols: &[(ExportedSymbol<'_>, SymbolExportLevel)]) -> EncodedExportedSymbols { // The metadata symbol name is special. It should not show up in // downstream crates. @@ -1589,7 +1654,7 @@ impl<'a, 'b: 'a, 'tcx: 'b> IsolatedEncoder<'a, 'b, 'tcx> { Entry { kind, - visibility: self.lazy(&ty::Visibility::from_hir(&nitem.vis, nitem.id, tcx)), + visibility: self.lazy(&ty::Visibility::from_hir(&nitem.vis, nitem.hir_id, tcx)), span: self.lazy(&nitem.span), attributes: self.encode_attributes(&nitem.attrs), children: LazySeq::empty(), @@ -1625,7 +1690,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { } fn visit_item(&mut self, item: &'tcx hir::Item) { intravisit::walk_item(self, item); - let def_id = self.index.tcx.hir().local_def_id(item.id); + let def_id = self.index.tcx.hir().local_def_id_from_hir_id(item.hir_id); match item.node { hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => (), // ignore these @@ -1635,7 +1700,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { } fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem) { intravisit::walk_foreign_item(self, ni); - let def_id = self.index.tcx.hir().local_def_id(ni.id); + let def_id = self.index.tcx.hir().local_def_id_from_hir_id(ni.hir_id); self.index.record(def_id, IsolatedEncoder::encode_info_for_foreign_item, (def_id, ni)); @@ -1643,11 +1708,11 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { fn visit_variant(&mut self, v: &'tcx hir::Variant, g: &'tcx hir::Generics, - id: ast::NodeId) { + id: hir::HirId) { intravisit::walk_variant(self, v, g, id); if let Some(ref discr) = v.node.disr_expr { - let def_id = self.index.tcx.hir().local_def_id(discr.id); + let def_id = self.index.tcx.hir().local_def_id_from_hir_id(discr.hir_id); self.index.record(def_id, IsolatedEncoder::encode_info_for_anon_const, def_id); } } @@ -1660,7 +1725,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for EncodeVisitor<'a, 'b, 'tcx> { self.index.encode_info_for_ty(ty); } fn visit_macro_def(&mut self, macro_def: &'tcx hir::MacroDef) { - let def_id = self.index.tcx.hir().local_def_id(macro_def.id); + let def_id = self.index.tcx.hir().local_def_id_from_hir_id(macro_def.hir_id); self.index.record(def_id, IsolatedEncoder::encode_info_for_macro_def, macro_def); } } @@ -1679,13 +1744,18 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { fn encode_info_for_generics(&mut self, generics: &hir::Generics) { for param in &generics.params { + let def_id = self.tcx.hir().local_def_id_from_hir_id(param.hir_id); match param.kind { - hir::GenericParamKind::Lifetime { .. } => {} - hir::GenericParamKind::Type { ref default, .. 
} => { - let def_id = self.tcx.hir().local_def_id(param.id); - let has_default = Untracked(default.is_some()); - let encode_info = IsolatedEncoder::encode_info_for_ty_param; - self.record(def_id, encode_info, (def_id, has_default)); + GenericParamKind::Lifetime { .. } => continue, + GenericParamKind::Type { ref default, .. } => { + self.record( + def_id, + IsolatedEncoder::encode_info_for_ty_param, + (def_id, Untracked(default.is_some())), + ); + } + GenericParamKind::Const { .. } => { + self.record(def_id, IsolatedEncoder::encode_info_for_const_param, def_id); } } } @@ -1694,7 +1764,7 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { fn encode_info_for_ty(&mut self, ty: &hir::Ty) { match ty.node { hir::TyKind::Array(_, ref length) => { - let def_id = self.tcx.hir().local_def_id(length.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(length.hir_id); self.record(def_id, IsolatedEncoder::encode_info_for_anon_const, def_id); } _ => {} @@ -1704,7 +1774,7 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { fn encode_info_for_expr(&mut self, expr: &hir::Expr) { match expr.node { hir::ExprKind::Closure(..) => { - let def_id = self.tcx.hir().local_def_id(expr.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(expr.hir_id); self.record(def_id, IsolatedEncoder::encode_info_for_closure, def_id); } _ => {} @@ -1716,7 +1786,7 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { /// so it's easier to do that here then to wait until we would encounter /// normally in the visitor walk. fn encode_addl_info_for_item(&mut self, item: &hir::Item) { - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); match item.node { hir::ItemKind::Static(..) | hir::ItemKind::Const(..) | @@ -1736,17 +1806,23 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { let def = self.tcx.adt_def(def_id); for (i, variant) in def.variants.iter_enumerated() { - self.record(variant.did, + self.record(variant.def_id, IsolatedEncoder::encode_enum_variant_info, (def_id, Untracked(i))); + + if let Some(ctor_def_id) = variant.ctor_def_id { + self.record(ctor_def_id, + IsolatedEncoder::encode_enum_variant_ctor, + (def_id, Untracked(i))); + } } } hir::ItemKind::Struct(ref struct_def, _) => { self.encode_fields(def_id); // If the struct has a constructor, encode it. - if !struct_def.is_struct() { - let ctor_def_id = self.tcx.hir().local_def_id(struct_def.id()); + if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { + let ctor_def_id = self.tcx.hir().local_def_id_from_hir_id(ctor_hir_id); self.record(ctor_def_id, IsolatedEncoder::encode_struct_ctor, (def_id, ctor_def_id)); @@ -1781,7 +1857,7 @@ struct ImplVisitor<'a, 'tcx: 'a> { impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ImplVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { if let hir::ItemKind::Impl(..) = item.node { - let impl_id = self.tcx.hir().local_def_id(item.id); + let impl_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_id) { self.impls .entry(trait_ref.def_id) diff --git a/src/librustc_metadata/foreign_modules.rs b/src/librustc_metadata/foreign_modules.rs index 5b2002f2a9683..284f6796145a8 100644 --- a/src/librustc_metadata/foreign_modules.rs +++ b/src/librustc_metadata/foreign_modules.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
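// The recurring edit in the encoder/visitor changes above is switching HIR
// lookups from the old `NodeId` to `HirId`. A minimal before/after sketch,
// using only method names that already appear in this diff:
//
//     // before: look up by NodeId
//     let node_id = tcx.hir().as_local_node_id(def_id).unwrap();
//     let ast_item = tcx.hir().expect_impl_item(node_id);
//
//     // after: look up by HirId directly
//     let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap();
//     let ast_item = tcx.hir().expect_impl_item(hir_id);
//
// The same pattern covers `local_def_id(..)` -> `local_def_id_from_hir_id(..)`.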
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir; use rustc::middle::cstore::ForeignModule; @@ -35,11 +25,11 @@ impl<'a, 'tcx> ItemLikeVisitor<'tcx> for Collector<'a, 'tcx> { }; let foreign_items = fm.items.iter() - .map(|it| self.tcx.hir().local_def_id(it.id)) + .map(|it| self.tcx.hir().local_def_id_from_hir_id(it.hir_id)) .collect(); self.modules.push(ForeignModule { foreign_items, - def_id: self.tcx.hir().local_def_id(it.id), + def_id: self.tcx.hir().local_def_id_from_hir_id(it.hir_id), }); } diff --git a/src/librustc_metadata/index.rs b/src/librustc_metadata/index.rs index 0b4f7e579acb9..18f30383090cd 100644 --- a/src/librustc_metadata/index.rs +++ b/src/librustc_metadata/index.rs @@ -1,19 +1,10 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use schema::*; +use crate::schema::*; use rustc::hir::def_id::{DefId, DefIndex, DefIndexAddressSpace}; use rustc_serialize::opaque::Encoder; use std::slice; use std::u32; +use log::debug; /// While we are generating the metadata, we also track the position /// of each DefIndex. It is not required that all definitions appear @@ -34,12 +25,12 @@ impl Index { } } - pub fn record(&mut self, def_id: DefId, entry: Lazy) { + pub fn record(&mut self, def_id: DefId, entry: Lazy>) { assert!(def_id.is_local()); self.record_index(def_id.index, entry); } - pub fn record_index(&mut self, item: DefIndex, entry: Lazy) { + pub fn record_index(&mut self, item: DefIndex, entry: Lazy>) { assert!(entry.position < (u32::MAX as usize)); let position = entry.position as u32; let space_index = item.address_space().index(); diff --git a/src/librustc_metadata/index_builder.rs b/src/librustc_metadata/index_builder.rs index aea3ac1be8188..8343171b99f4b 100644 --- a/src/librustc_metadata/index_builder.rs +++ b/src/librustc_metadata/index_builder.rs @@ -1,19 +1,9 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Builder types for generating the "item data" section of the //! metadata. This section winds up looking like this: //! //! ``` //! // big list of item-like things... -//! // ...for most def-ids, there is an entry. +//! // ...for most `DefId`s, there is an entry. //! //! //! ``` @@ -55,10 +45,10 @@ //! give a callback fn, rather than taking a closure: it allows us to //! easily control precisely what data is given to that fn. -use encoder::EncodeContext; -use index::Index; -use schema::*; -use isolated_encoder::IsolatedEncoder; +use crate::encoder::EncodeContext; +use crate::index::Index; +use crate::schema::*; +use crate::isolated_encoder::IsolatedEncoder; use rustc::hir; use rustc::hir::def_id::DefId; @@ -95,7 +85,7 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { } } - /// Emit the data for a def-id to the metadata. 
The function to + /// Emit the data for a `DefId` to the metadata. The function to /// emit the data is `op`, and it will be given `data` as /// arguments. This `record` function will call `op` to generate /// the `Entry` (which may point to other encoded information) @@ -139,25 +129,25 @@ impl<'a, 'b, 'tcx> IndexBuilder<'a, 'b, 'tcx> { } /// Trait used for data that can be passed from outside a dep-graph -/// task. The data must either be of some safe type, such as a +/// task. The data must either be of some safe type, such as a /// `DefId` index, or implement the `read` method so that it can add /// a read of whatever dep-graph nodes are appropriate. pub trait DepGraphRead { - fn read(&self, tcx: TyCtxt); + fn read(&self, tcx: TyCtxt<'_, '_, '_>); } impl DepGraphRead for DefId { - fn read(&self, _tcx: TyCtxt) {} + fn read(&self, _tcx: TyCtxt<'_, '_, '_>) {} } impl DepGraphRead for ast::NodeId { - fn read(&self, _tcx: TyCtxt) {} + fn read(&self, _tcx: TyCtxt<'_, '_, '_>) {} } impl DepGraphRead for Option where T: DepGraphRead { - fn read(&self, tcx: TyCtxt) { + fn read(&self, tcx: TyCtxt<'_, '_, '_>) { match *self { Some(ref v) => v.read(tcx), None => (), @@ -168,7 +158,7 @@ impl DepGraphRead for Option impl DepGraphRead for [T] where T: DepGraphRead { - fn read(&self, tcx: TyCtxt) { + fn read(&self, tcx: TyCtxt<'_, '_, '_>) { for i in self { i.read(tcx); } @@ -181,7 +171,7 @@ macro_rules! read_tuple { where $($name: DepGraphRead),* { #[allow(non_snake_case)] - fn read(&self, tcx: TyCtxt) { + fn read(&self, tcx: TyCtxt<'_, '_, '_>) { let &($(ref $name),*) = self; $($name.read(tcx);)* } @@ -194,8 +184,8 @@ read_tuple!(A, B, C); macro_rules! read_hir { ($t:ty) => { impl<'tcx> DepGraphRead for &'tcx $t { - fn read(&self, tcx: TyCtxt) { - tcx.hir().read(self.id); + fn read(&self, tcx: TyCtxt<'_, '_, '_>) { + tcx.hir().read_by_hir_id(self.hir_id); } } } @@ -218,17 +208,17 @@ read_hir!(hir::MacroDef); pub struct Untracked(pub T); impl DepGraphRead for Untracked { - fn read(&self, _tcx: TyCtxt) {} + fn read(&self, _tcx: TyCtxt<'_, '_, '_>) {} } /// Newtype that can be used to package up misc data extracted from a -/// HIR node that doesn't carry its own id. This will allow an +/// HIR node that doesn't carry its own ID. This will allow an /// arbitrary `T` to be passed in, but register a read on the given -/// node-id. -pub struct FromId(pub ast::NodeId, pub T); +/// `NodeId`. +pub struct FromId(pub hir::HirId, pub T); impl DepGraphRead for FromId { - fn read(&self, tcx: TyCtxt) { - tcx.hir().read(self.0); + fn read(&self, tcx: TyCtxt<'_, '_, '_>) { + tcx.hir().read_by_hir_id(self.0); } } diff --git a/src/librustc_metadata/isolated_encoder.rs b/src/librustc_metadata/isolated_encoder.rs index 88594afa32024..e879a73e650bb 100644 --- a/src/librustc_metadata/isolated_encoder.rs +++ b/src/librustc_metadata/isolated_encoder.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use encoder::EncodeContext; -use schema::{Lazy, LazySeq}; +use crate::encoder::EncodeContext; +use crate::schema::{Lazy, LazySeq}; use rustc::ty::TyCtxt; use rustc_serialize::Encodable; diff --git a/src/librustc_metadata/lib.rs b/src/librustc_metadata/lib.rs index ee99f7465b905..4078171733fc3 100644 --- a/src/librustc_metadata/lib.rs +++ b/src/librustc_metadata/lib.rs @@ -1,48 +1,28 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(box_patterns)] +#![feature(drain_filter)] #![feature(libc)] #![feature(nll)] #![feature(proc_macro_internals)] #![feature(proc_macro_quote)] -#![feature(quote)] #![feature(rustc_diagnostic_macros)] -#![feature(slice_sort_by_cached_key)] #![feature(crate_visibility_modifier)] #![feature(specialization)] #![feature(rustc_private)] #![recursion_limit="256"] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] + extern crate libc; -#[macro_use] -extern crate log; -extern crate memmap; -extern crate stable_deref_trait; -#[macro_use] -extern crate syntax; -extern crate syntax_pos; -extern crate flate2; +#[allow(unused_extern_crates)] extern crate serialize as rustc_serialize; // used by deriving -extern crate rustc_errors as errors; -extern crate syntax_ext; extern crate proc_macro; #[macro_use] extern crate rustc; -extern crate rustc_target; #[macro_use] extern crate rustc_data_structures; diff --git a/src/librustc_metadata/link_args.rs b/src/librustc_metadata/link_args.rs index decd85c507e25..6741b5235db36 100644 --- a/src/librustc_metadata/link_args.rs +++ b/src/librustc_metadata/link_args.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir; use rustc::ty::TyCtxt; diff --git a/src/librustc_metadata/locator.rs b/src/librustc_metadata/locator.rs index f01ed9e3ceef7..81878c4f687b6 100644 --- a/src/librustc_metadata/locator.rs +++ b/src/librustc_metadata/locator.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Finds crate binaries and loads their metadata //! //! Might I be the first to welcome you to a world of platform differences, @@ -37,7 +27,7 @@ //! //! The reason for this is that any of B's types could be composed of C's types, //! any function in B could return a type from C, etc. To be able to guarantee -//! 
that we can always typecheck/translate any function, we have to have +//! that we can always type-check/translate any function, we have to have //! complete knowledge of the whole ecosystem, not just our immediate //! dependencies. //! @@ -222,9 +212,9 @@ //! no means all of the necessary details. Take a look at the rest of //! metadata::locator or metadata::creader for all the juicy details! -use cstore::{MetadataRef, MetadataBlob}; -use creader::Library; -use schema::{METADATA_HEADER, rustc_version}; +use crate::cstore::{MetadataRef, MetadataBlob}; +use crate::creader::Library; +use crate::schema::{METADATA_HEADER, rustc_version}; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::svh::Svh; @@ -236,6 +226,7 @@ use rustc::util::nodemap::FxHashMap; use errors::DiagnosticBuilder; use syntax::symbol::Symbol; +use syntax::struct_span_err; use syntax_pos::Span; use rustc_target::spec::{Target, TargetTriple}; @@ -251,11 +242,15 @@ use flate2::read::DeflateDecoder; use rustc_data_structures::owning_ref::OwningRef; +use log::{debug, info, warn}; + +#[derive(Clone)] pub struct CrateMismatch { path: PathBuf, got: String, } +#[derive(Clone)] pub struct Context<'a> { pub sess: &'a Session, pub span: Span, @@ -265,7 +260,7 @@ pub struct Context<'a> { pub extra_filename: Option<&'a str>, // points to either self.sess.target.target or self.sess.host, must match triple pub target: &'a Target, - pub triple: &'a TargetTriple, + pub triple: TargetTriple, pub filesearch: FileSearch<'a>, pub root: &'a Option, pub rejected_via_hash: Vec, @@ -293,7 +288,7 @@ enum CrateFlavor { } impl fmt::Display for CrateFlavor { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match *self { CrateFlavor::Rlib => "rlib", CrateFlavor::Rmeta => "rmeta", @@ -309,6 +304,14 @@ impl CratePaths { } impl<'a> Context<'a> { + pub fn reset(&mut self) { + self.rejected_via_hash.clear(); + self.rejected_via_triple.clear(); + self.rejected_via_kind.clear(); + self.rejected_via_version.clear(); + self.rejected_via_filename.clear(); + } + pub fn maybe_load_library_crate(&mut self) -> Option { let mut seen_paths = FxHashSet::default(); match self.extra_filename { @@ -406,7 +409,7 @@ impl<'a> Context<'a> { add); if (self.ident == "std" || self.ident == "core") - && self.triple != &TargetTriple::from_triple(config::host_triple()) { + && self.triple != TargetTriple::from_triple(config::host_triple()) { err.note(&format!("the `{}` target may not be installed", self.triple)); } err.span_label(self.span, "can't find crate"); @@ -610,7 +613,7 @@ impl<'a> Context<'a> { } } - let mut err: Option = None; + let mut err: Option> = None; for (lib, kind) in m { info!("{} reading metadata from: {}", flavor, lib.display()); let (hash, metadata) = @@ -725,7 +728,7 @@ impl<'a> Context<'a> { } } - if &root.triple != self.triple { + if root.triple != self.triple { info!("Rejecting via crate triple: expected {} got {}", self.triple, root.triple); @@ -925,7 +928,7 @@ fn get_metadata_section_imp(target: &Target, } } -// A diagnostic function for dumping crate metadata to an output stream +/// A diagnostic function for dumping crate metadata to an output stream. 
pub fn list_file_metadata(target: &Target, path: &Path, loader: &dyn MetadataLoader, diff --git a/src/librustc_metadata/native_libs.rs b/src/librustc_metadata/native_libs.rs index 6f85418b297ed..e0665127c0fb0 100644 --- a/src/librustc_metadata/native_libs.rs +++ b/src/librustc_metadata/native_libs.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir; use rustc::middle::cstore::{self, NativeLibrary}; @@ -19,6 +9,7 @@ use syntax::attr; use syntax::source_map::Span; use syntax::feature_gate::{self, GateIssue}; use syntax::symbol::Symbol; +use syntax::{span_err, struct_span_err}; pub fn collect<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Vec { let mut collector = Collector { @@ -65,7 +56,7 @@ impl<'a, 'tcx> ItemLikeVisitor<'tcx> for Collector<'a, 'tcx> { name: None, kind: cstore::NativeUnknown, cfg: None, - foreign_module: Some(self.tcx.hir().local_def_id(it.id)), + foreign_module: Some(self.tcx.hir().local_def_id_from_hir_id(it.hir_id)), wasm_import_module: None, }; let mut kind_specified = false; @@ -83,9 +74,10 @@ impl<'a, 'tcx> ItemLikeVisitor<'tcx> for Collector<'a, 'tcx> { "dylib" => cstore::NativeUnknown, "framework" => cstore::NativeFramework, k => { - struct_span_err!(self.tcx.sess, m.span, E0458, + struct_span_err!(self.tcx.sess, item.span(), E0458, "unknown kind: `{}`", k) - .span_label(item.span, "unknown kind").emit(); + .span_label(item.span(), "unknown kind") + .span_label(m.span, "").emit(); cstore::NativeUnknown } }; @@ -173,7 +165,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { !self.tcx.features().static_nobundle { feature_gate::emit_feature_err(&self.tcx.sess.parse_sess, "static_nobundle", - span.unwrap(), + span.unwrap_or_else(|| syntax_pos::DUMMY_SP), GateIssue::Language, "kind=\"static-nobundle\" is feature gated"); } @@ -208,34 +200,31 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { } // Update kind and, optionally, the name of all native libraries - // (there may be more than one) with the specified name. + // (there may be more than one) with the specified name. If any + // library is mentioned more than once, keep the latest mention + // of it, so that any possible dependent libraries appear before + // it. (This ensures that the linker is able to see symbols from + // all possible dependent libraries before linking in the library + // in question.) for &(ref name, ref new_name, kind) in &self.tcx.sess.opts.libs { - let mut found = false; - for lib in self.libs.iter_mut() { - let lib_name = match lib.name { - Some(n) => n, - None => continue, - }; - if lib_name == name as &str { - let mut changed = false; - if let Some(k) = kind { - lib.kind = k; - changed = true; - } - if let &Some(ref new_name) = new_name { - lib.name = Some(Symbol::intern(new_name)); - changed = true; - } - if !changed { - let msg = format!("redundant linker flag specified for \ - library `{}`", name); - self.tcx.sess.warn(&msg); + // If we've already added any native libraries with the same + // name, they will be pulled out into `existing`, so that we + // can move them to the end of the list below. 
+ let mut existing = self.libs.drain_filter(|lib| { + if let Some(lib_name) = lib.name { + if lib_name == name as &str { + if let Some(k) = kind { + lib.kind = k; + } + if let &Some(ref new_name) = new_name { + lib.name = Some(Symbol::intern(new_name)); + } + return true; } - - found = true; } - } - if !found { + false + }).collect::>(); + if existing.is_empty() { // Add if not found let new_name = new_name.as_ref().map(|s| &**s); // &Option -> Option<&str> let lib = NativeLibrary { @@ -246,6 +235,10 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { wasm_import_module: None, }; self.register_native_lib(None, lib); + } else { + // Move all existing libraries with the same name to the + // end of the command line. + self.libs.append(&mut existing); } } } diff --git a/src/librustc_metadata/schema.rs b/src/librustc_metadata/schema.rs index 1ae3f0a12bdd0..fe2ea26c32741 100644 --- a/src/librustc_metadata/schema.rs +++ b/src/librustc_metadata/schema.rs @@ -1,14 +1,4 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use index; +use crate::index; use rustc::hir; use rustc::hir::def::{self, CtorKind}; @@ -197,6 +187,7 @@ pub struct CrateRoot { pub has_default_lib_allocator: bool, pub plugin_registrar_fn: Option, pub proc_macro_decls_static: Option, + pub proc_macro_stability: Option, pub crate_deps: LazySeq, pub dylib_dependency_formats: LazySeq>, @@ -308,6 +299,8 @@ pub enum EntryKind<'tcx> { ForeignType, GlobalAsm, Type, + TypeParam, + ConstParam, Existential, Enum(ReprOptions), Field, @@ -326,6 +319,7 @@ pub enum EntryKind<'tcx> { AssociatedType(AssociatedContainer), AssociatedExistential(AssociatedContainer), AssociatedConst(AssociatedContainer, ConstQualif, Lazy), + TraitAlias(Lazy>), } impl<'a, 'gcx> HashStable> for EntryKind<'gcx> { @@ -343,7 +337,9 @@ impl<'a, 'gcx> HashStable> for EntryKind<'gcx> { EntryKind::ForeignType | EntryKind::Field | EntryKind::Existential | - EntryKind::Type => { + EntryKind::Type | + EntryKind::TypeParam | + EntryKind::ConstParam => { // Nothing else to hash here. } EntryKind::Const(qualif, ref const_data) => { @@ -380,6 +376,9 @@ impl<'a, 'gcx> HashStable> for EntryKind<'gcx> { EntryKind::Trait(ref trait_data) => { trait_data.hash_stable(hcx, hasher); } + EntryKind::TraitAlias(ref trait_alias_data) => { + trait_alias_data.hash_stable(hcx, hasher); + } EntryKind::Impl(ref impl_data) => { impl_data.hash_stable(hcx, hasher); } @@ -450,11 +449,8 @@ impl_stable_hash_for!(struct FnData<'tcx> { constness, arg_names, sig }); pub struct VariantData<'tcx> { pub ctor_kind: CtorKind, pub discr: ty::VariantDiscr, - - /// If this is a struct's only variant, this - /// is the index of the "struct ctor" item. - pub struct_ctor: Option, - + /// If this is unit or tuple-variant/struct, then this is the index of the ctor id. + pub ctor: Option, /// If this is a tuple struct or variant /// ctor, this is its "function" signature. 
pub ctor_sig: Option>>, @@ -463,7 +459,7 @@ pub struct VariantData<'tcx> { impl_stable_hash_for!(struct VariantData<'tcx> { ctor_kind, discr, - struct_ctor, + ctor, ctor_sig }); @@ -484,6 +480,15 @@ impl_stable_hash_for!(struct TraitData<'tcx> { super_predicates }); +#[derive(RustcEncodable, RustcDecodable)] +pub struct TraitAliasData<'tcx> { + pub super_predicates: Lazy>, +} + +impl_stable_hash_for!(struct TraitAliasData<'tcx> { + super_predicates +}); + #[derive(RustcEncodable, RustcDecodable)] pub struct ImplData<'tcx> { pub polarity: hir::ImplPolarity, @@ -515,7 +520,7 @@ pub enum AssociatedContainer { ImplFinal, } -impl_stable_hash_for!(enum ::schema::AssociatedContainer { +impl_stable_hash_for!(enum crate::schema::AssociatedContainer { TraitRequired, TraitWithDefault, ImplDefault, diff --git a/src/librustc_mir/Cargo.toml b/src/librustc_mir/Cargo.toml index 5044e351962ed..c32bafa99205f 100644 --- a/src/librustc_mir/Cargo.toml +++ b/src/librustc_mir/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_mir" version = "0.0.0" +edition = "2018" [lib] name = "rustc_mir" @@ -10,12 +11,11 @@ crate-type = ["dylib"] [dependencies] arena = { path = "../libarena" } -bitflags = "1.0" either = "1.5.0" -graphviz = { path = "../libgraphviz" } +dot = { path = "../libgraphviz", package = "graphviz" } log = "0.4" log_settings = "0.1.1" -polonius-engine = "0.5.0" +polonius-engine = "0.6.2" rustc = { path = "../librustc" } rustc_target = { path = "../librustc_target" } rustc_data_structures = { path = "../librustc_data_structures" } diff --git a/src/librustc_mir/borrow_check/borrow_set.rs b/src/librustc_mir/borrow_check/borrow_set.rs index 947c32df0f6a3..c81da66672fbf 100644 --- a/src/librustc_mir/borrow_check/borrow_set.rs +++ b/src/librustc_mir/borrow_check/borrow_set.rs @@ -1,21 +1,9 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::place_ext::PlaceExt; -use borrow_check::nll::ToRegionVid; -use dataflow::indexes::BorrowIndex; -use dataflow::move_paths::MoveData; +use crate::borrow_check::place_ext::PlaceExt; +use crate::borrow_check::nll::ToRegionVid; +use crate::dataflow::indexes::BorrowIndex; +use crate::dataflow::move_paths::MoveData; use rustc::mir::traversal; -use rustc::mir::visit::{ - PlaceContext, Visitor, NonUseContext, MutatingUseContext, NonMutatingUseContext -}; +use rustc::mir::visit::{PlaceContext, Visitor, NonUseContext, MutatingUseContext}; use rustc::mir::{self, Location, Mir, Local}; use rustc::ty::{RegionVid, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; @@ -36,12 +24,12 @@ crate struct BorrowSet<'tcx> { crate location_map: FxHashMap, /// Locations which activate borrows. - /// NOTE: A given location may activate more than one borrow in the future + /// NOTE: a given location may activate more than one borrow in the future /// when more general two-phase borrow support is introduced, but for now we - /// only need to store one borrow index + /// only need to store one borrow index. crate activation_map: FxHashMap>, - /// Map from local to all the borrows on that local + /// Map from local to all the borrows on that local. 
crate local_map: FxHashMap>, crate locals_state_at_exit: LocalsStateAtExit, @@ -55,8 +43,8 @@ impl<'tcx> Index for BorrowSet<'tcx> { } } -/// Location where a two phase borrow is activated, if a borrow -/// is in fact a two phase borrow. +/// Location where a two-phase borrow is activated, if a borrow +/// is in fact a two-phase borrow. #[derive(Copy, Clone, PartialEq, Eq, Debug)] crate enum TwoPhaseActivation { NotTwoPhase, @@ -64,7 +52,7 @@ crate enum TwoPhaseActivation { ActivatedAt(Location), } -#[derive(Debug)] +#[derive(Debug, Clone)] crate struct BorrowData<'tcx> { /// Location where the borrow reservation starts. /// In many cases, this will be equal to the activation location but not always. @@ -82,7 +70,7 @@ crate struct BorrowData<'tcx> { } impl<'tcx> fmt::Display for BorrowData<'tcx> { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { let kind = match self.kind { mir::BorrowKind::Shared => "", mir::BorrowKind::Shallow => "shallow ", @@ -267,31 +255,21 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'gcx, 'tcx> { ); } - // Otherwise, this is the unique later use - // that we expect. - borrow_data.activation_location = match context { - // The use of TMP in a shared borrow does not - // count as an actual activation. - PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow(..)) | - PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow(..)) => - TwoPhaseActivation::NotActivated, - _ => { - // Double check: This borrow is indeed a two-phase borrow (that is, - // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and - // we've not found any other activations (checked above). - assert_eq!( - borrow_data.activation_location, - TwoPhaseActivation::NotActivated, - "never found an activation for this borrow!", - ); - - self.activation_map - .entry(location) - .or_default() - .push(borrow_index); - TwoPhaseActivation::ActivatedAt(location) - } - }; + // Otherwise, this is the unique later use that we expect. + // Double check: This borrow is indeed a two-phase borrow (that is, + // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and + // we've not found any other activations (checked above). + assert_eq!( + borrow_data.activation_location, + TwoPhaseActivation::NotActivated, + "never found an activation for this borrow!", + ); + self.activation_map + .entry(location) + .or_default() + .push(borrow_index); + + borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location); } } @@ -321,7 +299,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> GatherBorrows<'a, 'gcx, 'tcx> { - /// Returns true if the borrow represented by `kind` is + /// Returns `true` if the borrow represented by `kind` is /// allowed to be split into separate Reservation and /// Activation phases. fn allow_two_phase_borrow(&self, kind: mir::BorrowKind) -> bool { @@ -355,7 +333,7 @@ impl<'a, 'gcx, 'tcx> GatherBorrows<'a, 'gcx, 'tcx> { // TEMP = &foo // // so extract `temp`. 
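// For context on `TwoPhaseActivation` above: a two-phase borrow is *reserved*
// first and only *activated* at its later use, which is what lets method calls
// like the one below pass borrowck. A minimal sketch (illustrative only, not
// taken from this patch; assumes nothing beyond the standard `Vec` API):
//
//     fn two_phase_example() {
//         let mut v = vec![0];
//         // `&mut v` for the receiver is reserved here, `v.len()` still
//         // performs a shared read, and the mutable borrow is activated
//         // only when `push` is actually called.
//         v.push(v.len());
//     }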
- let temp = if let &mir::Place::Local(temp) = assigned_place { + let temp = if let &mir::Place::Base(mir::PlaceBase::Local(temp)) = assigned_place { temp } else { span_bug!( diff --git a/src/librustc_mir/borrow_check/error_reporting.rs b/src/librustc_mir/borrow_check/error_reporting.rs index 598c2f810beb7..16436a1f2b076 100644 --- a/src/librustc_mir/borrow_check/error_reporting.rs +++ b/src/librustc_mir/borrow_check/error_reporting.rs @@ -1,41 +1,34 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::explain_borrow::BorrowExplanation; -use borrow_check::nll::region_infer::{RegionName, RegionNameSource}; -use borrow_check::prefixes::IsPrefixOf; -use borrow_check::WriteKind; +use crate::borrow_check::nll::explain_borrow::BorrowExplanation; +use crate::borrow_check::nll::region_infer::{RegionName, RegionNameSource}; +use crate::borrow_check::prefixes::IsPrefixOf; +use crate::borrow_check::WriteKind; use rustc::hir; +use rustc::hir::def::Namespace; use rustc::hir::def_id::DefId; use rustc::middle::region::ScopeTree; use rustc::mir::{ self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, Constant, ConstraintCategory, Field, Local, LocalDecl, LocalKind, Location, Operand, - Place, PlaceProjection, ProjectionElem, Rvalue, Statement, StatementKind, - TerminatorKind, VarBindingForm, + Place, PlaceBase, PlaceProjection, ProjectionElem, Rvalue, Statement, StatementKind, + Static, StaticKind, TerminatorKind, VarBindingForm, }; use rustc::ty::{self, DefIdTree}; -use rustc::util::ppaux::with_highlight_region_for_bound_region; +use rustc::ty::layout::VariantIdx; +use rustc::ty::print::Print; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::sync::Lrc; use rustc_errors::{Applicability, DiagnosticBuilder}; use syntax_pos::Span; +use syntax::source_map::CompilerDesugaringKind; use super::borrow_set::BorrowData; use super::{Context, MirBorrowckCtxt}; use super::{InitializationRequiringAction, PrefixSet}; -use dataflow::drop_flag_effects; -use dataflow::move_paths::indexes::MoveOutIndex; -use dataflow::move_paths::MovePathIndex; -use util::borrowck_errors::{BorrowckErrors, Origin}; +use crate::dataflow::drop_flag_effects; +use crate::dataflow::move_paths::indexes::MoveOutIndex; +use crate::dataflow::move_paths::MovePathIndex; +use crate::util::borrowck_errors::{BorrowckErrors, Origin}; #[derive(Debug)] struct MoveSite { @@ -43,7 +36,7 @@ struct MoveSite { /// then tell us where the move occurred. moi: MoveOutIndex, - /// True if we traversed a back edge while walking from the point + /// `true` if we traversed a back edge while walking from the point /// of error to the move site. 
traversed_back_edge: bool } @@ -133,13 +126,18 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { Origin::Mir, ); - self.add_closure_invoked_twice_with_moved_variable_suggestion( + self.add_moved_or_invoked_closure_note( context.loc, used_place, &mut err, ); let mut is_loop_move = false; + let is_partial_move = move_site_vec.iter().any(|move_site| { + let move_out = self.move_data.moves[(*move_site).moi]; + let moved_place = &self.move_data.move_paths[move_out.path].place; + used_place != moved_place && used_place.is_prefix_of(moved_place) + }); for move_site in &move_site_vec { let move_out = self.move_data.moves[(*move_site).moi]; let moved_place = &self.move_data.move_paths[move_out.path].place; @@ -158,6 +156,18 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { span, format!("value moved{} here, in previous iteration of loop", move_msg), ); + if Some(CompilerDesugaringKind::ForLoop) == span.compiler_desugaring_kind() { + if let Ok(snippet) = self.infcx.tcx.sess.source_map() + .span_to_snippet(span) + { + err.span_suggestion( + move_span, + "consider borrowing this to avoid moving it into the for loop", + format!("&{}", snippet), + Applicability::MaybeIncorrect, + ); + } + } is_loop_move = true; } else if move_site.traversed_back_edge { err.span_label( @@ -185,44 +195,61 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { err.span_label( span, format!( - "value {} here after move", - desired_action.as_verb_in_past_tense() + "value {} here {}", + desired_action.as_verb_in_past_tense(), + if is_partial_move { "after partial move" } else { "after move" }, ), ); } - if let Some(ty) = self.retrieve_type_for_place(used_place) { - let needs_note = match ty.sty { - ty::Closure(id, _) => { - let tables = self.infcx.tcx.typeck_tables_of(id); - let node_id = self.infcx.tcx.hir().as_local_node_id(id).unwrap(); - let hir_id = self.infcx.tcx.hir().node_to_hir_id(node_id); + let ty = used_place.ty(self.mir, self.infcx.tcx).ty; + let needs_note = match ty.sty { + ty::Closure(id, _) => { + let tables = self.infcx.tcx.typeck_tables_of(id); + let hir_id = self.infcx.tcx.hir().as_local_hir_id(id).unwrap(); - tables.closure_kind_origins().get(hir_id).is_none() - } - _ => true, - }; - - if needs_note { - let mpi = self.move_data.moves[move_out_indices[0]].path; - let place = &self.move_data.move_paths[mpi].place; + tables.closure_kind_origins().get(hir_id).is_none() + } + _ => true, + }; - if let Some(ty) = self.retrieve_type_for_place(place) { - let note_msg = match self.describe_place_with_options( - place, - IncludingDowncast(true), - ) { - Some(name) => format!("`{}`", name), - None => "value".to_owned(), - }; + if needs_note { + let mpi = self.move_data.moves[move_out_indices[0]].path; + let place = &self.move_data.move_paths[mpi].place; - err.note(&format!( - "move occurs because {} has type `{}`, \ - which does not implement the `Copy` trait", - note_msg, ty - )); + let ty = place.ty(self.mir, self.infcx.tcx).ty; + let opt_name = self.describe_place_with_options(place, IncludingDowncast(true)); + let note_msg = match opt_name { + Some(ref name) => format!("`{}`", name), + None => "value".to_owned(), + }; + if let ty::Param(param_ty) = ty.sty { + let tcx = self.infcx.tcx; + let generics = tcx.generics_of(self.mir_def_id); + let def_id = generics.type_param(¶m_ty, tcx).def_id; + if let Some(sp) = tcx.hir().span_if_local(def_id) { + err.span_label( + sp, + "consider adding a `Copy` constraint to this type argument", + ); } } + if let Place::Base(PlaceBase::Local(local)) = place 
{ + let decl = &self.mir.local_decls[*local]; + err.span_label( + decl.source_info.span, + format!( + "move occurs because {} has type `{}`, \ + which does not implement the `Copy` trait", + note_msg, ty, + )); + } else { + err.note(&format!( + "move occurs because {} has type `{}`, \ + which does not implement the `Copy` trait", + note_msg, ty + )); + } } if let Some((_, mut old_err)) = self.move_error_reported @@ -278,8 +305,11 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { format!("move occurs due to use{}", move_spans.describe()) ); - self.explain_why_borrow_contains_point(context, borrow, None) - .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + self.explain_why_borrow_contains_point( + context, + borrow, + None, + ).add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", Some(borrow_span)); err.buffer(&mut self.errors_buffer); } @@ -288,7 +318,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { context: Context, (place, _span): (&Place<'tcx>, Span), borrow: &BorrowData<'tcx>, - ) { + ) -> DiagnosticBuilder<'cx> { let tcx = self.infcx.tcx; let borrow_spans = self.retrieve_borrow_spans(borrow); @@ -316,8 +346,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { }); self.explain_why_borrow_contains_point(context, borrow, None) - .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); - err.buffer(&mut self.errors_buffer); + .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None); + err } pub(super) fn report_conflicting_borrow( @@ -326,7 +356,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { (place, span): (&Place<'tcx>, Span), gen_borrow_kind: BorrowKind, issued_borrow: &BorrowData<'tcx>, - ) { + ) -> DiagnosticBuilder<'cx> { let issued_spans = self.retrieve_borrow_spans(issued_borrow); let issued_span = issued_spans.args_or_use(); @@ -339,10 +369,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { "closure" }; - let desc_place = self.describe_place(place).unwrap_or_else(|| "_".to_owned()); - let tcx = self.infcx.tcx; - - let first_borrow_desc; + let (desc_place, msg_place, msg_borrow, union_type_name) = + self.describe_place_for_conflicting_borrow(place, &issued_borrow.borrowed_place); let explanation = self.explain_why_borrow_contains_point(context, issued_borrow, None); let second_borrow_desc = if explanation.is_explained() { @@ -352,6 +380,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { }; // FIXME: supply non-"" `opt_via` when appropriate + let tcx = self.infcx.tcx; + let first_borrow_desc; let mut err = match ( gen_borrow_kind, "immutable", @@ -365,12 +395,12 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { tcx.cannot_reborrow_already_borrowed( span, &desc_place, - "", + &msg_place, lft, issued_span, "it", rgt, - "", + &msg_borrow, None, Origin::Mir, ) @@ -380,12 +410,12 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { tcx.cannot_reborrow_already_borrowed( span, &desc_place, - "", + &msg_place, lft, issued_span, "it", rgt, - "", + &msg_borrow, None, Origin::Mir, ) @@ -396,9 +426,9 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { tcx.cannot_mutably_borrow_multiply( span, &desc_place, - "", + &msg_place, issued_span, - "", + &msg_borrow, None, Origin::Mir, ) @@ -430,9 +460,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { "borrow occurs due to use of `{}`{}", desc_place, borrow_spans.describe() ), ); - err.buffer(&mut self.errors_buffer); - return; + return err; } (BorrowKind::Unique, _, _, _, _, 
_) => { @@ -482,14 +511,10 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ) } - (BorrowKind::Shallow, _, _, BorrowKind::Unique, _, _) - | (BorrowKind::Shallow, _, _, BorrowKind::Mut { .. }, _, _) => { - // Shallow borrows are uses from the user's point of view. - self.report_use_while_mutably_borrowed(context, (place, span), issued_borrow); - return; - } (BorrowKind::Shared, _, _, BorrowKind::Shared, _, _) | (BorrowKind::Shared, _, _, BorrowKind::Shallow, _, _) + | (BorrowKind::Shallow, _, _, BorrowKind::Mut { .. }, _, _) + | (BorrowKind::Shallow, _, _, BorrowKind::Unique, _, _) | (BorrowKind::Shallow, _, _, BorrowKind::Shared, _, _) | (BorrowKind::Shallow, _, _, BorrowKind::Shallow, _, _) => unreachable!(), }; @@ -522,10 +547,120 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ); } - explanation - .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, first_borrow_desc); + if union_type_name != "" { + err.note(&format!( + "`{}` is a field of the union `{}`, so it overlaps the field `{}`", + msg_place, union_type_name, msg_borrow, + )); + } - err.buffer(&mut self.errors_buffer); + explanation.add_explanation_to_diagnostic( + self.infcx.tcx, + self.mir, + &mut err, + first_borrow_desc, + None, + ); + + err + } + + /// Returns the description of the root place for a conflicting borrow and the full + /// descriptions of the places that caused the conflict. + /// + /// In the simplest case, where there are no unions involved, if a mutable borrow of `x` is + /// attempted while a shared borrow is live, then this function will return: + /// + /// ("x", "", "") + /// + /// In the simple union case, if a mutable borrow of a union field `x.z` is attempted while + /// a shared borrow of another field `x.y`, then this function will return: + /// + /// ("x", "x.z", "x.y") + /// + /// In the more complex union case, where the union is a field of a struct, then if a mutable + /// borrow of a union field in a struct `x.u.z` is attempted while a shared borrow of + /// another field `x.u.y`, then this function will return: + /// + /// ("x.u", "x.u.z", "x.u.y") + /// + /// This is used when creating error messages like below: + /// + /// > cannot borrow `a.u` (via `a.u.z.c`) as immutable because it is also borrowed as + /// > mutable (via `a.u.s.b`) [E0502] + pub(super) fn describe_place_for_conflicting_borrow( + &self, + first_borrowed_place: &Place<'tcx>, + second_borrowed_place: &Place<'tcx>, + ) -> (String, String, String, String) { + // Define a small closure that we can use to check if the type of a place + // is a union. + let is_union = |place: &Place<'tcx>| -> bool { + place.ty(self.mir, self.infcx.tcx).ty + .ty_adt_def() + .map(|adt| adt.is_union()) + .unwrap_or(false) + }; + + // Start with an empty tuple, so we can use the functions on `Option` to reduce some + // code duplication (particularly around returning an empty description in the failure + // case). + Some(()) + .filter(|_| { + // If we have a conflicting borrow of the same place, then we don't want to add + // an extraneous "via x.y" to our diagnostics, so filter out this case. + first_borrowed_place != second_borrowed_place + }) + .and_then(|_| { + // We're going to want to traverse the first borrowed place to see if we can find + // field access to a union. If we find that, then we will keep the place of the + // union being accessed and the field that was being accessed so we can check the + // second borrowed place for the same union and a access to a different field. 
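// A rough sketch (illustrative only; the union and field names are invented)
// of the E0502 situation that `describe_place_for_conflicting_borrow` above
// describes, where two borrows name different fields of the same union and
// therefore overlap in memory:
//
//     union U { x: u32, y: u32 }
//
//     fn overlapping_borrows(u: &mut U) {
//         unsafe {
//             let a = &mut u.x; // first borrow, mutable, via `u.x`
//             let b = &u.y;     // second borrow of an overlapping field:
//                               // rejected with E0502, "also borrowed as
//                               // mutable (via `u.x`)"
//             *a = 1;
//             let _ = b;
//         }
//     }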
+ let mut current = first_borrowed_place; + while let Place::Projection(box PlaceProjection { base, elem }) = current { + match elem { + ProjectionElem::Field(field, _) if is_union(base) => { + return Some((base, field)); + }, + _ => current = base, + } + } + None + }) + .and_then(|(target_base, target_field)| { + // With the place of a union and a field access into it, we traverse the second + // borrowed place and look for a access to a different field of the same union. + let mut current = second_borrowed_place; + while let Place::Projection(box PlaceProjection { base, elem }) = current { + match elem { + ProjectionElem::Field(field, _) if { + is_union(base) && field != target_field && base == target_base + } => { + let desc_base = self.describe_place(base) + .unwrap_or_else(|| "_".to_owned()); + let desc_first = self.describe_place(first_borrowed_place) + .unwrap_or_else(|| "_".to_owned()); + let desc_second = self.describe_place(second_borrowed_place) + .unwrap_or_else(|| "_".to_owned()); + + // Also compute the name of the union type, eg. `Foo` so we + // can add a helpful note with it. + let ty = base.ty(self.mir, self.infcx.tcx).ty; + + return Some((desc_base, desc_first, desc_second, ty.to_string())); + }, + _ => current = base, + } + } + None + }) + .unwrap_or_else(|| { + // If we didn't find a field access into a union, or both places match, then + // only return the description of the first place. + let desc_place = self.describe_place(first_borrowed_place) + .unwrap_or_else(|| "_".to_owned()); + (desc_place, "".to_string(), "".to_string(), "".to_string()) + }) } /// Reports StorageDeadOrDrop of `place` conflicts with `borrow`. @@ -560,7 +695,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let borrow_span = borrow_spans.var_or_use(); let proper_span = match *root_place { - Place::Local(local) => self.mir.local_decls[local].source_info.span, + Place::Base(PlaceBase::Local(local)) => self.mir.local_decls[local].source_info.span, _ => drop_span, }; @@ -713,20 +848,20 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ); if let Some(annotation) = self.annotate_argument_and_return_for_borrow(borrow) { - let region_name = annotation.emit(&mut err); + let region_name = annotation.emit(self, &mut err); err.span_label( borrow_span, format!("`{}` would have to be valid for `{}`...", name, region_name), ); - if let Some(fn_node_id) = self.infcx.tcx.hir().as_local_node_id(self.mir_def_id) { + if let Some(fn_hir_id) = self.infcx.tcx.hir().as_local_hir_id(self.mir_def_id) { err.span_label( drop_span, format!( "...but `{}` will be dropped here, when the function `{}` returns", name, - self.infcx.tcx.hir().name(fn_node_id), + self.infcx.tcx.hir().name_by_hir_id(fn_hir_id), ), ); @@ -735,7 +870,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { functions can only return borrows to data passed as arguments", ); err.note( - "to learn more, visit ", ); } else { @@ -747,7 +882,13 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { if let BorrowExplanation::MustBeValidFor { .. 
} = explanation { } else { - explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + explanation.add_explanation_to_diagnostic( + self.infcx.tcx, + self.mir, + &mut err, + "", + None, + ); } } else { err.span_label(borrow_span, "borrowed value does not live long enough"); @@ -767,7 +908,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { format!("value captured here{}", within), ); - explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None); } err @@ -827,7 +968,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { _ => {} } - explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None); err.buffer(&mut self.errors_buffer); } @@ -908,7 +1049,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } _ => {} } - explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + explanation.add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None); let within = if borrow_spans.for_generator() { " by generator" @@ -942,7 +1083,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let (place_desc, note) = if let Some(place_desc) = opt_place_desc { let local_kind = match borrow.borrowed_place { - Place::Local(local) => { + Place::Base(PlaceBase::Local(local)) => { match self.mir.local_kind(local) { LocalKind::ReturnPointer | LocalKind::Temp => bug!("temporary or return pointer with a name"), @@ -967,7 +1108,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let root_place = self.prefixes(&borrow.borrowed_place, PrefixSet::All) .last() .unwrap(); - let local = if let Place::Local(local) = *root_place { + let local = if let Place::Base(PlaceBase::Local(local)) = *root_place { local } else { bug!("report_cannot_return_reference_to_local: not a local") @@ -1026,7 +1167,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { Err(_) => "move || ".to_string() }; - err.span_suggestion_with_applicability( + err.span_suggestion( args_span, &format!("to force the closure to take ownership of {} (and any \ other referenced variables), use the `move` keyword", @@ -1065,7 +1206,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let escapes_from = if tcx.is_closure(self.mir_def_id) { let tables = tcx.typeck_tables_of(self.mir_def_id); let mir_hir_id = tcx.hir().def_index_to_hir_id(self.mir_def_id.index); - match tables.node_id_to_type(mir_hir_id).sty { + match tables.node_type(mir_hir_id).sty { ty::Closure(..) => "closure", ty::Generator(..) 
=> "generator", _ => bug!("Closure body doesn't have a closure or generator type"), @@ -1217,22 +1358,30 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let loan_span = loan_spans.args_or_use(); let tcx = self.infcx.tcx; - let mut err = if loan.kind == BorrowKind::Shallow { - tcx.cannot_mutate_in_match_guard( + if loan.kind == BorrowKind::Shallow { + let mut err = tcx.cannot_mutate_in_match_guard( span, loan_span, &self.describe_place(place).unwrap_or_else(|| "_".to_owned()), "assign", Origin::Mir, - ) - } else { - tcx.cannot_assign_to_borrowed( - span, - loan_span, - &self.describe_place(place).unwrap_or_else(|| "_".to_owned()), - Origin::Mir, - ) - }; + ); + loan_spans.var_span_label( + &mut err, + format!("borrow occurs due to use{}", loan_spans.describe()), + ); + + err.buffer(&mut self.errors_buffer); + + return; + } + + let mut err = tcx.cannot_assign_to_borrowed( + span, + loan_span, + &self.describe_place(place).unwrap_or_else(|| "_".to_owned()), + Origin::Mir, + ); loan_spans.var_span_label( &mut err, @@ -1240,7 +1389,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ); self.explain_why_borrow_contains_point(context, loan, None) - .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, ""); + .add_explanation_to_diagnostic(self.infcx.tcx, self.mir, &mut err, "", None); err.buffer(&mut self.errors_buffer); } @@ -1258,7 +1407,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { assigned_span: Span, err_place: &Place<'tcx>, ) { - let (from_arg, local_decl) = if let Place::Local(local) = *err_place { + let (from_arg, local_decl) = if let Place::Base(PlaceBase::Local(local)) = *err_place { if let LocalKind::Arg = self.mir.local_kind(local) { (true, Some(&self.mir.local_decls[local])) } else { @@ -1315,7 +1464,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { if let Some(decl) = local_decl { if let Some(name) = decl.name { if decl.can_be_made_mutable() { - err.span_suggestion_with_applicability( + err.span_suggestion( decl.source_info.span, "make this binding mutable", format!("mut {}", name), @@ -1341,7 +1490,8 @@ enum StorageDeadOrDrop<'tcx> { impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { - /// Adds a suggestion when a closure is invoked twice with a moved variable. + /// Adds a suggestion when a closure is invoked twice with a moved variable or when a closure + /// is moved after being invoked. /// /// ```text /// note: closure cannot be invoked more than once because it moves the variable `dict` out of @@ -1351,30 +1501,18 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// LL | for (key, value) in dict { /// | ^^^^ /// ``` - pub(super) fn add_closure_invoked_twice_with_moved_variable_suggestion( + pub(super) fn add_moved_or_invoked_closure_note( &self, location: Location, place: &Place<'tcx>, diag: &mut DiagnosticBuilder<'_>, ) { + debug!("add_moved_or_invoked_closure_note: location={:?} place={:?}", location, place); let mut target = place.local(); - debug!( - "add_closure_invoked_twice_with_moved_variable_suggestion: location={:?} place={:?} \ - target={:?}", - location, place, target, - ); for stmt in &self.mir[location.block].statements[location.statement_index..] 
{ - debug!( - "add_closure_invoked_twice_with_moved_variable_suggestion: stmt={:?} \ - target={:?}", - stmt, target, - ); + debug!("add_moved_or_invoked_closure_note: stmt={:?} target={:?}", stmt, target); if let StatementKind::Assign(into, box Rvalue::Use(from)) = &stmt.kind { - debug!( - "add_closure_invoked_twice_with_moved_variable_suggestion: into={:?} \ - from={:?}", - into, from, - ); + debug!("add_fnonce_closure_note: into={:?} from={:?}", into, from); match from { Operand::Copy(ref place) | Operand::Move(ref place) if target == place.local() => @@ -1384,21 +1522,21 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } - + // Check if we are attempting to call a closure after it has been invoked. let terminator = self.mir[location.block].terminator(); - debug!( - "add_closure_invoked_twice_with_moved_variable_suggestion: terminator={:?}", - terminator, - ); + debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator); if let TerminatorKind::Call { func: Operand::Constant(box Constant { - literal: ty::Const { ty: &ty::TyS { sty: ty::TyKind::FnDef(id, _), .. }, .. }, + literal: ty::Const { + ty: &ty::TyS { sty: ty::FnDef(id, _), .. }, + .. + }, .. }), args, .. } = &terminator.kind { - debug!("add_closure_invoked_twice_with_moved_variable_suggestion: id={:?}", id); + debug!("add_moved_or_invoked_closure_note: id={:?}", id); if self.infcx.tcx.parent(id) == self.infcx.tcx.lang_items().fn_once_trait() { let closure = match args.first() { Some(Operand::Copy(ref place)) | @@ -1406,33 +1544,49 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { place.local().unwrap(), _ => return, }; - debug!( - "add_closure_invoked_twice_with_moved_variable_suggestion: closure={:?}", - closure, - ); - if let ty::TyKind::Closure(did, _substs) = self.mir.local_decls[closure].ty.sty { - let node_id = match self.infcx.tcx.hir().as_local_node_id(did) { - Some(node_id) => node_id, - _ => return, - }; - let hir_id = self.infcx.tcx.hir().node_to_hir_id(node_id); + debug!("add_moved_or_invoked_closure_note: closure={:?}", closure); + if let ty::Closure(did, _) = self.mir.local_decls[closure].ty.sty { + let hir_id = self.infcx.tcx.hir().as_local_hir_id(did).unwrap(); - if let Some(( - span, name - )) = self.infcx.tcx.typeck_tables_of(did).closure_kind_origins().get(hir_id) { + if let Some((span, name)) = self.infcx.tcx.typeck_tables_of(did) + .closure_kind_origins() + .get(hir_id) + { diag.span_note( *span, &format!( - "closure cannot be invoked more than once because it \ - moves the variable `{}` out of its environment", - name, + "closure cannot be invoked more than once because it moves the \ + variable `{}` out of its environment", + name, ), ); + return; } } } } + + // Check if we are just moving a closure after it has been invoked. + if let Some(target) = target { + if let ty::Closure(did, _) = self.mir.local_decls[target].ty.sty { + let hir_id = self.infcx.tcx.hir().as_local_hir_id(did).unwrap(); + + if let Some((span, name)) = self.infcx.tcx.typeck_tables_of(did) + .closure_kind_origins() + .get(hir_id) + { + diag.span_note( + *span, + &format!( + "closure cannot be moved more than once as it is not `Copy` due to \ + moving the variable `{}` out of its environment", + name + ), + ); + } + } + } } /// End-user visible description of `place` if one can be found. 
If the @@ -1466,14 +1620,14 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { including_downcast: &IncludingDowncast, ) -> Result<(), ()> { match *place { - Place::Promoted(_) => { - buf.push_str("promoted"); - } - Place::Local(local) => { + Place::Base(PlaceBase::Local(local)) => { self.append_local_to_string(local, buf)?; } - Place::Static(ref static_) => { - buf.push_str(&self.infcx.tcx.item_name(static_.def_id).to_string()); + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. })) => { + buf.push_str("promoted"); + } + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. })) => { + buf.push_str(&self.infcx.tcx.item_name(def_id).to_string()); } Place::Projection(ref proj) => { match proj.elem { @@ -1496,7 +1650,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { autoderef, &including_downcast, )?; - } else if let Place::Local(local) = proj.base { + } else if let Place::Base(PlaceBase::Local(local)) = proj.base { if let Some(ClearCrossCrate::Set(BindingForm::RefForGuard)) = self.mir.local_decls[local].is_user_variable { @@ -1568,7 +1722,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { )?; buf.push_str("["); if self.append_local_to_string(index, buf).is_err() { - buf.push_str(".."); + buf.push_str("_"); } buf.push_str("]"); } @@ -1606,20 +1760,22 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } /// End-user visible description of the `field`nth field of `base` - fn describe_field(&self, base: &Place, field: Field) -> String { + fn describe_field(&self, base: &Place<'tcx>, field: Field) -> String { match *base { - Place::Local(local) => { + Place::Base(PlaceBase::Local(local)) => { let local = &self.mir.local_decls[local]; - self.describe_field_from_ty(&local.ty, field) + self.describe_field_from_ty(&local.ty, field, None) } - Place::Promoted(ref prom) => self.describe_field_from_ty(&prom.1, field), - Place::Static(ref static_) => self.describe_field_from_ty(&static_.ty, field), + Place::Base(PlaceBase::Static(ref static_)) => + self.describe_field_from_ty(&static_.ty, field, None), Place::Projection(ref proj) => match proj.elem { ProjectionElem::Deref => self.describe_field(&proj.base, field), - ProjectionElem::Downcast(def, variant_index) => - def.variants[variant_index].fields[field.index()].ident.to_string(), + ProjectionElem::Downcast(_, variant_index) => { + let base_ty = base.ty(self.mir, self.infcx.tcx).ty; + self.describe_field_from_ty(&base_ty, field, Some(variant_index)) + } ProjectionElem::Field(_, field_type) => { - self.describe_field_from_ty(&field_type, field) + self.describe_field_from_ty(&field_type, field, None) } ProjectionElem::Index(..) | ProjectionElem::ConstantIndex { .. 
} @@ -1631,33 +1787,43 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } /// End-user visible description of the `field_index`nth field of `ty` - fn describe_field_from_ty(&self, ty: &ty::Ty, field: Field) -> String { + fn describe_field_from_ty( + &self, + ty: &ty::Ty<'_>, + field: Field, + variant_index: Option + ) -> String { if ty.is_box() { // If the type is a box, the field is described from the boxed type - self.describe_field_from_ty(&ty.boxed_ty(), field) + self.describe_field_from_ty(&ty.boxed_ty(), field, variant_index) } else { match ty.sty { - ty::Adt(def, _) => if def.is_enum() { - field.index().to_string() - } else { - def.non_enum_variant().fields[field.index()] + ty::Adt(def, _) => { + let variant = if let Some(idx) = variant_index { + assert!(def.is_enum()); + &def.variants[idx] + } else { + def.non_enum_variant() + }; + variant.fields[field.index()] .ident .to_string() }, ty::Tuple(_) => field.index().to_string(), ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - self.describe_field_from_ty(&ty, field) + self.describe_field_from_ty(&ty, field, variant_index) } - ty::Array(ty, _) | ty::Slice(ty) => self.describe_field_from_ty(&ty, field), + ty::Array(ty, _) | ty::Slice(ty) => + self.describe_field_from_ty(&ty, field, variant_index), ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => { // Convert the def-id into a node-id. node-ids are only valid for // the local code in the current crate, so this returns an `Option` in case // the closure comes from another crate. But in that case we wouldn't // be borrowck'ing it, so we can just unwrap: - let node_id = self.infcx.tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = self.infcx.tcx.hir().as_local_hir_id(def_id).unwrap(); let freevar = self.infcx .tcx - .with_freevars(node_id, |fv| fv[field.index()]); + .with_freevars(hir_id, |fv| fv[field.index()]); self.infcx.tcx.hir().name(freevar.var_id()).to_string() } @@ -1666,33 +1832,19 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // (https://github.com/rust-lang/rfcs/pull/1546) bug!( "End-user description not implemented for field access on `{:?}`", - ty.sty + ty ); } } } } - /// Retrieve type of a place for the current MIR representation - fn retrieve_type_for_place(&self, place: &Place<'tcx>) -> Option { - match place { - Place::Local(local) => { - let local = &self.mir.local_decls[*local]; - Some(local.ty) - } - Place::Promoted(ref prom) => Some(prom.1), - Place::Static(ref st) => Some(st.ty), - Place::Projection(ref proj) => match proj.elem { - ProjectionElem::Field(_, ty) => Some(ty), - _ => None, - }, - } - } - - /// Check if a place is a thread-local static. + /// Checks if a place is a thread-local static. pub fn is_place_thread_local(&self, place: &Place<'tcx>) -> bool { - if let Place::Static(statik) = place { - let attrs = self.infcx.tcx.get_attrs(statik.def_id); + if let Place::Base( + PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. 
}) + ) = place { + let attrs = self.infcx.tcx.get_attrs(*def_id); let is_thread_local = attrs.iter().any(|attr| attr.check_name("thread_local")); debug!( @@ -1709,7 +1861,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { fn classify_drop_access_kind(&self, place: &Place<'tcx>) -> StorageDeadOrDrop<'tcx> { let tcx = self.infcx.tcx; match place { - Place::Local(_) | Place::Static(_) | Place::Promoted(_) => { + Place::Base(PlaceBase::Local(_)) | + Place::Base(PlaceBase::Static(_)) => { StorageDeadOrDrop::LocalStorageDead } Place::Projection(box PlaceProjection { base, elem }) => { @@ -1719,7 +1872,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { StorageDeadOrDrop::LocalStorageDead | StorageDeadOrDrop::BoxedStorageDead => { assert!( - base.ty(self.mir, tcx).to_ty(tcx).is_box(), + base.ty(self.mir, tcx).ty.is_box(), "Drop of value behind a reference or raw pointer" ); StorageDeadOrDrop::BoxedStorageDead @@ -1727,7 +1880,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { StorageDeadOrDrop::Destructor(_) => base_access, }, ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => { - let base_ty = base.ty(self.mir, tcx).to_ty(tcx); + let base_ty = base.ty(self.mir, tcx).ty; match base_ty.sty { ty::Adt(def, _) if def.has_dtor(tcx) => { // Report the outermost adt with a destructor @@ -1756,7 +1909,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { fn annotate_argument_and_return_for_borrow( &self, borrow: &BorrowData<'tcx>, - ) -> Option { + ) -> Option> { // Define a fallback for when we can't match a closure. let fallback = || { let is_closure = self.infcx.tcx.is_closure(self.mir_def_id); @@ -1765,7 +1918,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } else { let ty = self.infcx.tcx.type_of(self.mir_def_id); match ty.sty { - ty::TyKind::FnDef(_, _) | ty::TyKind::FnPtr(_) => self.annotate_fn_sig( + ty::FnDef(_, _) | ty::FnPtr(_) => self.annotate_fn_sig( self.mir_def_id, self.infcx.tcx.fn_sig(self.mir_def_id), ), @@ -1794,7 +1947,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ); // Check that the initial assignment of the reserve location is into a temporary. let mut target = *match reservation { - Place::Local(local) if self.mir.local_kind(*local) == LocalKind::Temp => local, + Place::Base(PlaceBase::Local(local)) + if self.mir.local_kind(*local) == LocalKind::Temp => local, _ => return None, }; @@ -1806,8 +1960,10 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { "annotate_argument_and_return_for_borrow: target={:?} stmt={:?}", target, stmt ); - if let StatementKind::Assign(Place::Local(assigned_to), box rvalue) = &stmt.kind - { + if let StatementKind::Assign( + Place::Base(PlaceBase::Local(assigned_to)), + box rvalue + ) = &stmt.kind { debug!( "annotate_argument_and_return_for_borrow: assigned_to={:?} \ rvalue={:?}", @@ -1930,7 +2086,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { target, terminator ); if let TerminatorKind::Call { - destination: Some((Place::Local(assigned_to), _)), + destination: Some((Place::Base(PlaceBase::Local(assigned_to)), _)), args, .. 
} = &terminator.kind @@ -1977,11 +2133,11 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { &self, did: DefId, sig: ty::PolyFnSig<'tcx>, - ) -> Option { + ) -> Option> { debug!("annotate_fn_sig: did={:?} sig={:?}", did, sig); let is_closure = self.infcx.tcx.is_closure(did); - let fn_node_id = self.infcx.tcx.hir().as_local_node_id(did)?; - let fn_decl = self.infcx.tcx.hir().fn_decl(fn_node_id)?; + let fn_hir_id = self.infcx.tcx.hir().as_local_hir_id(did)?; + let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(fn_hir_id)?; // We need to work out which arguments to highlight. We do this by looking // at the return type, where there are three cases: @@ -2007,12 +2163,12 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // anything. let return_ty = sig.output(); match return_ty.skip_binder().sty { - ty::TyKind::Ref(return_region, _, _) if return_region.has_name() && !is_closure => { + ty::Ref(return_region, _, _) if return_region.has_name() && !is_closure => { // This is case 1 from above, return type is a named reference so we need to // search for relevant arguments. let mut arguments = Vec::new(); for (index, argument) in sig.inputs().skip_binder().iter().enumerate() { - if let ty::TyKind::Ref(argument_region, _, _) = argument.sty { + if let ty::Ref(argument_region, _, _) = argument.sty { if argument_region == return_region { // Need to use the `rustc::ty` types to compare against the // `return_region`. Then use the `rustc::hir` type to get only @@ -2049,7 +2205,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { return_span, }) } - ty::TyKind::Ref(_, _, _) if is_closure => { + ty::Ref(_, _, _) if is_closure => { // This is case 2 from above but only for closures, return type is anonymous // reference so we select // the first argument. @@ -2058,9 +2214,9 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // Closure arguments are wrapped in a tuple, so we need to get the first // from that. - if let ty::TyKind::Tuple(elems) = argument_ty.sty { + if let ty::Tuple(elems) = argument_ty.sty { let argument_ty = elems.first()?; - if let ty::TyKind::Ref(_, _, _) = argument_ty.sty { + if let ty::Ref(_, _, _) = argument_ty.sty { return Some(AnnotatedBorrowFnSignature::Closure { argument_ty, argument_span, @@ -2070,7 +2226,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { None } - ty::TyKind::Ref(_, _, _) => { + ty::Ref(_, _, _) => { // This is also case 2 from above but for functions, return type is still an // anonymous reference so we select the first argument. let argument_span = fn_decl.inputs.first()?.span; @@ -2081,7 +2237,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // We expect the first argument to be a reference. match argument_ty.sty { - ty::TyKind::Ref(_, _, _) => {} + ty::Ref(_, _, _) => {} _ => return None, } @@ -2123,7 +2279,11 @@ enum AnnotatedBorrowFnSignature<'tcx> { impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { /// Annotate the provided diagnostic with information about borrow from the fn signature that /// helps explain. 
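As a rough illustration of case 1 described above (a return type that is a reference with a named region), here is a hand-written example, not part of the patch; the annotation machinery labels the arguments and return type of `longest` with the shared lifetime `'a` when the borrow outlives `s2`:

```rust
fn longest<'a>(x: &'a str, y: &'a str) -> &'a str {
    if x.len() > y.len() { x } else { y }
}

fn main() {
    let result;
    let s1 = String::from("long string is long");
    {
        let s2 = String::from("xyz");
        result = longest(s1.as_str(), s2.as_str());
    } // `s2` is dropped here while the borrow tagged `'a` is still live
    println!("{}", result); // error[E0597]: `s2` does not live long enough
}
```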
- fn emit(&self, diag: &mut DiagnosticBuilder<'_>) -> String { + fn emit( + &self, + cx: &mut MirBorrowckCtxt<'_, '_, 'tcx>, + diag: &mut DiagnosticBuilder<'_>, + ) -> String { match self { AnnotatedBorrowFnSignature::Closure { argument_ty, @@ -2131,10 +2291,10 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { } => { diag.span_label( *argument_span, - format!("has type `{}`", self.get_name_for_ty(argument_ty, 0)), + format!("has type `{}`", cx.get_name_for_ty(argument_ty, 0)), ); - self.get_region_name_for_ty(argument_ty, 0) + cx.get_region_name_for_ty(argument_ty, 0) } AnnotatedBorrowFnSignature::AnonymousFunction { argument_ty, @@ -2142,10 +2302,10 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { return_ty, return_span, } => { - let argument_ty_name = self.get_name_for_ty(argument_ty, 0); + let argument_ty_name = cx.get_name_for_ty(argument_ty, 0); diag.span_label(*argument_span, format!("has type `{}`", argument_ty_name)); - let return_ty_name = self.get_name_for_ty(return_ty, 0); + let return_ty_name = cx.get_name_for_ty(return_ty, 0); let types_equal = return_ty_name == argument_ty_name; diag.span_label( *return_span, @@ -2160,11 +2320,11 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { "argument and return type have the same lifetime due to lifetime elision rules", ); diag.note( - "to learn more, visit ", ); - self.get_region_name_for_ty(return_ty, 0) + cx.get_region_name_for_ty(return_ty, 0) } AnnotatedBorrowFnSignature::NamedFunction { arguments, @@ -2172,7 +2332,7 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { return_span, } => { // Region of return type and arguments checked to be the same earlier. - let region_name = self.get_region_name_for_ty(return_ty, 0); + let region_name = cx.get_region_name_for_ty(return_ty, 0); for (_, argument_span) in arguments { diag.span_label(*argument_span, format!("has lifetime `{}`", region_name)); } @@ -2192,37 +2352,55 @@ impl<'tcx> AnnotatedBorrowFnSignature<'tcx> { } } } +} +impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// Return the name of the provided `Ty` (that must be a reference) with a synthesized lifetime /// name where required. fn get_name_for_ty(&self, ty: ty::Ty<'tcx>, counter: usize) -> String { + let mut s = String::new(); + let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, &mut s, Namespace::TypeNS); + // We need to add synthesized lifetimes where appropriate. We do // this by hooking into the pretty printer and telling it to label the // lifetimes without names with the value `'0`. match ty.sty { - ty::TyKind::Ref(ty::RegionKind::ReLateBound(_, br), _, _) - | ty::TyKind::Ref( + ty::Ref(ty::RegionKind::ReLateBound(_, br), _, _) + | ty::Ref( ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }), _, _, - ) => with_highlight_region_for_bound_region(*br, counter, || ty.to_string()), - _ => ty.to_string(), + ) => printer.region_highlight_mode.highlighting_bound_region(*br, counter), + _ => {} } + + let _ = ty.print(printer); + s } - /// Return the name of the provided `Ty` (that must be a reference)'s region with a + /// Returns the name of the provided `Ty` (that must be a reference)'s region with a /// synthesized lifetime name where required. fn get_region_name_for_ty(&self, ty: ty::Ty<'tcx>, counter: usize) -> String { - match ty.sty { - ty::TyKind::Ref(region, _, _) => match region { - ty::RegionKind::ReLateBound(_, br) - | ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. 
}) => { - with_highlight_region_for_bound_region(*br, counter, || region.to_string()) + let mut s = String::new(); + let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, &mut s, Namespace::TypeNS); + + let region = match ty.sty { + ty::Ref(region, _, _) => { + match region { + ty::RegionKind::ReLateBound(_, br) + | ty::RegionKind::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => { + printer.region_highlight_mode.highlighting_bound_region(*br, counter) + } + _ => {} } - _ => region.to_string(), - }, + + region + } _ => bug!("ty for annotation of borrow region is not a reference"), - } + }; + + let _ = region.print(printer); + s } } @@ -2260,20 +2438,28 @@ impl UseSpans { } // Add a span label to the arguments of the closure, if it exists. - pub(super) fn args_span_label(self, err: &mut DiagnosticBuilder, message: impl Into) { + pub(super) fn args_span_label( + self, + err: &mut DiagnosticBuilder<'_>, + message: impl Into, + ) { if let UseSpans::ClosureUse { args_span, .. } = self { err.span_label(args_span, message); } } // Add a span label to the use of the captured variable, if it exists. - pub(super) fn var_span_label(self, err: &mut DiagnosticBuilder, message: impl Into) { + pub(super) fn var_span_label( + self, + err: &mut DiagnosticBuilder<'_>, + message: impl Into, + ) { if let UseSpans::ClosureUse { var_span, .. } = self { err.span_label(var_span, message); } } - /// Return `false` if this place is not used in a closure. + /// Returns `false` if this place is not used in a closure. fn for_closure(&self) -> bool { match *self { UseSpans::ClosureUse { is_generator, .. } => !is_generator, @@ -2281,7 +2467,7 @@ impl UseSpans { } } - /// Return `false` if this place is not used in a generator. + /// Returns `false` if this place is not used in a generator. fn for_generator(&self) -> bool { match *self { UseSpans::ClosureUse { is_generator, .. } => is_generator, @@ -2366,7 +2552,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { .get(location.statement_index) { Some(&Statement { - kind: StatementKind::Assign(Place::Local(local), _), + kind: StatementKind::Assign(Place::Base(PlaceBase::Local(local)), _), .. 
}) => local, _ => return OtherUse(use_span), @@ -2392,7 +2578,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { def_id, is_generator, places ); if let Some((args_span, var_span)) = self.closure_span( - *def_id, &Place::Local(target), places + *def_id, &Place::Base(PlaceBase::Local(target)), places ) { return ClosureUse { is_generator, @@ -2423,14 +2609,14 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { "closure_span: def_id={:?} target_place={:?} places={:?}", def_id, target_place, places ); - let node_id = self.infcx.tcx.hir().as_local_node_id(def_id)?; - let expr = &self.infcx.tcx.hir().expect_expr(node_id).node; - debug!("closure_span: node_id={:?} expr={:?}", node_id, expr); + let hir_id = self.infcx.tcx.hir().as_local_hir_id(def_id)?; + let expr = &self.infcx.tcx.hir().expect_expr_by_hir_id(hir_id).node; + debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr); if let hir::ExprKind::Closure( .., args_span, _ ) = expr { let var_span = self.infcx.tcx.with_freevars( - node_id, + hir_id, |freevars| { for (v, place) in freevars.iter().zip(places) { match place { @@ -2455,7 +2641,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// Helper to retrieve span(s) of given borrow from the current MIR /// representation - pub(super) fn retrieve_borrow_spans(&self, borrow: &BorrowData) -> UseSpans { + pub(super) fn retrieve_borrow_spans(&self, borrow: &BorrowData<'_>) -> UseSpans { let span = self.mir.source_info(borrow.reserve_location).span; self.borrow_spans(span, borrow.reserve_location) } diff --git a/src/librustc_mir/borrow_check/flows.rs b/src/librustc_mir/borrow_check/flows.rs index 16bb1ef78dc6f..8de39f0efc1a5 100644 --- a/src/librustc_mir/borrow_check/flows.rs +++ b/src/librustc_mir/borrow_check/flows.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Manages the dataflow bits required for borrowck. //! //! FIXME: this might be better as a "generic" fixed-point combinator, @@ -17,25 +7,25 @@ use rustc::mir::{BasicBlock, Location}; use rustc::ty::RegionVid; use rustc_data_structures::bit_set::BitIter; -use borrow_check::location::LocationIndex; +use crate::borrow_check::location::LocationIndex; use polonius_engine::Output; -use dataflow::move_paths::indexes::BorrowIndex; -use dataflow::move_paths::HasMoveData; -use dataflow::Borrows; -use dataflow::EverInitializedPlaces; -use dataflow::{FlowAtLocation, FlowsAtLocation}; -use dataflow::MaybeUninitializedPlaces; +use crate::dataflow::move_paths::indexes::BorrowIndex; +use crate::dataflow::move_paths::HasMoveData; +use crate::dataflow::Borrows; +use crate::dataflow::EverInitializedPlaces; +use crate::dataflow::{FlowAtLocation, FlowsAtLocation}; +use crate::dataflow::MaybeUninitializedPlaces; use either::Either; use std::fmt; use std::rc::Rc; // (forced to be `pub` due to its use as an associated type below.) 
crate struct Flows<'b, 'gcx: 'tcx, 'tcx: 'b> { - borrows: FlowAtLocation>, - pub uninits: FlowAtLocation>, - pub ever_inits: FlowAtLocation>, + borrows: FlowAtLocation<'tcx, Borrows<'b, 'gcx, 'tcx>>, + pub uninits: FlowAtLocation<'tcx, MaybeUninitializedPlaces<'b, 'gcx, 'tcx>>, + pub ever_inits: FlowAtLocation<'tcx, EverInitializedPlaces<'b, 'gcx, 'tcx>>, /// Polonius Output pub polonius_output: Option>>, @@ -43,9 +33,9 @@ crate struct Flows<'b, 'gcx: 'tcx, 'tcx: 'b> { impl<'b, 'gcx, 'tcx> Flows<'b, 'gcx, 'tcx> { crate fn new( - borrows: FlowAtLocation>, - uninits: FlowAtLocation>, - ever_inits: FlowAtLocation>, + borrows: FlowAtLocation<'tcx, Borrows<'b, 'gcx, 'tcx>>, + uninits: FlowAtLocation<'tcx, MaybeUninitializedPlaces<'b, 'gcx, 'tcx>>, + ever_inits: FlowAtLocation<'tcx, EverInitializedPlaces<'b, 'gcx, 'tcx>>, polonius_output: Option>>, ) -> Self { Flows { @@ -67,7 +57,7 @@ impl<'b, 'gcx, 'tcx> Flows<'b, 'gcx, 'tcx> { } } - crate fn with_outgoing_borrows(&self, op: impl FnOnce(BitIter)) { + crate fn with_outgoing_borrows(&self, op: impl FnOnce(BitIter<'_, BorrowIndex>)) { self.borrows.with_iter_outgoing(op) } } @@ -103,7 +93,7 @@ impl<'b, 'gcx, 'tcx> FlowsAtLocation for Flows<'b, 'gcx, 'tcx> { } impl<'b, 'gcx, 'tcx> fmt::Display for Flows<'b, 'gcx, 'tcx> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let mut s = String::new(); s.push_str("borrows in effect: ["); diff --git a/src/librustc_mir/borrow_check/location.rs b/src/librustc_mir/borrow_check/location.rs index b3e159dd84457..20a477576c95c 100644 --- a/src/librustc_mir/borrow_check/location.rs +++ b/src/librustc_mir/borrow_check/location.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::{BasicBlock, Location, Mir}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; diff --git a/src/librustc_mir/borrow_check/mod.rs b/src/librustc_mir/borrow_check/mod.rs index 5eca62938f7a8..275d958a3ed31 100644 --- a/src/librustc_mir/borrow_check/mod.rs +++ b/src/librustc_mir/borrow_check/mod.rs @@ -1,24 +1,17 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This query borrow-checks the MIR to (further) ensure it is not broken. 
-use borrow_check::nll::region_infer::RegionInferenceContext; +use crate::borrow_check::nll::region_infer::RegionInferenceContext; use rustc::hir; use rustc::hir::Node; use rustc::hir::def_id::DefId; use rustc::infer::InferCtxt; use rustc::lint::builtin::UNUSED_MUT; +use rustc::lint::builtin::{MUTABLE_BORROW_RESERVATION_CONFLICT}; use rustc::middle::borrowck::SignalledError; use rustc::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind}; -use rustc::mir::{ClearCrossCrate, Local, Location, Mir, Mutability, Operand, Place}; +use rustc::mir::{ + ClearCrossCrate, Local, Location, Mir, Mutability, Operand, Place, PlaceBase, Static, StaticKind +}; use rustc::mir::{Field, Projection, ProjectionElem, Rvalue, Statement, StatementKind}; use rustc::mir::{Terminator, TerminatorKind}; use rustc::ty::query::Providers; @@ -26,25 +19,26 @@ use rustc::ty::{self, TyCtxt}; use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, Level}; use rustc_data_structures::bit_set::BitSet; -use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::graph::dominators::Dominators; use smallvec::SmallVec; -use std::rc::Rc; use std::collections::BTreeMap; +use std::mem; +use std::rc::Rc; -use syntax_pos::Span; +use syntax_pos::{Span, DUMMY_SP}; -use dataflow::indexes::{BorrowIndex, InitIndex, MoveOutIndex, MovePathIndex}; -use dataflow::move_paths::{HasMoveData, LookupResult, MoveData, MoveError}; -use dataflow::Borrows; -use dataflow::DataflowResultsConsumer; -use dataflow::FlowAtLocation; -use dataflow::MoveDataParamEnv; -use dataflow::{do_dataflow, DebugFormatted}; -use dataflow::EverInitializedPlaces; -use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces}; -use util::borrowck_errors::{BorrowckErrors, Origin}; +use crate::dataflow::indexes::{BorrowIndex, InitIndex, MoveOutIndex, MovePathIndex}; +use crate::dataflow::move_paths::{HasMoveData, LookupResult, MoveData, MoveError}; +use crate::dataflow::Borrows; +use crate::dataflow::DataflowResultsConsumer; +use crate::dataflow::FlowAtLocation; +use crate::dataflow::MoveDataParamEnv; +use crate::dataflow::{do_dataflow, DebugFormatted}; +use crate::dataflow::EverInitializedPlaces; +use crate::dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces}; +use crate::util::borrowck_errors::{BorrowckErrors, Origin}; use self::borrow_set::{BorrowData, BorrowSet}; use self::flows::Flows; @@ -63,13 +57,13 @@ mod move_errors; mod mutability_errors; mod path_utils; crate mod place_ext; -mod places_conflict; +crate mod places_conflict; mod prefixes; mod used_muts; pub(crate) mod nll; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { mir_borrowck, ..*providers @@ -78,15 +72,15 @@ pub fn provide(providers: &mut Providers) { fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> BorrowCheckResult<'tcx> { let input_mir = tcx.mir_validated(def_id); - debug!("run query mir_borrowck: {}", tcx.item_path_str(def_id)); + debug!("run query mir_borrowck: {}", tcx.def_path_str(def_id)); let mut return_early; // Return early if we are not supposed to use MIR borrow checker for this function. 
return_early = !tcx.has_attr(def_id, "rustc_mir") && !tcx.use_mir_borrowck(); - if tcx.is_struct_constructor(def_id) { - // We are not borrow checking the automatically generated struct constructors + if tcx.is_constructor(def_id) { + // We are not borrow checking the automatically generated struct/variant constructors // because we want to accept structs such as this (taken from the `linked-hash-map` // crate): // ```rust @@ -118,7 +112,7 @@ fn mir_borrowck<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> BorrowC } let opt_closure_req = tcx.infer_ctxt().enter(|infcx| { - let input_mir: &Mir = &input_mir.borrow(); + let input_mir: &Mir<'_> = &input_mir.borrow(); do_mir_borrowck(&infcx, input_mir, def_id) }); debug!("mir_borrowck done"); @@ -138,7 +132,7 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( let param_env = tcx.param_env(def_id); let id = tcx .hir() - .as_local_node_id(def_id) + .as_local_hir_id(def_id) .expect("do_mir_borrowck: non-local DefId"); // Replace all regions with fresh inference variables. This @@ -166,17 +160,14 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( let mut flow_inits = FlowAtLocation::new(do_dataflow( tcx, mir, - id, + def_id, &attributes, &dead_unwinds, MaybeInitializedPlaces::new(tcx, mir, &mdpe), |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i]), )); - let locals_are_invalidated_at_exit = match tcx.hir().body_owner_kind(id) { - hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => false, - hir::BodyOwnerKind::Fn => true, - }; + let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind_by_hir_id(id).is_fn_or_closure(); let borrow_set = Rc::new(BorrowSet::build( tcx, mir, locals_are_invalidated_at_exit, &mdpe.move_data)); @@ -204,7 +195,7 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( let flow_borrows = FlowAtLocation::new(do_dataflow( tcx, mir, - id, + def_id, &attributes, &dead_unwinds, Borrows::new(tcx, mir, regioncx.clone(), &borrow_set), @@ -213,7 +204,7 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( let flow_uninits = FlowAtLocation::new(do_dataflow( tcx, mir, - id, + def_id, &attributes, &dead_unwinds, MaybeUninitializedPlaces::new(tcx, mir, &mdpe), @@ -222,14 +213,14 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( let flow_ever_inits = FlowAtLocation::new(do_dataflow( tcx, mir, - id, + def_id, &attributes, &dead_unwinds, EverInitializedPlaces::new(tcx, mir, &mdpe), |bd, i| DebugFormatted::new(&bd.move_data().inits[i]), )); - let movable_generator = match tcx.hir().get(id) { + let movable_generator = match tcx.hir().get_by_hir_id(id) { Node::Expr(&hir::Expr { node: hir::ExprKind::Closure(.., Some(hir::GeneratorMovability::Static)), .. @@ -249,6 +240,7 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( locals_are_invalidated_at_exit, access_place_error_reported: Default::default(), reservation_error_reported: Default::default(), + reservation_warnings: Default::default(), move_error_reported: BTreeMap::new(), uninitialized_error_reported: Default::default(), errors_buffer, @@ -271,6 +263,29 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( } mbcx.analyze_results(&mut state); // entry point for DataflowResultsConsumer + // Convert any reservation warnings into lints. 
+ let reservation_warnings = mem::replace(&mut mbcx.reservation_warnings, Default::default()); + for (_, (place, span, context, bk, borrow)) in reservation_warnings { + let mut initial_diag = mbcx.report_conflicting_borrow(context, (&place, span), bk, &borrow); + + let lint_root = if let ClearCrossCrate::Set(ref vsi) = mbcx.mir.source_scope_local_data { + let scope = mbcx.mir.source_info(context.loc).scope; + vsi[scope].lint_root + } else { + id + }; + + // Span and message don't matter; we overwrite them below anyway + let mut diag = mbcx.infcx.tcx.struct_span_lint_hir( + MUTABLE_BORROW_RESERVATION_CONFLICT, lint_root, DUMMY_SP, ""); + + diag.message = initial_diag.styled_message().clone(); + diag.span = initial_diag.span.clone(); + + initial_diag.cancel(); + diag.buffer(&mut mbcx.errors_buffer); + } + // For each non-user used mutable variable, check if it's been assigned from // a user-declared local. If so, then put that local into the used_mut set. // Note that this set is expected to be small - only upvars from closures @@ -314,13 +329,13 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( } let mut_span = tcx.sess.source_map().span_until_non_whitespace(span); - tcx.struct_span_lint_node( + tcx.struct_span_lint_hir( UNUSED_MUT, vsi[local_decl.source_info.scope].lint_root, span, "variable does not need to be mutable", ) - .span_suggestion_short_with_applicability( + .span_suggestion_short( mut_span, "remove this `mut`", String::new(), @@ -342,42 +357,19 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( // When borrowck=migrate, check if AST-borrowck would // error on the given code. - // rust-lang/rust#55492: loop over parents to ensure that - // errors that AST-borrowck only detects in some parent of - // a closure still allows NLL to signal an error. - let mut curr_def_id = def_id; - let signalled_any_error = loop { - match tcx.borrowck(curr_def_id).signalled_any_error { - SignalledError::NoErrorsSeen => { - // keep traversing (and borrow-checking) parents - } - SignalledError::SawSomeError => { - // stop search here - break SignalledError::SawSomeError; - } - } - - if tcx.is_closure(curr_def_id) { - curr_def_id = tcx.parent_def_id(curr_def_id) - .expect("a closure must have a parent_def_id"); - } else { - break SignalledError::NoErrorsSeen; - } - }; + // rust-lang/rust#55492, rust-lang/rust#58776 check the base def id + // for errors. AST borrowck is responsible for aggregating + // `signalled_any_error` from all of the nested closures here. + let base_def_id = tcx.closure_base_def_id(def_id); - match signalled_any_error { + match tcx.borrowck(base_def_id).signalled_any_error { SignalledError::NoErrorsSeen => { // if AST-borrowck signalled no errors, then // downgrade all the buffered MIR-borrowck errors // to warnings. 
- for err in &mut mbcx.errors_buffer { - if err.is_error() { - err.level = Level::Warning; - err.warn("This error has been downgraded to a warning \ - for backwards compatibility with previous releases.\n\ - It represents potential unsoundness in your code.\n\ - This warning will become a hard error in the future."); - } + + for err in mbcx.errors_buffer.iter_mut() { + downgrade_if_error(err); } } SignalledError::SawSomeError => { @@ -403,6 +395,20 @@ fn do_mir_borrowck<'a, 'gcx, 'tcx>( result } +fn downgrade_if_error(diag: &mut Diagnostic) { + if diag.is_error() { + diag.level = Level::Warning; + diag.warn( + "this error has been downgraded to a warning for backwards \ + compatibility with previous releases", + ); + diag.warn( + "this represents potential undefined behavior in your code and \ + this warning will become a hard error in the future", + ); + } +} + pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> { infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, mir: &'cx Mir<'tcx>, @@ -429,12 +435,19 @@ pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> { access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>, /// This field keeps track of when borrow conflict errors are reported /// for reservations, so that we don't report seemingly duplicate - /// errors for corresponding activations - /// - /// FIXME: Ideally this would be a set of BorrowIndex, not Places, - /// but it is currently inconvenient to track down the BorrowIndex - /// at the time we detect and report a reservation error. + /// errors for corresponding activations. + // + // FIXME: ideally this would be a set of `BorrowIndex`, not `Place`s, + // but it is currently inconvenient to track down the `BorrowIndex` + // at the time we detect and report a reservation error. reservation_error_reported: FxHashSet>, + /// Migration warnings to be reported for #56254. We delay reporting these + /// so that we can suppress the warning if there's a corresponding error + /// for the activation of the borrow. + reservation_warnings: FxHashMap< + BorrowIndex, + (Place<'tcx>, Span, Context, BorrowKind, BorrowData<'tcx>) + >, /// This field keeps track of move errors that are to be reported for given move indicies. /// /// There are situations where many errors can be reported for a single move out (see #53807) @@ -461,7 +474,7 @@ pub struct MirBorrowckCtxt<'cx, 'gcx: 'tcx, 'tcx: 'cx> { /// If the function we're checking is a closure, then we'll need to report back the list of /// mutable upvars that have been used. This field keeps track of them. used_mut_upvars: SmallVec<[Field; 8]>, - /// Non-lexical region inference context, if NLL is enabled. This + /// Non-lexical region inference context, if NLL is enabled. This /// contains the results from region inference and lets us e.g. /// find out which CFG points are contained in each borrow region. nonlexical_regioncx: Rc>, @@ -550,16 +563,12 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx flow_state, ); } - StatementKind::InlineAsm { - ref asm, - ref outputs, - ref inputs, - } => { + StatementKind::InlineAsm(ref asm) => { let context = ContextKind::InlineAsm.new(location); - for (o, output) in asm.outputs.iter().zip(outputs.iter()) { + for (o, output) in asm.asm.outputs.iter().zip(asm.outputs.iter()) { if o.is_indirect { // FIXME(eddyb) indirect inline asm outputs should - // be encoeded through MIR place derefs instead. + // be encoded through MIR place derefs instead. 
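Setting the inline-asm hunk aside for a moment: the `reservation_warnings` map added above, together with the `MUTABLE_BORROW_RESERVATION_CONFLICT` lint it is drained into, covers the rust-lang/rust#56254 pattern. A minimal, hand-written trigger follows (my reading of the lint; exact wording may differ by toolchain):

```rust
fn main() {
    let mut v = vec![0, 1, 2];
    let shared = &v;
    // Under two-phase borrows the `&mut v` needed by `push` is only *reserved*
    // while `shared.len()` is read, so this conflict is reported through the
    // `mutable_borrow_reservation_conflict` lint instead of a hard error.
    v.push(shared.len());
    println!("{:?}", v);
}
```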
self.access_place( context, (output, o.span), @@ -583,14 +592,13 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx ); } } - for (_, input) in inputs.iter() { + for (_, input) in asm.inputs.iter() { self.consume_operand(context, (input, span), flow_state); } } StatementKind::Nop | StatementKind::AscribeUserType(..) | StatementKind::Retag { .. } - | StatementKind::EscapeToRaw { .. } | StatementKind::StorageLive(..) => { // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant // to borrow check. @@ -598,7 +606,7 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx StatementKind::StorageDead(local) => { self.access_place( ContextKind::StorageDead.new(location), - (&Place::Local(local), span), + (&Place::Base(PlaceBase::Local(local)), span), (Shallow(None), Write(WriteKind::StorageDeadOrDrop)), LocalMutationIsAllowed::Yes, flow_state, @@ -642,8 +650,7 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx let drop_place_ty = drop_place.ty(self.mir, self.infcx.tcx); // Erase the regions. - let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty) - .to_ty(self.infcx.tcx); + let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty; // "Lift" into the gcx -- once regions are erased, this type should be in the // global arenas; this "lift" operation basically just asserts that is true, but @@ -714,7 +721,7 @@ impl<'cx, 'gcx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx cleanup: _, } => { self.consume_operand(ContextKind::Assert.new(loc), (cond, span), flow_state); - use rustc::mir::interpret::EvalErrorKind::BoundsCheck; + use rustc::mir::interpret::InterpError::BoundsCheck; if let BoundsCheck { ref len, ref index } = *msg { self.consume_operand(ContextKind::Assert.new(loc), (len, span), flow_state); self.consume_operand(ContextKind::Assert.new(loc), (index, span), flow_state); @@ -845,12 +852,12 @@ enum WriteKind { /// When checking permissions for a place access, this flag is used to indicate that an immutable /// local place can be mutated. -/// -/// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications: -/// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()` -/// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and -/// `is_declared_mutable()` -/// - Take flow state into consideration in `is_assignable()` for local variables +// +// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications: +// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`. +// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and +// `is_declared_mutable()`. +// - Take flow state into consideration in `is_assignable()` for local variables. #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum LocalMutationIsAllowed { Yes, @@ -905,7 +912,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// place is initialized and (b) it is not borrowed in some way that would prevent this /// access. /// - /// Returns true if an error is reported, false otherwise. + /// Returns `true` if an error is reported. 
fn access_place( &mut self, context: Context, @@ -952,11 +959,18 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let conflict_error = self.check_access_for_conflict(context, place_span, sd, rw, flow_state); + if let (Activation(_, borrow_idx), true) = (kind.1, conflict_error) { + // Suppress this warning when there's an error being emited for the + // same borrow: fixing the error is likely to fix the warning. + self.reservation_warnings.remove(&borrow_idx); + } + if conflict_error || mutability_error { debug!( "access_place: logging error place_span=`{:?}` kind=`{:?}`", place_span, kind ); + self.access_place_error_reported .insert((place_span.0.clone(), place_span.1)); } @@ -1007,8 +1021,10 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { Control::Continue } - (Read(_), BorrowKind::Shared) | (Reservation(..), BorrowKind::Shared) - | (Read(_), BorrowKind::Shallow) | (Reservation(..), BorrowKind::Shallow) => { + (Read(_), BorrowKind::Shared) + | (Read(_), BorrowKind::Shallow) + | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Unique) + | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => { Control::Continue } @@ -1020,7 +1036,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { (Read(kind), BorrowKind::Unique) | (Read(kind), BorrowKind::Mut { .. }) => { // Reading from mere reservations of mutable-borrows is OK. if !is_active(&this.dominators, borrow, context.loc) { - assert!(allow_two_phase_borrow(&this.infcx.tcx, borrow.kind)); + assert!(allow_two_phase_borrow(&tcx, borrow.kind)); return Control::Continue; } @@ -1028,20 +1044,45 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { match kind { ReadKind::Copy => { this.report_use_while_mutably_borrowed(context, place_span, borrow) + .buffer(&mut this.errors_buffer); } ReadKind::Borrow(bk) => { - this.report_conflicting_borrow(context, place_span, bk, &borrow) + this.report_conflicting_borrow(context, place_span, bk, borrow) + .buffer(&mut this.errors_buffer); } } Control::Break } - (Reservation(kind), BorrowKind::Unique) - | (Reservation(kind), BorrowKind::Mut { .. }) + (Reservation(WriteKind::MutableBorrow(bk)), BorrowKind::Shallow) + | (Reservation(WriteKind::MutableBorrow(bk)), BorrowKind::Shared) if { + tcx.migrate_borrowck() + } => { + let bi = this.borrow_set.location_map[&context.loc]; + debug!( + "recording invalid reservation of place: {:?} with \ + borrow index {:?} as warning", + place_span.0, + bi, + ); + // rust-lang/rust#56254 - This was previously permitted on + // the 2018 edition so we emit it as a warning. We buffer + // these sepately so that we only emit a warning if borrow + // checking was otherwise successful. + this.reservation_warnings.insert( + bi, + (place_span.0.clone(), place_span.1, context, bk, borrow.clone()), + ); + + // Don't suppress actual errors. + Control::Continue + } + + (Reservation(kind), _) | (Activation(kind, _), _) | (Write(kind), _) => { match rw { - Reservation(_) => { + Reservation(..) 
=> { debug!( "recording invalid reservation of \ place: {:?}", @@ -1062,7 +1103,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { error_reported = true; match kind { WriteKind::MutableBorrow(bk) => { - this.report_conflicting_borrow(context, place_span, bk, &borrow) + this.report_conflicting_borrow(context, place_span, bk, borrow) + .buffer(&mut this.errors_buffer); } WriteKind::StorageDeadOrDrop => { this.report_borrowed_value_does_not_live_long_enough( @@ -1075,7 +1117,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { this.report_illegal_mutation_of_borrowed(context, place_span, borrow) } WriteKind::Move => { - this.report_move_out_while_borrowed(context, place_span, &borrow) + this.report_move_out_while_borrowed(context, place_span, borrow) } } Control::Break @@ -1112,7 +1154,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // Special case: you can assign a immutable local variable // (e.g., `x = ...`) so long as it has never been initialized // before (at this point in the flow). - if let &Place::Local(local) = place_span.0 { + if let &Place::Base(PlaceBase::Local(local)) = place_span.0 { if let Mutability::Not = self.mir.local_decls[local].mutability { // check for reassignments to immutable local variables self.check_if_reassignment_to_immutable_state( @@ -1239,8 +1281,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // captures of a closure are copied/moved directly // when generating MIR. match operands[field.index()] { - Operand::Move(Place::Local(local)) - | Operand::Copy(Place::Local(local)) => { + Operand::Move(Place::Base(PlaceBase::Local(local))) + | Operand::Copy(Place::Base(PlaceBase::Local(local))) => { self.used_mut.insert(local); } Operand::Move(ref place @ Place::Projection(_)) @@ -1250,10 +1292,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { self.used_mut_upvars.push(field); } } - Operand::Move(Place::Static(..)) - | Operand::Copy(Place::Static(..)) - | Operand::Move(Place::Promoted(..)) - | Operand::Copy(Place::Promoted(..)) + Operand::Move(Place::Base(PlaceBase::Static(..))) + | Operand::Copy(Place::Base(PlaceBase::Static(..))) | Operand::Constant(..) => {} } } @@ -1336,14 +1376,15 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // // FIXME: allow thread-locals to borrow other thread locals? let (might_be_alive, will_be_dropped) = match root_place { - Place::Promoted(_) => (true, false), - Place::Static(_) => { + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. })) => { + (true, false) + } + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Static(_), .. })) => { // Thread-locals might be dropped after the function exits, but // "true" statics will never be. - let is_thread_local = self.is_place_thread_local(&root_place); - (true, is_thread_local) + (true, self.is_place_thread_local(&root_place)) } - Place::Local(_) => { + Place::Base(PlaceBase::Local(_)) => { // Locals are always dropped at function exit, and if they // have a destructor it would've been called already. 
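The "special case" noted in the hunk above (assigning to an immutable local that has never been initialized) corresponds to ordinary deferred initialization; a hand-written example, with approximate error wording:

```rust
fn main() {
    let x;       // immutable binding, not yet initialized
    x = 42;      // OK: the first assignment initializes `x`, it does not mutate it
    // x = 43;   // error[E0384]: cannot assign twice to immutable variable `x`
    println!("{}", x);
}
```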
(false, self.locals_are_invalidated_at_exit) @@ -1369,7 +1410,8 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { place, borrow.kind, root_place, - sd + sd, + places_conflict::PlaceConflictBias::Overlap, ) { debug!("check_for_invalidation_at_exit({:?}): INVALID", place); // FIXME: should be talking about the region lifetime instead @@ -1601,10 +1643,9 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { last_prefix = prefix; } match *last_prefix { - Place::Local(_) => panic!("should have move path for every Local"), + Place::Base(PlaceBase::Local(_)) => panic!("should have move path for every Local"), Place::Projection(_) => panic!("PrefixSet::All meant don't stop for Projection"), - Place::Promoted(_) | - Place::Static(_) => Err(NoMovePathFound::ReachedStatic), + Place::Base(PlaceBase::Static(_)) => Err(NoMovePathFound::ReachedStatic), } } @@ -1630,8 +1671,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let mut place = place; loop { match *place { - Place::Promoted(_) | - Place::Local(_) | Place::Static(_) => { + Place::Base(PlaceBase::Local(_)) | Place::Base(PlaceBase::Static(_)) => { // assigning to `x` does not require `x` be initialized. break; } @@ -1667,7 +1707,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // assigning to `P.f` requires `P` itself // be already initialized let tcx = self.infcx.tcx; - match base.ty(self.mir, tcx).to_ty(tcx).sty { + match base.ty(self.mir, tcx).ty.sty { ty::Adt(def, _) if def.has_dtor(tcx) => { self.check_if_path_or_subpath_is_moved( context, InitializationRequiringAction::Assignment, @@ -1772,7 +1812,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { // no move out from an earlier location) then this is an attempt at initialization // of the union - we should error in that case. let tcx = this.infcx.tcx; - if let ty::TyKind::Adt(def, _) = base.ty(this.mir, tcx).to_ty(tcx).sty { + if let ty::Adt(def, _) = base.ty(this.mir, tcx).ty.sty { if def.is_union() { if this.move_data.path_map[mpi].iter().any(|moi| { this.move_data.moves[*moi].source.is_predecessor_of( @@ -1794,9 +1834,9 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } - /// Check the permissions for the given place and read or write kind + /// Checks the permissions for the given place and read or write kind /// - /// Returns true if an error is reported, false otherwise. + /// Returns `true` if an error is reported. 
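Conversely, the initialization check touched above (assigning to `P.f` requires `P` itself to be initialized) fires on code like this hand-written sketch; the error text is approximate:

```rust
struct S { f: u32 }

fn main() {
    let mut s: S;
    s.f = 1;     // error[E0381]: assign to part of possibly uninitialized variable: `s`
    println!("{}", s.f);
}
```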
fn check_access_permissions( &mut self, (place, span): (&Place<'tcx>, Span), @@ -1954,7 +1994,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { ) { match root_place { RootPlace { - place: Place::Local(local), + place: Place::Base(PlaceBase::Local(local)), is_local_mutation_allowed, } => { // If the local may have been initialized, and it is now currently being @@ -1979,11 +2019,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } RootPlace { - place: Place::Promoted(..), - is_local_mutation_allowed: _, - } => {} - RootPlace { - place: Place::Static(..), + place: Place::Base(PlaceBase::Static(..)), is_local_mutation_allowed: _, } => {} } @@ -1997,7 +2033,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { is_local_mutation_allowed: LocalMutationIsAllowed, ) -> Result, &'d Place<'tcx>> { match *place { - Place::Local(local) => { + Place::Base(PlaceBase::Local(local)) => { let local = &self.mir.local_decls[local]; match local.mutability { Mutability::Not => match is_local_mutation_allowed { @@ -2019,12 +2055,13 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } // The rules for promotion are made by `qualify_consts`, there wouldn't even be a // `Place::Promoted` if the promotion weren't 100% legal. So we just forward this - Place::Promoted(_) => Ok(RootPlace { - place, - is_local_mutation_allowed, - }), - Place::Static(ref static_) => { - if self.infcx.tcx.is_static(static_.def_id) != Some(hir::Mutability::MutMutable) { + Place::Base(PlaceBase::Static(box Static{kind: StaticKind::Promoted(_), ..})) => + Ok(RootPlace { + place, + is_local_mutation_allowed, + }), + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. })) => { + if self.infcx.tcx.is_static(def_id) != Some(hir::Mutability::MutMutable) { Err(place) } else { Ok(RootPlace { @@ -2036,7 +2073,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { Place::Projection(ref proj) => { match proj.elem { ProjectionElem::Deref => { - let base_ty = proj.base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx); + let base_ty = proj.base.ty(self.mir, self.infcx.tcx).ty; // Check the kind of deref to decide match base_ty.sty { diff --git a/src/librustc_mir/borrow_check/move_errors.rs b/src/librustc_mir/borrow_check/move_errors.rs index fb93c41ce4f76..7efe1d83c2e5f 100644 --- a/src/librustc_mir/borrow_check/move_errors.rs +++ b/src/librustc_mir/borrow_check/move_errors.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use core::unicode::property::Pattern_White_Space; use std::fmt::{self, Display}; @@ -16,13 +6,13 @@ use rustc::ty; use rustc_errors::{DiagnosticBuilder,Applicability}; use syntax_pos::Span; -use borrow_check::MirBorrowckCtxt; -use borrow_check::prefixes::PrefixSet; -use dataflow::move_paths::{ +use crate::borrow_check::MirBorrowckCtxt; +use crate::borrow_check::prefixes::PrefixSet; +use crate::dataflow::move_paths::{ IllegalMoveOrigin, IllegalMoveOriginKind, InitLocation, LookupResult, MoveError, MovePathIndex, }; -use util::borrowck_errors::{BorrowckErrors, Origin}; +use crate::util::borrowck_errors::{BorrowckErrors, Origin}; // Often when desugaring a pattern match we may have many individual moves in // MIR that are all part of one operation from the user's point-of-view. For @@ -73,7 +63,7 @@ enum BorrowedContentSource { } impl Display for BorrowedContentSource { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { BorrowedContentSource::Arc => write!(f, "an `Arc`"), BorrowedContentSource::Rc => write!(f, "an `Rc`"), @@ -121,7 +111,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // If that ever stops being the case, then the ever initialized // flow could be used. if let Some(StatementKind::Assign( - Place::Local(local), + Place::Base(PlaceBase::Local(local)), box Rvalue::Use(Operand::Move(move_from)), )) = self.mir.basic_blocks()[location.block] .statements @@ -250,7 +240,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { fn report(&mut self, error: GroupedMoveError<'tcx>) { let (mut err, err_span) = { - let (span, original_path, kind): (Span, &Place<'tcx>, &IllegalMoveOriginKind) = + let (span, original_path, kind): (Span, &Place<'tcx>, &IllegalMoveOriginKind<'_>) = match error { GroupedMoveError::MovesFromPlace { span, @@ -276,7 +266,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // Inspect the type of the content behind the // borrow to provide feedback about why this // was a move rather than a copy. - let ty = place.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx); + let ty = place.ty(self.mir, self.infcx.tcx).ty; let is_upvar_field_projection = self.prefixes(&original_path, PrefixSet::All) .any(|p| p.is_upvar_field_projection(self.mir, &self.infcx.tcx) @@ -318,9 +308,8 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { let upvar_decl = &self.mir.upvar_decls[field.index()]; let upvar_hir_id = upvar_decl.var_hir_id.assert_crate_local(); - let upvar_node_id = - self.infcx.tcx.hir().hir_to_node_id(upvar_hir_id); - let upvar_span = self.infcx.tcx.hir().span(upvar_node_id); + let upvar_span = self.infcx.tcx.hir().span_by_hir_id( + upvar_hir_id); diag.span_label(upvar_span, "captured outer variable"); break; } @@ -378,14 +367,14 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // expressions `a[b]`, which roughly desugar to // `*Index::index(&a, b)` or // `*IndexMut::index_mut(&mut a, b)`. 
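The index-expression desugaring mentioned just above is the reason a by-value use of `v[i]` is reported as a move out of borrowed content; a hand-written example (help text approximate, it varies across compiler versions):

```rust
fn main() {
    let v = vec![String::from("a"), String::from("b")];
    // `v[0]` desugars to `*Index::index(&v, 0)`, so binding it by value tries
    // to move out of a borrow of the vector's contents.
    let s = v[0];    // error[E0507]: cannot move out of indexed content
                     // help: consider borrowing here: `&v[0]`
    println!("{}", s);
}
```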
- err.span_suggestion_with_applicability( + err.span_suggestion( span, "consider removing the `*`", snippet[1..].to_owned(), Applicability::Unspecified, ); } else { - err.span_suggestion_with_applicability( + err.span_suggestion( span, "consider borrowing here", format!("&{}", snippet), @@ -449,7 +438,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { suggestions.sort_unstable_by_key(|&(span, _, _)| span); suggestions.dedup_by_key(|&mut (span, _, _)| span); for (span, to_remove, suggestion) in suggestions { - err.span_suggestion_with_applicability( + err.span_suggestion( span, &format!("consider removing the `{}`", to_remove), suggestion, @@ -541,9 +530,9 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // We're only interested in assignments (in particular, where the // assignment came from - was it an `Rc` or `Arc`?). if let StatementKind::Assign(_, box Rvalue::Ref(_, _, source)) = &stmt.kind { - let ty = source.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx); + let ty = source.ty(self.mir, self.infcx.tcx).ty; let ty = match ty.sty { - ty::TyKind::Ref(_, ty, _) => ty, + ty::Ref(_, ty, _) => ty, _ => ty, }; debug!("borrowed_content_source: ty={:?}", ty); @@ -566,9 +555,9 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { _ => continue, }; - let ty = source.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx); + let ty = source.ty(self.mir, self.infcx.tcx).ty; let ty = match ty.sty { - ty::TyKind::Ref(_, ty, _) => ty, + ty::Ref(_, ty, _) => ty, _ => ty, }; debug!("borrowed_content_source: ty={:?}", ty); @@ -592,7 +581,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { base, elem: ProjectionElem::Deref, }) = place { - if base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx).is_unsafe_ptr() { + if base.ty(self.mir, self.infcx.tcx).ty.is_unsafe_ptr() { return BorrowedContentSource::DerefRawPointer; } } diff --git a/src/librustc_mir/borrow_check/mutability_errors.rs b/src/librustc_mir/borrow_check/mutability_errors.rs index ab819aafc47c7..b780511315d81 100644 --- a/src/librustc_mir/borrow_check/mutability_errors.rs +++ b/src/librustc_mir/borrow_check/mutability_errors.rs @@ -1,28 +1,20 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
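The mutability_errors.rs changes that follow drive diagnostics of this shape; a minimal, hand-written trigger for the ", as it is not declared as mutable" reason built below (wording approximate):

```rust
fn main() {
    let v = vec![1, 2, 3];
    v.push(4);   // error[E0596]: cannot borrow `v` as mutable, as it is not declared as mutable
                 // help: consider changing this to be mutable: `mut v`
}
```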
- use rustc::hir; use rustc::hir::Node; use rustc::mir::{self, BindingForm, Constant, ClearCrossCrate, Local, Location, Mir}; -use rustc::mir::{Mutability, Operand, Place, Projection, ProjectionElem, Static, Terminator}; -use rustc::mir::TerminatorKind; -use rustc::ty::{self, Const, DefIdTree, TyS, TyKind, TyCtxt}; +use rustc::mir::{ + Mutability, Operand, Place, PlaceBase, Projection, ProjectionElem, Static, StaticKind, +}; +use rustc::mir::{Terminator, TerminatorKind}; +use rustc::ty::{self, Const, DefIdTree, TyS, TyCtxt}; use rustc_data_structures::indexed_vec::Idx; use syntax_pos::Span; use syntax_pos::symbol::keywords; -use dataflow::move_paths::InitLocation; -use borrow_check::MirBorrowckCtxt; -use util::borrowck_errors::{BorrowckErrors, Origin}; -use util::collect_writes::FindAssignments; -use util::suggest_ref_mut; +use crate::dataflow::move_paths::InitLocation; +use crate::borrow_check::MirBorrowckCtxt; +use crate::util::borrowck_errors::{BorrowckErrors, Origin}; +use crate::util::collect_writes::FindAssignments; +use crate::util::suggest_ref_mut; use rustc_errors::Applicability; #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -55,9 +47,9 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { debug!("report_mutability_error: access_place_desc={:?}", access_place_desc); match the_place_err { - Place::Local(local) => { + Place::Base(PlaceBase::Local(local)) => { item_msg = format!("`{}`", access_place_desc.unwrap()); - if let Place::Local(_) = access_place { + if let Place::Base(PlaceBase::Local(_)) = access_place { reason = ", as it is not declared as mutable".to_string(); } else { let name = self.mir.local_decls[*local] @@ -72,7 +64,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { elem: ProjectionElem::Field(upvar_index, _), }) => { debug_assert!(is_closure_or_generator( - base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx) + base.ty(self.mir, self.infcx.tcx).ty )); item_msg = format!("`{}`", access_place_desc.unwrap()); @@ -88,11 +80,12 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { base, elem: ProjectionElem::Deref, }) => { - if *base == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty() { + if *base == Place::Base(PlaceBase::Local(Local::new(1))) && + !self.mir.upvar_decls.is_empty() { item_msg = format!("`{}`", access_place_desc.unwrap()); debug_assert!(self.mir.local_decls[Local::new(1)].ty.is_region_ptr()); debug_assert!(is_closure_or_generator( - the_place_err.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx) + the_place_err.ty(self.mir, self.infcx.tcx).ty )); reason = if access_place.is_upvar_field_projection(self.mir, @@ -102,7 +95,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { ", as `Fn` closures cannot mutate their captured variables".to_string() } } else if { - if let Place::Local(local) = *base { + if let Place::Base(PlaceBase::Local(local)) = *base { if let Some(ClearCrossCrate::Set(BindingForm::RefForGuard)) = self.mir.local_decls[local].is_user_variable { true @@ -117,7 +110,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { reason = ", as it is immutable for the pattern guard".to_string(); } else { let pointer_type = - if base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx).is_region_ptr() { + if base.ty(self.mir, self.infcx.tcx).ty.is_region_ptr() { "`&` reference" } else { "`*const` pointer" @@ -138,10 +131,11 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { } } - Place::Promoted(_) => unreachable!(), + Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. 
})) => + unreachable!(), - Place::Static(box Static { def_id, ty: _ }) => { - if let Place::Static(_) = access_place { + Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), .. })) => { + if let Place::Base(PlaceBase::Static(_)) = access_place { item_msg = format!("immutable static item `{}`", access_place_desc.unwrap()); reason = String::new(); } else { @@ -238,10 +232,10 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { if let Some((span, message)) = annotate_struct_field( self.infcx.tcx, - base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx), + base.ty(self.mir, self.infcx.tcx).ty, field, ) { - err.span_suggestion_with_applicability( + err.span_suggestion( span, "consider changing this to be mutable", message, @@ -251,7 +245,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { }, // Suggest removing a `&mut` from the use of a mutable reference. - Place::Local(local) + Place::Base(PlaceBase::Local(local)) if { self.mir.local_decls.get(*local).map(|local_decl| { if let ClearCrossCrate::Set( @@ -267,7 +261,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // Otherwise, check if the name is the self kewyord - in which case // we have an explicit self. Do the same thing in this case and check // for a `self: &mut Self` to suggest removing the `&mut`. - if let ty::TyKind::Ref( + if let ty::Ref( _, _, hir::Mutability::MutMutable ) = local_decl.ty.sty { true @@ -286,7 +280,8 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // We want to suggest users use `let mut` for local (user // variable) mutations... - Place::Local(local) if self.mir.local_decls[*local].can_be_made_mutable() => { + Place::Base(PlaceBase::Local(local)) + if self.mir.local_decls[*local].can_be_made_mutable() => { // ... but it doesn't make sense to suggest it on // variables that are `ref x`, `ref mut x`, `&self`, // or `&mut self` (such variables are simply not @@ -295,7 +290,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { assert_eq!(local_decl.mutability, Mutability::Not); err.span_label(span, format!("cannot {ACT}", ACT = act)); - err.span_suggestion_with_applicability( + err.span_suggestion( local_decl.source_info.span, "consider changing this to be mutable", format!("mut {}", local_decl.name.unwrap()), @@ -309,7 +304,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { elem: ProjectionElem::Field(upvar_index, _), }) => { debug_assert!(is_closure_or_generator( - base.ty(self.mir, self.infcx.tcx).to_ty(self.infcx.tcx) + base.ty(self.mir, self.infcx.tcx).ty )); err.span_label(span, format!("cannot {ACT}", ACT = act)); @@ -326,7 +321,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { _, ) = pat.node { - err.span_suggestion_with_applicability( + err.span_suggestion( upvar_ident.span, "consider changing this to be mutable", format!("mut {}", upvar_ident.name), @@ -339,7 +334,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // complete hack to approximate old AST-borrowck // diagnostic: if the span starts with a mutable borrow of // a local variable, then just suggest the user remove it. 
- Place::Local(_) + Place::Base(PlaceBase::Local(_)) if { if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) { snippet.starts_with("&mut ") @@ -353,7 +348,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { } Place::Projection(box Projection { - base: Place::Local(local), + base: Place::Base(PlaceBase::Local(local)), elem: ProjectionElem::Deref, }) if { if let Some(ClearCrossCrate::Set(BindingForm::RefForGuard)) = @@ -377,7 +372,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { // FIXME: can this case be generalized to work for an // arbitrary base for the projection? Place::Projection(box Projection { - base: Place::Local(local), + base: Place::Base(PlaceBase::Local(local)), elem: ProjectionElem::Deref, }) if self.mir.local_decls[*local].is_user_variable.is_some() => { @@ -420,7 +415,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { }; if let Some((err_help_span, suggested_code)) = suggestion { - err.span_suggestion_with_applicability( + err.span_suggestion( err_help_span, &format!("consider changing this to be a mutable {}", pointer_desc), suggested_code, @@ -456,7 +451,8 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { Place::Projection(box Projection { base, elem: ProjectionElem::Deref, - }) if *base == Place::Local(Local::new(1)) && !self.mir.upvar_decls.is_empty() => + }) if *base == Place::Base(PlaceBase::Local(Local::new(1))) && + !self.mir.upvar_decls.is_empty() => { err.span_label(span, format!("cannot {ACT}", ACT = act)); err.span_help( @@ -466,7 +462,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { } Place::Projection(box Projection { - base: Place::Local(local), + base: Place::Base(PlaceBase::Local(local)), elem: ProjectionElem::Deref, }) if error_access == AccessKind::MutableBorrow => { err.span_label(span, format!("cannot {ACT}", ACT = act)); @@ -480,7 +476,7 @@ impl<'a, 'gcx, 'tcx> MirBorrowckCtxt<'a, 'gcx, 'tcx> { func: Operand::Constant(box Constant { literal: Const { ty: &TyS { - sty: TyKind::FnDef(id, substs), + sty: ty::FnDef(id, substs), .. }, .. @@ -620,11 +616,11 @@ fn suggest_ampmut<'cx, 'gcx, 'tcx>( }) } -fn is_closure_or_generator(ty: ty::Ty) -> bool { +fn is_closure_or_generator(ty: ty::Ty<'_>) -> bool { ty.is_closure() || ty.is_generator() } -/// Add a suggestion to a struct definition given a field access to a local. +/// Adds a suggestion to a struct definition given a field access to a local. /// This function expects the local to be a reference to a struct in order to produce a suggestion. /// /// ```text @@ -637,12 +633,12 @@ fn annotate_struct_field( field: &mir::Field, ) -> Option<(Span, String)> { // Expect our local to be a reference to a struct of some kind. - if let ty::TyKind::Ref(_, ty, _) = ty.sty { - if let ty::TyKind::Adt(def, _) = ty.sty { + if let ty::Ref(_, ty, _) = ty.sty { + if let ty::Adt(def, _) = ty.sty { let field = def.all_fields().nth(field.index())?; // Use the HIR types to construct the diagnostic message. - let node_id = tcx.hir().as_local_node_id(field.did)?; - let node = tcx.hir().find(node_id)?; + let hir_id = tcx.hir().as_local_hir_id(field.did)?; + let node = tcx.hir().find_by_hir_id(hir_id)?; // Now we're dealing with the actual struct that we're going to suggest a change to, // we can expect a field that is an immutable reference to a type. 
if let hir::Node::Field(field) = node { diff --git a/src/librustc_mir/borrow_check/nll/constraint_generation.rs b/src/librustc_mir/borrow_check/nll/constraint_generation.rs index 1d8d028137a5b..bf9cff1e4ae03 100644 --- a/src/librustc_mir/borrow_check/nll/constraint_generation.rs +++ b/src/librustc_mir/borrow_check/nll/constraint_generation.rs @@ -1,27 +1,17 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::borrow_set::BorrowSet; -use borrow_check::location::LocationTable; -use borrow_check::nll::ToRegionVid; -use borrow_check::nll::facts::AllFacts; -use borrow_check::nll::region_infer::values::LivenessValues; +use crate::borrow_check::borrow_set::BorrowSet; +use crate::borrow_check::location::LocationTable; +use crate::borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::facts::AllFacts; +use crate::borrow_check::nll::region_infer::values::LivenessValues; use rustc::infer::InferCtxt; use rustc::mir::visit::TyContext; use rustc::mir::visit::Visitor; -use rustc::mir::{BasicBlock, BasicBlockData, Location, Mir, Place, Rvalue}; -use rustc::mir::{Statement, Terminator}; +use rustc::mir::{BasicBlock, BasicBlockData, Location, Mir, Place, PlaceBase, Rvalue}; +use rustc::mir::{SourceInfo, Statement, Terminator}; use rustc::mir::UserTypeProjection; use rustc::ty::fold::TypeFoldable; -use rustc::ty::subst::Substs; use rustc::ty::{self, ClosureSubsts, GeneratorSubsts, RegionVid}; +use rustc::ty::subst::SubstsRef; pub(super) fn generate_constraints<'cx, 'gcx, 'tcx>( infcx: &InferCtxt<'cx, 'gcx, 'tcx>, @@ -60,7 +50,7 @@ impl<'cg, 'cx, 'gcx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'gcx /// We sometimes have `substs` within an rvalue, or within a /// call. Make them live at the location where they appear. - fn visit_substs(&mut self, substs: &&'tcx Substs<'tcx>, location: Location) { + fn visit_substs(&mut self, substs: &SubstsRef<'tcx>, location: Location) { self.add_regular_live_constraint(*substs, location); self.super_substs(substs); } @@ -76,11 +66,12 @@ impl<'cg, 'cx, 'gcx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'gcx /// call. Make them live at the location where they appear. fn visit_ty(&mut self, ty: &ty::Ty<'tcx>, ty_context: TyContext) { match ty_context { - TyContext::ReturnTy(source_info) - | TyContext::YieldTy(source_info) - | TyContext::LocalDecl { source_info, .. } => { + TyContext::ReturnTy(SourceInfo { span, .. }) + | TyContext::YieldTy(SourceInfo { span, .. }) + | TyContext::UserTy(span) + | TyContext::LocalDecl { source_info: SourceInfo { span, .. }, .. } => { span_bug!( - source_info.span, + span, "should not be visiting outside of the CFG: {:?}", ty_context ); @@ -139,7 +130,7 @@ impl<'cg, 'cx, 'gcx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'gcx // When we see `X = ...`, then kill borrows of // `(*X).foo` and so forth. 
if let Some(all_facts) = self.all_facts { - if let Place::Local(temp) = place { + if let Place::Base(PlaceBase::Local(temp)) = place { if let Some(borrow_indices) = self.borrow_set.local_map.get(temp) { all_facts.killed.reserve(borrow_indices.len()); for &borrow_index in borrow_indices { @@ -183,7 +174,7 @@ impl<'cg, 'cx, 'gcx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'gcx &mut self, _place: &Place<'tcx>, _variance: &ty::Variance, - _user_ty: &UserTypeProjection<'tcx>, + _user_ty: &UserTypeProjection, _location: Location, ) { } diff --git a/src/librustc_mir/borrow_check/nll/constraints/graph.rs b/src/librustc_mir/borrow_check/nll/constraints/graph.rs index 3d4b2456f9657..c4b2a5daef89a 100644 --- a/src/librustc_mir/borrow_check/nll/constraints/graph.rs +++ b/src/librustc_mir/borrow_check/nll/constraints/graph.rs @@ -1,16 +1,6 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::type_check::Locations; -use borrow_check::nll::constraints::ConstraintIndex; -use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; +use crate::borrow_check::nll::type_check::Locations; +use crate::borrow_check::nll::constraints::ConstraintIndex; +use crate::borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; use rustc::mir::ConstraintCategory; use rustc::ty::RegionVid; use rustc_data_structures::graph; @@ -81,7 +71,7 @@ impl ConstraintGraphDirecton for Reverse { } impl ConstraintGraph { - /// Create a "dependency graph" where each region constraint `R1: + /// Creates a "dependency graph" where each region constraint `R1: /// R2` is treated as an edge `R1 -> R2`. We use this graph to /// construct SCCs for region inference but also for error /// reporting. @@ -196,7 +186,7 @@ crate struct RegionGraph<'s, D: ConstraintGraphDirecton> { } impl<'s, D: ConstraintGraphDirecton> RegionGraph<'s, D> { - /// Create a "dependency graph" where each region constraint `R1: + /// Creates a "dependency graph" where each region constraint `R1: /// R2` is treated as an edge `R1 -> R2`. We use this graph to /// construct SCCs for region inference but also for error /// reporting. diff --git a/src/librustc_mir/borrow_check/nll/constraints/mod.rs b/src/librustc_mir/borrow_check/nll/constraints/mod.rs index bfac33b34c7dc..b1091eb5ac81f 100644 --- a/src/librustc_mir/borrow_check/nll/constraints/mod.rs +++ b/src/librustc_mir/borrow_check/nll/constraints/mod.rs @@ -1,18 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::ConstraintCategory; use rustc::ty::RegionVid; use rustc_data_structures::graph::scc::Sccs; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use borrow_check::nll::type_check::Locations; +use crate::borrow_check::nll::type_check::Locations; use std::fmt; use std::ops::Deref; @@ -41,7 +31,7 @@ impl ConstraintSet { /// easy to find the constraints affecting a particular region. 
/// /// N.B., this graph contains a "frozen" view of the current - /// constraints. any new constraints added to the `ConstraintSet` + /// constraints. Any new constraints added to the `ConstraintSet` /// after the graph is built will not be present in the graph. crate fn graph(&self, num_region_vars: usize) -> graph::NormalConstraintGraph { graph::ConstraintGraph::new(graph::Normal, self, num_region_vars) @@ -53,7 +43,7 @@ impl ConstraintSet { graph::ConstraintGraph::new(graph::Reverse, self, num_region_vars) } - /// Compute cycles (SCCs) in the graph of regions. In particular, + /// Computes cycles (SCCs) in the graph of regions. In particular, /// find all regions R1, R2 such that R1: R2 and R2: R1 and group /// them into an SCC, and find the relationships between SCCs. crate fn compute_sccs( @@ -94,7 +84,7 @@ pub struct OutlivesConstraint { } impl fmt::Debug for OutlivesConstraint { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!( formatter, "({:?}: {:?}) due to {:?}", diff --git a/src/librustc_mir/borrow_check/nll/explain_borrow/find_use.rs b/src/librustc_mir/borrow_check/nll/explain_borrow/find_use.rs index 1e6ba638e1cc4..7d6385752c348 100644 --- a/src/librustc_mir/borrow_check/nll/explain_borrow/find_use.rs +++ b/src/librustc_mir/borrow_check/nll/explain_borrow/find_use.rs @@ -1,23 +1,13 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::collections::VecDeque; use std::rc::Rc; -use borrow_check::nll::region_infer::{Cause, RegionInferenceContext}; -use borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::region_infer::{Cause, RegionInferenceContext}; +use crate::borrow_check::nll::ToRegionVid; +use crate::util::liveness::{self, DefUse}; use rustc::mir::visit::{MirVisitable, PlaceContext, Visitor}; use rustc::mir::{Local, Location, Mir}; use rustc::ty::{RegionVid, TyCtxt}; use rustc_data_structures::fx::FxHashSet; -use util::liveness::{self, DefUse}; crate fn find<'tcx>( mir: &Mir<'tcx>, @@ -75,10 +65,7 @@ impl<'cx, 'gcx, 'tcx> UseFinder<'cx, 'gcx, 'tcx> { None => { if p.statement_index < block_data.statements.len() { - queue.push_back(Location { - statement_index: p.statement_index + 1, - ..p - }); + queue.push_back(p.successor_within_block()); } else { queue.extend( block_data diff --git a/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs b/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs index 7fb3f02e0e3f3..e30938bc32659 100644 --- a/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs +++ b/src/librustc_mir/borrow_check/nll/explain_borrow/mod.rs @@ -1,30 +1,22 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::borrow_set::BorrowData; -use borrow_check::error_reporting::UseSpans; -use borrow_check::nll::ConstraintDescription; -use borrow_check::nll::region_infer::{Cause, RegionName}; -use borrow_check::{Context, MirBorrowckCtxt, WriteKind}; -use rustc::ty::{self, TyCtxt}; +use std::collections::VecDeque; + +use crate::borrow_check::borrow_set::BorrowData; +use crate::borrow_check::error_reporting::UseSpans; +use crate::borrow_check::nll::region_infer::{Cause, RegionName}; +use crate::borrow_check::nll::ConstraintDescription; +use crate::borrow_check::{Context, MirBorrowckCtxt, WriteKind}; use rustc::mir::{ - CastKind, ConstraintCategory, FakeReadCause, Local, Location, Mir, Operand, - Place, Projection, ProjectionElem, Rvalue, Statement, StatementKind, - TerminatorKind + CastKind, ConstraintCategory, FakeReadCause, Local, Location, Mir, Operand, Place, PlaceBase, + Projection, ProjectionElem, Rvalue, Statement, StatementKind, TerminatorKind, }; +use rustc::ty::{self, TyCtxt}; +use rustc_data_structures::fx::FxHashSet; use rustc_errors::DiagnosticBuilder; use syntax_pos::Span; mod find_use; -pub(in borrow_check) enum BorrowExplanation { +pub(in crate::borrow_check) enum BorrowExplanation { UsedLater(LaterUseKind, Span), UsedLaterInLoop(LaterUseKind, Span), UsedLaterWhenDropped { @@ -43,7 +35,7 @@ pub(in borrow_check) enum BorrowExplanation { } #[derive(Clone, Copy)] -pub(in borrow_check) enum LaterUseKind { +pub(in crate::borrow_check) enum LaterUseKind { TraitCapture, ClosureCapture, Call, @@ -52,68 +44,82 @@ pub(in borrow_check) enum LaterUseKind { } impl BorrowExplanation { - pub(in borrow_check) fn is_explained(&self) -> bool { + pub(in crate::borrow_check) fn is_explained(&self) -> bool { match self { BorrowExplanation::Unexplained => false, _ => true, } } - pub(in borrow_check) fn add_explanation_to_diagnostic<'cx, 'gcx, 'tcx>( + pub(in crate::borrow_check) fn add_explanation_to_diagnostic<'cx, 'gcx, 'tcx>( &self, tcx: TyCtxt<'cx, 'gcx, 'tcx>, mir: &Mir<'tcx>, err: &mut DiagnosticBuilder<'_>, borrow_desc: &str, + borrow_span: Option, ) { match *self { BorrowExplanation::UsedLater(later_use_kind, var_or_use_span) => { let message = match later_use_kind { - LaterUseKind::TraitCapture => "borrow later captured here by trait object", - LaterUseKind::ClosureCapture => "borrow later captured here by closure", - LaterUseKind::Call => "borrow later used by call", - LaterUseKind::FakeLetRead => "borrow later stored here", - LaterUseKind::Other => "borrow later used here", + LaterUseKind::TraitCapture => "captured here by trait object", + LaterUseKind::ClosureCapture => "captured here by closure", + LaterUseKind::Call => "used by call", + LaterUseKind::FakeLetRead => "stored here", + LaterUseKind::Other => "used here", }; - err.span_label(var_or_use_span, format!("{}{}", borrow_desc, message)); - }, + if !borrow_span.map(|sp| sp.overlaps(var_or_use_span)).unwrap_or(false) { + err.span_label( + var_or_use_span, + format!("{}borrow later {}", borrow_desc, message), + ); + } + } BorrowExplanation::UsedLaterInLoop(later_use_kind, var_or_use_span) => { let message = match later_use_kind { - LaterUseKind::TraitCapture => - "borrow captured here by trait object, in later iteration of loop", - LaterUseKind::ClosureCapture => - "borrow captured here by closure, in later iteration of loop", - LaterUseKind::Call => "borrow used by call, in later iteration of loop", + LaterUseKind::TraitCapture => { + "borrow captured here by trait object, in later iteration of loop" + } + 
LaterUseKind::ClosureCapture => { + "borrow captured here by closure, in later iteration of loop" + } + LaterUseKind::Call => "borrow used by call, in later iteration of loop", LaterUseKind::FakeLetRead => "borrow later stored here", LaterUseKind::Other => "borrow used here, in later iteration of loop", }; err.span_label(var_or_use_span, format!("{}{}", borrow_desc, message)); - }, - BorrowExplanation::UsedLaterWhenDropped { drop_loc, dropped_local, - should_note_order } => - { + } + BorrowExplanation::UsedLaterWhenDropped { + drop_loc, + dropped_local, + should_note_order, + } => { let local_decl = &mir.local_decls[dropped_local]; let (dtor_desc, type_desc) = match local_decl.ty.sty { // If type is an ADT that implements Drop, then // simplify output by reporting just the ADT name. - ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => - ("`Drop` code", format!("type `{}`", tcx.item_path_str(adt.did))), + ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => ( + "`Drop` code", + format!("type `{}`", tcx.def_path_str(adt.did)), + ), // Otherwise, just report the whole type (and use // the intentionally fuzzy phrase "destructor") - ty::Closure(..) => - ("destructor", "closure".to_owned()), - ty::Generator(..) => - ("destructor", "generator".to_owned()), + ty::Closure(..) => ("destructor", "closure".to_owned()), + ty::Generator(..) => ("destructor", "generator".to_owned()), _ => ("destructor", format!("type `{}`", local_decl.ty)), }; match local_decl.name { Some(local_name) => { - let message = - format!("{B}borrow might be used here, when `{LOC}` is dropped \ - and runs the {DTOR} for {TYPE}", - B=borrow_desc, LOC=local_name, TYPE=type_desc, DTOR=dtor_desc); + let message = format!( + "{B}borrow might be used here, when `{LOC}` is dropped \ + and runs the {DTOR} for {TYPE}", + B = borrow_desc, + LOC = local_name, + TYPE = type_desc, + DTOR = dtor_desc + ); err.span_label(mir.source_info(drop_loc).span, message); if should_note_order { @@ -124,15 +130,22 @@ impl BorrowExplanation { } } None => { - err.span_label(local_decl.source_info.span, - format!("a temporary with access to the {B}borrow \ - is created here ...", - B=borrow_desc)); - let message = - format!("... and the {B}borrow might be used here, \ - when that temporary is dropped \ - and runs the {DTOR} for {TYPE}", - B=borrow_desc, TYPE=type_desc, DTOR=dtor_desc); + err.span_label( + local_decl.source_info.span, + format!( + "a temporary with access to the {B}borrow \ + is created here ...", + B = borrow_desc + ), + ); + let message = format!( + "... 
and the {B}borrow might be used here, \ + when that temporary is dropped \ + and runs the {DTOR} for {TYPE}", + B = borrow_desc, + TYPE = type_desc, + DTOR = dtor_desc + ); err.span_label(mir.source_info(drop_loc).span, message); if let Some(info) = &local_decl.is_block_tail { @@ -156,7 +169,7 @@ impl BorrowExplanation { } } } - }, + } BorrowExplanation::MustBeValidFor { category, span, @@ -167,18 +180,28 @@ impl BorrowExplanation { region_name.highlight_region_name(err); if let Some(desc) = opt_place_desc { - err.span_label(span, format!( - "{}requires that `{}` is borrowed for `{}`", - category.description(), desc, region_name, - )); + err.span_label( + span, + format!( + "{}requires that `{}` is borrowed for `{}`", + category.description(), + desc, + region_name, + ), + ); } else { - err.span_label(span, format!( - "{}requires that {}borrow lasts for `{}`", - category.description(), borrow_desc, region_name, - )); + err.span_label( + span, + format!( + "{}requires that {}borrow lasts for `{}`", + category.description(), + borrow_desc, + region_name, + ), + ); }; - }, - _ => {}, + } + _ => {} } } } @@ -197,7 +220,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// - second half is the place being accessed /// /// [d]: https://rust-lang.github.io/rfcs/2094-nll.html#leveraging-intuition-framing-errors-in-terms-of-points - pub(in borrow_check) fn explain_why_borrow_contains_point( + pub(in crate::borrow_check) fn explain_why_borrow_contains_point( &self, context: Context, borrow: &BorrowData<'tcx>, @@ -224,13 +247,15 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { region_sub ); - match find_use::find(mir, regioncx, tcx, region_sub, context.loc) { + match find_use::find(mir, regioncx, tcx, region_sub, context.loc) { Some(Cause::LiveVar(local, location)) => { let span = mir.source_info(location).span; - let spans = self.move_spans(&Place::Local(local), location) + let spans = self + .move_spans(&Place::Base(PlaceBase::Local(local)), location) .or_else(|| self.borrow_spans(span, location)); - if self.is_borrow_location_in_loop(context.loc) { + let borrow_location = context.loc; + if self.is_use_in_later_iteration_of_loop(borrow_location, location) { let later_use = self.later_use_kind(borrow, spans, location); BorrowExplanation::UsedLaterInLoop(later_use.0, later_use.1) } else { @@ -242,166 +267,235 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } - Some(Cause::DropVar(local, location)) => { - let mut should_note_order = false; - if mir.local_decls[local].name.is_some() { - if let Some((WriteKind::StorageDeadOrDrop, place)) = kind_place { - if let Place::Local(borrowed_local) = place { - let dropped_local_scope = mir.local_decls[local].visibility_scope; - let borrowed_local_scope = - mir.local_decls[*borrowed_local].visibility_scope; - - if mir.is_sub_scope(borrowed_local_scope, dropped_local_scope) - && local != *borrowed_local - { - should_note_order = true; - } - } - } - } - - BorrowExplanation::UsedLaterWhenDropped { - drop_loc: location, - dropped_local: local, - should_note_order, - } + Some(Cause::DropVar(local, location)) => { + let mut should_note_order = false; + if mir.local_decls[local].name.is_some() { + if let Some((WriteKind::StorageDeadOrDrop, place)) = kind_place { + if let Place::Base(PlaceBase::Local(borrowed_local)) = place { + let dropped_local_scope = mir.local_decls[local].visibility_scope; + let borrowed_local_scope = + mir.local_decls[*borrowed_local].visibility_scope; + + if mir.is_sub_scope(borrowed_local_scope, 
dropped_local_scope) + && local != *borrowed_local + { + should_note_order = true; + } + } + } + } + + BorrowExplanation::UsedLaterWhenDropped { + drop_loc: location, + dropped_local: local, + should_note_order, + } } - None => if let Some(region) = regioncx.to_error_region_vid(borrow_region_vid) { - let (category, from_closure, span, region_name) = self - .nonlexical_regioncx - .free_region_constraint_info( - self.mir, - self.mir_def_id, - self.infcx, - borrow_region_vid, - region, - ); - if let Some(region_name) = region_name { - let opt_place_desc = self.describe_place(&borrow.borrowed_place); - BorrowExplanation::MustBeValidFor { - category, - from_closure, - span, - region_name, - opt_place_desc, + None => { + if let Some(region) = regioncx.to_error_region_vid(borrow_region_vid) { + let (category, from_closure, span, region_name) = + self.nonlexical_regioncx.free_region_constraint_info( + self.mir, + self.mir_def_id, + self.infcx, + borrow_region_vid, + region, + ); + if let Some(region_name) = region_name { + let opt_place_desc = self.describe_place(&borrow.borrowed_place); + BorrowExplanation::MustBeValidFor { + category, + from_closure, + span, + region_name, + opt_place_desc, + } + } else { + BorrowExplanation::Unexplained } } else { BorrowExplanation::Unexplained } - } else { - BorrowExplanation::Unexplained } } } - /// Check if a borrow location is within a loop. - fn is_borrow_location_in_loop( + /// true if `borrow_location` can reach `use_location` by going through a loop and + /// `use_location` is also inside of that loop + fn is_use_in_later_iteration_of_loop( &self, borrow_location: Location, + use_location: Location, ) -> bool { - let mut visited_locations = Vec::new(); - let mut pending_locations = vec![ borrow_location ]; - debug!("is_in_loop: borrow_location={:?}", borrow_location); - - while let Some(location) = pending_locations.pop() { - debug!("is_in_loop: location={:?} pending_locations={:?} visited_locations={:?}", - location, pending_locations, visited_locations); - if location == borrow_location && visited_locations.contains(&borrow_location) { - // We've managed to return to where we started (and this isn't the start of the - // search). - debug!("is_in_loop: found!"); - return true; - } + let back_edge = self.reach_through_backedge(borrow_location, use_location); + back_edge.map_or(false, |back_edge| { + self.can_reach_head_of_loop(use_location, back_edge) + }) + } - // Skip locations we've been. 
- if visited_locations.contains(&location) { continue; } + /// Returns the outmost back edge if `from` location can reach `to` location passing through + /// that back edge + fn reach_through_backedge(&self, from: Location, to: Location) -> Option { + let mut visited_locations = FxHashSet::default(); + let mut pending_locations = VecDeque::new(); + visited_locations.insert(from); + pending_locations.push_back(from); + debug!("reach_through_backedge: from={:?} to={:?}", from, to,); + + let mut outmost_back_edge = None; + while let Some(location) = pending_locations.pop_front() { + debug!( + "reach_through_backedge: location={:?} outmost_back_edge={:?} + pending_locations={:?} visited_locations={:?}", + location, outmost_back_edge, pending_locations, visited_locations + ); + + if location == to && outmost_back_edge.is_some() { + // We've managed to reach the use location + debug!("reach_through_backedge: found!"); + return outmost_back_edge; + } let block = &self.mir.basic_blocks()[location.block]; - if location.statement_index == block.statements.len() { - // Add start location of the next blocks to pending locations. - match block.terminator().kind { - TerminatorKind::Goto { target } => { - pending_locations.push(target.start_location()); - }, - TerminatorKind::SwitchInt { ref targets, .. } => { - pending_locations.extend( - targets.into_iter().map(|target| target.start_location())); - }, - TerminatorKind::Drop { target, unwind, .. } | - TerminatorKind::DropAndReplace { target, unwind, .. } | - TerminatorKind::Assert { target, cleanup: unwind, .. } | - TerminatorKind::Yield { resume: target, drop: unwind, .. } | - TerminatorKind::FalseUnwind { real_target: target, unwind, .. } => { - pending_locations.push(target.start_location()); - if let Some(unwind) = unwind { - pending_locations.push(unwind.start_location()); - } - }, - TerminatorKind::Call { ref destination, cleanup, .. } => { - if let Some((_, destination)) = destination { - pending_locations.push(destination.start_location()); - } - if let Some(cleanup) = cleanup { - pending_locations.push(cleanup.start_location()); - } - }, - TerminatorKind::FalseEdges { real_target, ref imaginary_targets, .. } => { - pending_locations.push(real_target.start_location()); - pending_locations.extend( - imaginary_targets.into_iter().map(|target| target.start_location())); - }, - _ => {}, + + if location.statement_index < block.statements.len() { + let successor = location.successor_within_block(); + if visited_locations.insert(successor) { + pending_locations.push_back(successor); } } else { - // Add the next statement to pending locations. 
- pending_locations.push(location.successor_within_block()); + pending_locations.extend( + block + .terminator() + .successors() + .map(|bb| Location { + statement_index: 0, + block: *bb, + }) + .filter(|s| visited_locations.insert(*s)) + .map(|s| { + if self.is_back_edge(location, s) { + match outmost_back_edge { + None => { + outmost_back_edge = Some(location); + } + + Some(back_edge) + if location.dominates(back_edge, &self.dominators) => + { + outmost_back_edge = Some(location); + } + + Some(_) => {} + } + } + + s + }), + ); } + } + + None + } + + /// true if `from` location can reach `loop_head` location and `loop_head` dominates all the + /// intermediate nodes + fn can_reach_head_of_loop(&self, from: Location, loop_head: Location) -> bool { + self.find_loop_head_dfs(from, loop_head, &mut FxHashSet::default()) + } + + fn find_loop_head_dfs( + &self, + from: Location, + loop_head: Location, + visited_locations: &mut FxHashSet, + ) -> bool { + visited_locations.insert(from); + + if from == loop_head { + return true; + } + + if loop_head.dominates(from, &self.dominators) { + let block = &self.mir.basic_blocks()[from.block]; + + if from.statement_index < block.statements.len() { + let successor = from.successor_within_block(); - // Keep track of where we have visited. - visited_locations.push(location); + if !visited_locations.contains(&successor) + && self.find_loop_head_dfs(successor, loop_head, visited_locations) + { + return true; + } + } else { + for bb in block.terminator().successors() { + let successor = Location { + statement_index: 0, + block: *bb, + }; + + if !visited_locations.contains(&successor) + && self.find_loop_head_dfs(successor, loop_head, visited_locations) + { + return true; + } + } + } } false } + /// True if an edge `source -> target` is a backedge -- in other words, if the target + /// dominates the source. + fn is_back_edge(&self, source: Location, target: Location) -> bool { + target.dominates(source, &self.mir.dominators()) + } + /// Determine how the borrow was later used. fn later_use_kind( &self, borrow: &BorrowData<'tcx>, use_spans: UseSpans, - location: Location + location: Location, ) -> (LaterUseKind, Span) { match use_spans { UseSpans::ClosureUse { var_span, .. } => { // Used in a closure. (LaterUseKind::ClosureCapture, var_span) - }, + } UseSpans::OtherUse(span) => { let block = &self.mir.basic_blocks()[location.block]; let kind = if let Some(&Statement { kind: StatementKind::FakeRead(FakeReadCause::ForLet, _), .. - }) = block.statements.get(location.statement_index) { + }) = block.statements.get(location.statement_index) + { LaterUseKind::FakeLetRead } else if self.was_captured_by_trait_object(borrow) { LaterUseKind::TraitCapture } else if location.statement_index == block.statements.len() { if let TerminatorKind::Call { - ref func, from_hir_call: true, .. - } = block.terminator().kind { + ref func, + from_hir_call: true, + .. + } = block.terminator().kind + { // Just point to the function, to reduce the chance of overlapping spans. 
let function_span = match func { Operand::Constant(c) => c.span, - Operand::Copy(Place::Local(l)) | Operand::Move(Place::Local(l)) => { + Operand::Copy(Place::Base(PlaceBase::Local(l))) | + Operand::Move(Place::Base(PlaceBase::Local(l))) => { let local_decl = &self.mir.local_decls[*l]; if local_decl.name.is_none() { local_decl.source_info.span } else { span } - }, + } _ => span, }; return (LaterUseKind::Call, function_span); @@ -417,7 +511,7 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { } } - /// Check if a borrowed value was captured by a trait object. We do this by + /// Checks if a borrowed value was captured by a trait object. We do this by /// looking forward in the MIR from the reserve location and checking if we see /// a unsized cast to a trait object on our data. fn was_captured_by_trait_object(&self, borrow: &BorrowData<'tcx>) -> bool { @@ -425,22 +519,29 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { let location = borrow.reserve_location; let block = &self.mir[location.block]; let stmt = block.statements.get(location.statement_index); - debug!("was_captured_by_trait_object: location={:?} stmt={:?}", location, stmt); + debug!( + "was_captured_by_trait_object: location={:?} stmt={:?}", + location, stmt + ); // We make a `queue` vector that has the locations we want to visit. As of writing, this // will only ever have one item at any given time, but by using a vector, we can pop from // it which simplifies the termination logic. let mut queue = vec![location]; let mut target = if let Some(&Statement { - kind: StatementKind::Assign(Place::Local(local), _), + kind: StatementKind::Assign(Place::Base(PlaceBase::Local(local)), _), .. - }) = stmt { + }) = stmt + { local } else { return false; }; - debug!("was_captured_by_trait: target={:?} queue={:?}", target, queue); + debug!( + "was_captured_by_trait: target={:?} queue={:?}", + target, queue + ); while let Some(current_location) = queue.pop() { debug!("was_captured_by_trait: target={:?}", target); let block = &self.mir[current_location.block]; @@ -451,55 +552,55 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { debug!("was_captured_by_trait_object: stmt={:?}", stmt); // The only kind of statement that we care about is assignments... - if let StatementKind::Assign( - place, - box rvalue, - ) = &stmt.kind { + if let StatementKind::Assign(place, box rvalue) = &stmt.kind { let into = match place { - Place::Local(into) => into, + Place::Base(PlaceBase::Local(into)) => into, Place::Projection(box Projection { - base: Place::Local(into), + base: Place::Base(PlaceBase::Local(into)), elem: ProjectionElem::Deref, }) => into, - _ => { + _ => { // Continue at the next location. queue.push(current_location.successor_within_block()); continue; - }, + } }; match rvalue { // If we see a use, we should check whether it is our data, and if so // update the place that we're looking for to that new place. Rvalue::Use(operand) => match operand { - Operand::Copy(Place::Local(from)) | - Operand::Move(Place::Local(from)) if *from == target => { + Operand::Copy(Place::Base(PlaceBase::Local(from))) + | Operand::Move(Place::Base(PlaceBase::Local(from))) + if *from == target => + { target = *into; - }, - _ => {}, + } + _ => {} }, // If we see a unsized cast, then if it is our data we should check // whether it is being cast to a trait object. 
Rvalue::Cast(CastKind::Unsize, operand, ty) => match operand { - Operand::Copy(Place::Local(from)) | - Operand::Move(Place::Local(from)) if *from == target => { + Operand::Copy(Place::Base(PlaceBase::Local(from))) + | Operand::Move(Place::Base(PlaceBase::Local(from))) + if *from == target => + { debug!("was_captured_by_trait_object: ty={:?}", ty); // Check the type for a trait object. return match ty.sty { // `&dyn Trait` - ty::TyKind::Ref(_, ty, _) if ty.is_trait() => true, + ty::Ref(_, ty, _) if ty.is_trait() => true, // `Box` - _ if ty.is_box() && ty.boxed_ty().is_trait() => - true, + _ if ty.is_box() && ty.boxed_ty().is_trait() => true, // `dyn Trait` _ if ty.is_trait() => true, // Anything else. _ => false, }; - }, + } _ => return false, }, - _ => {}, + _ => {} } } @@ -511,17 +612,18 @@ impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { debug!("was_captured_by_trait_object: terminator={:?}", terminator); if let TerminatorKind::Call { - destination: Some((Place::Local(dest), block)), + destination: Some((Place::Base(PlaceBase::Local(dest)), block)), args, .. - } = &terminator.kind { + } = &terminator.kind + { debug!( "was_captured_by_trait_object: target={:?} dest={:?} args={:?}", target, dest, args ); // Check if one of the arguments to this function is the target place. let found_target = args.iter().any(|arg| { - if let Operand::Move(Place::Local(potential)) = arg { + if let Operand::Move(Place::Base(PlaceBase::Local(potential))) = arg { *potential == target } else { false diff --git a/src/librustc_mir/borrow_check/nll/facts.rs b/src/librustc_mir/borrow_check/nll/facts.rs index 465707ecc17dd..9714398d9d63a 100644 --- a/src/librustc_mir/borrow_check/nll/facts.rs +++ b/src/librustc_mir/borrow_check/nll/facts.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::location::{LocationIndex, LocationTable}; -use dataflow::indexes::BorrowIndex; +use crate::borrow_check::location::{LocationIndex, LocationTable}; +use crate::dataflow::indexes::BorrowIndex; use polonius_engine::AllFacts as PoloniusAllFacts; use polonius_engine::Atom; use rustc::ty::{RegionVid, TyCtxt}; @@ -23,7 +13,7 @@ use std::path::Path; crate type AllFacts = PoloniusAllFacts; crate trait AllFactsExt { - /// Returns true if there is a need to gather `AllFacts` given the + /// Returns `true` if there is a need to gather `AllFacts` given the /// current `-Z` flags. fn enabled(tcx: TyCtxt<'_, '_, '_>) -> bool; diff --git a/src/librustc_mir/borrow_check/nll/invalidation.rs b/src/librustc_mir/borrow_check/nll/invalidation.rs index 07bda8af62618..9cbb3556017cc 100644 --- a/src/librustc_mir/borrow_check/nll/invalidation.rs +++ b/src/librustc_mir/borrow_check/nll/invalidation.rs @@ -1,28 +1,18 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::borrow_set::BorrowSet; -use borrow_check::location::LocationTable; -use borrow_check::{JustWrite, WriteAndRead}; -use borrow_check::{AccessDepth, Deep, Shallow}; -use borrow_check::{ReadOrWrite, Activation, Read, Reservation, Write}; -use borrow_check::{Context, ContextKind}; -use borrow_check::{LocalMutationIsAllowed, MutateMode}; -use borrow_check::ArtificialField; -use borrow_check::{ReadKind, WriteKind}; -use borrow_check::nll::facts::AllFacts; -use borrow_check::path_utils::*; -use dataflow::move_paths::indexes::BorrowIndex; +use crate::borrow_check::borrow_set::BorrowSet; +use crate::borrow_check::location::LocationTable; +use crate::borrow_check::{JustWrite, WriteAndRead}; +use crate::borrow_check::{AccessDepth, Deep, Shallow}; +use crate::borrow_check::{ReadOrWrite, Activation, Read, Reservation, Write}; +use crate::borrow_check::{Context, ContextKind}; +use crate::borrow_check::{LocalMutationIsAllowed, MutateMode}; +use crate::borrow_check::ArtificialField; +use crate::borrow_check::{ReadKind, WriteKind}; +use crate::borrow_check::nll::facts::AllFacts; +use crate::borrow_check::path_utils::*; +use crate::dataflow::move_paths::indexes::BorrowIndex; use rustc::ty::TyCtxt; use rustc::mir::visit::Visitor; -use rustc::mir::{BasicBlock, Location, Mir, Place, Rvalue}; +use rustc::mir::{BasicBlock, Location, Mir, Place, PlaceBase, Rvalue}; use rustc::mir::{Statement, StatementKind}; use rustc::mir::{Terminator, TerminatorKind}; use rustc::mir::{Operand, BorrowKind}; @@ -63,13 +53,17 @@ struct InvalidationGenerator<'cx, 'tcx: 'cx, 'gcx: 'tcx> { borrow_set: &'cx BorrowSet<'tcx>, } -/// Visits the whole MIR and generates invalidates() facts -/// Most of the code implementing this was stolen from borrow_check/mod.rs +/// Visits the whole MIR and generates `invalidates()` facts. +/// Most of the code implementing this was stolen from `borrow_check/mod.rs`. impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { - fn visit_statement(&mut self, - block: BasicBlock, - statement: &Statement<'tcx>, - location: Location) { + fn visit_statement( + &mut self, + block: BasicBlock, + statement: &Statement<'tcx>, + location: Location, + ) { + self.check_activations(location); + match statement.kind { StatementKind::Assign(ref lhs, ref rhs) => { self.consume_rvalue( @@ -84,13 +78,8 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { JustWrite ); } - StatementKind::FakeRead(_, ref place) => { - self.access_place( - ContextKind::FakeRead.new(location), - place, - (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))), - LocalMutationIsAllowed::No, - ); + StatementKind::FakeRead(_, _) => { + // Only relavent for initialized/liveness/safety checks. } StatementKind::SetDiscriminant { ref place, @@ -103,16 +92,12 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { JustWrite, ); } - StatementKind::InlineAsm { - ref asm, - ref outputs, - ref inputs, - } => { + StatementKind::InlineAsm(ref asm) => { let context = ContextKind::InlineAsm.new(location); - for (o, output) in asm.outputs.iter().zip(outputs.iter()) { + for (o, output) in asm.asm.outputs.iter().zip(asm.outputs.iter()) { if o.is_indirect { // FIXME(eddyb) indirect inline asm outputs should - // be encoeded through MIR place derefs instead. + // be encoded through MIR place derefs instead. 
self.access_place( context, output, @@ -128,14 +113,13 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { ); } } - for (_, input) in inputs.iter() { + for (_, input) in asm.inputs.iter() { self.consume_operand(context, input); } } StatementKind::Nop | StatementKind::AscribeUserType(..) | StatementKind::Retag { .. } | - StatementKind::EscapeToRaw { .. } | StatementKind::StorageLive(..) => { // `Nop`, `AscribeUserType`, `Retag`, and `StorageLive` are irrelevant // to borrow check. @@ -143,7 +127,7 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { StatementKind::StorageDead(local) => { self.access_place( ContextKind::StorageDead.new(location), - &Place::Local(local), + &Place::Base(PlaceBase::Local(local)), (Shallow(None), Write(WriteKind::StorageDeadOrDrop)), LocalMutationIsAllowed::Yes, ); @@ -159,6 +143,8 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { terminator: &Terminator<'tcx>, location: Location ) { + self.check_activations(location); + match terminator.kind { TerminatorKind::SwitchInt { ref discr, @@ -225,7 +211,7 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { cleanup: _, } => { self.consume_operand(ContextKind::Assert.new(location), cond); - use rustc::mir::interpret::EvalErrorKind::BoundsCheck; + use rustc::mir::interpret::InterpError::BoundsCheck; if let BoundsCheck { ref len, ref index } = *msg { self.consume_operand(ContextKind::Assert.new(location), len); self.consume_operand(ContextKind::Assert.new(location), index); @@ -277,7 +263,7 @@ impl<'cx, 'tcx, 'gcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx, 'gcx> { } impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { - /// Simulates mutation of a place + /// Simulates mutation of a place. fn mutate_place( &mut self, context: Context, @@ -293,7 +279,7 @@ impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { ); } - /// Simulates consumption of an operand + /// Simulates consumption of an operand. fn consume_operand( &mut self, context: Context, @@ -389,7 +375,7 @@ impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { } } - /// Simulates an access to a place + /// Simulates an access to a place. fn access_place( &mut self, context: Context, @@ -442,9 +428,11 @@ impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { // have already taken the reservation } - (Read(_), BorrowKind::Shallow) | (Reservation(..), BorrowKind::Shallow) - | (Read(_), BorrowKind::Shared) | (Reservation(..), BorrowKind::Shared) => { - // Reads/reservations don't invalidate shared or shallow borrows + (Read(_), BorrowKind::Shallow) + | (Read(_), BorrowKind::Shared) + | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Unique) + | (Read(ReadKind::Borrow(BorrowKind::Shallow)), BorrowKind::Mut { .. }) => { + // Reads don't invalidate shared or shallow borrows } (Read(_), BorrowKind::Unique) | (Read(_), BorrowKind::Mut { .. }) => { @@ -460,16 +448,15 @@ impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { this.generate_invalidates(borrow_index, context.loc); } - (Reservation(_), BorrowKind::Unique) - | (Reservation(_), BorrowKind::Mut { .. }) - | (Activation(_, _), _) - | (Write(_), _) => { - // unique or mutable borrows are invalidated by writes. - // Reservations count as writes since we need to check - // that activating the borrow will be OK - // FIXME(bob_twinkles) is this actually the right thing to do? 
- this.generate_invalidates(borrow_index, context.loc); - } + (Reservation(_), _) + | (Activation(_, _), _) + | (Write(_), _) => { + // unique or mutable borrows are invalidated by writes. + // Reservations count as writes since we need to check + // that activating the borrow will be OK + // FIXME(bob_twinkles) is this actually the right thing to do? + this.generate_invalidates(borrow_index, context.loc); + } } Control::Continue }, @@ -477,10 +464,46 @@ impl<'cg, 'cx, 'tcx, 'gcx> InvalidationGenerator<'cx, 'tcx, 'gcx> { } - /// Generate a new invalidates(L, B) fact + /// Generates a new `invalidates(L, B)` fact. fn generate_invalidates(&mut self, b: BorrowIndex, l: Location) { let lidx = self.location_table.start_index(l); self.all_facts.invalidates.push((lidx, b)); } + + fn check_activations( + &mut self, + location: Location, + ) { + if !self.tcx.two_phase_borrows() { + return; + } + + // Two-phase borrow support: For each activation that is newly + // generated at this statement, check if it interferes with + // another borrow. + for &borrow_index in self.borrow_set.activations_at_location(location) { + let borrow = &self.borrow_set[borrow_index]; + + // only mutable borrows should be 2-phase + assert!(match borrow.kind { + BorrowKind::Shared | BorrowKind::Shallow => false, + BorrowKind::Unique | BorrowKind::Mut { .. } => true, + }); + + self.access_place( + ContextKind::Activation.new(location), + &borrow.borrowed_place, + ( + Deep, + Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index), + ), + LocalMutationIsAllowed::No, + ); + + // We do not need to call `check_if_path_or_subpath_is_moved` + // again, as we already called it when we made the + // initial reservation. + } + } } diff --git a/src/librustc_mir/borrow_check/nll/mod.rs b/src/librustc_mir/borrow_check/nll/mod.rs index cd4694351ad0e..2d3800dd1dda8 100644 --- a/src/librustc_mir/borrow_check/nll/mod.rs +++ b/src/librustc_mir/borrow_check/nll/mod.rs @@ -1,23 +1,13 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::borrow_set::BorrowSet; -use borrow_check::location::{LocationIndex, LocationTable}; -use borrow_check::nll::facts::AllFactsExt; -use borrow_check::nll::type_check::{MirTypeckResults, MirTypeckRegionConstraints}; -use borrow_check::nll::type_check::liveness::liveness_map::NllLivenessMap; -use borrow_check::nll::region_infer::values::RegionValueElements; -use dataflow::indexes::BorrowIndex; -use dataflow::move_paths::MoveData; -use dataflow::FlowAtLocation; -use dataflow::MaybeInitializedPlaces; +use crate::borrow_check::borrow_set::BorrowSet; +use crate::borrow_check::location::{LocationIndex, LocationTable}; +use crate::borrow_check::nll::facts::AllFactsExt; +use crate::borrow_check::nll::type_check::{MirTypeckResults, MirTypeckRegionConstraints}; +use crate::borrow_check::nll::region_infer::values::RegionValueElements; +use crate::dataflow::indexes::BorrowIndex; +use crate::dataflow::move_paths::MoveData; +use crate::dataflow::FlowAtLocation; +use crate::dataflow::MaybeInitializedPlaces; +use crate::transform::MirSource; use rustc::hir::def_id::DefId; use rustc::infer::InferCtxt; use rustc::mir::{ClosureOutlivesSubject, ClosureRegionRequirements, Mir}; @@ -29,12 +19,11 @@ use std::io; use std::path::PathBuf; use std::rc::Rc; use std::str::FromStr; -use transform::MirSource; use self::mir_util::PassWhere; use polonius_engine::{Algorithm, Output}; -use util as mir_util; -use util::pretty; +use crate::util as mir_util; +use crate::util::pretty; mod constraint_generation; pub mod explain_borrow; @@ -55,7 +44,7 @@ use self::universal_regions::UniversalRegions; /// scraping out the set of universal regions (e.g., region parameters) /// declared on the function. That set will need to be given to /// `compute_regions`. -pub(in borrow_check) fn replace_regions_in_mir<'cx, 'gcx, 'tcx>( +pub(in crate::borrow_check) fn replace_regions_in_mir<'cx, 'gcx, 'tcx>( infcx: &InferCtxt<'cx, 'gcx, 'tcx>, def_id: DefId, param_env: ty::ParamEnv<'tcx>, @@ -78,14 +67,14 @@ pub(in borrow_check) fn replace_regions_in_mir<'cx, 'gcx, 'tcx>( /// Computes the (non-lexical) regions from the input MIR. /// /// This may result in errors being reported. -pub(in borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( +pub(in crate::borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( infcx: &InferCtxt<'cx, 'gcx, 'tcx>, def_id: DefId, universal_regions: UniversalRegions<'tcx>, mir: &Mir<'tcx>, location_table: &LocationTable, param_env: ty::ParamEnv<'gcx>, - flow_inits: &mut FlowAtLocation>, + flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'cx, 'gcx, 'tcx>>, move_data: &MoveData<'tcx>, borrow_set: &BorrowSet<'tcx>, errors_buffer: &mut Vec, @@ -219,10 +208,10 @@ pub(in borrow_check) fn compute_regions<'cx, 'gcx, 'tcx>( fn dump_mir_results<'a, 'gcx, 'tcx>( infcx: &InferCtxt<'a, 'gcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, - regioncx: &RegionInferenceContext, - closure_region_requirements: &Option, + regioncx: &RegionInferenceContext<'_>, + closure_region_requirements: &Option>, ) { if !mir_util::dump_enabled(infcx.tcx, "nll", source) { return; @@ -240,13 +229,14 @@ fn dump_mir_results<'a, 'gcx, 'tcx>( // Before the CFG, dump out the values for each region variable. 
PassWhere::BeforeCFG => { regioncx.dump_mir(out)?; + writeln!(out, "|")?; if let Some(closure_region_requirements) = closure_region_requirements { - writeln!(out, "|")?; writeln!(out, "| Free Region Constraints")?; for_each_region_constraint(closure_region_requirements, &mut |msg| { writeln!(out, "| {}", msg) })?; + writeln!(out, "|")?; } } @@ -263,14 +253,14 @@ fn dump_mir_results<'a, 'gcx, 'tcx>( ); // Also dump the inference graph constraints as a graphviz file. - let _: io::Result<()> = try_block! { + let _: io::Result<()> = try { let mut file = pretty::create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, source)?; regioncx.dump_graphviz_raw_constraints(&mut file)?; }; // Also dump the inference graph constraints as a graphviz file. - let _: io::Result<()> = try_block! { + let _: io::Result<()> = try { let mut file = pretty::create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, source)?; regioncx.dump_graphviz_scc_constraints(&mut file)?; @@ -282,7 +272,7 @@ fn dump_annotation<'a, 'gcx, 'tcx>( mir: &Mir<'tcx>, mir_def_id: DefId, regioncx: &RegionInferenceContext<'tcx>, - closure_region_requirements: &Option, + closure_region_requirements: &Option>, errors_buffer: &mut Vec, ) { let tcx = infcx.tcx; @@ -331,7 +321,7 @@ fn dump_annotation<'a, 'gcx, 'tcx>( } fn for_each_region_constraint( - closure_region_requirements: &ClosureRegionRequirements, + closure_region_requirements: &ClosureRegionRequirements<'_>, with_msg: &mut dyn FnMut(&str) -> io::Result<()>, ) -> io::Result<()> { for req in &closure_region_requirements.outlives_requirements { diff --git a/src/librustc_mir/borrow_check/nll/region_infer/dump_mir.rs b/src/librustc_mir/borrow_check/nll/region_infer/dump_mir.rs index 268a37c708681..419ee73b28ad3 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/dump_mir.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/dump_mir.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! As part of generating the regions, if you enable `-Zdump-mir=nll`, //! we will generate an annotated copy of the MIR that includes the //! state of region inference. This code handles emitting the region @@ -67,7 +57,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } /// Debugging aid: Invokes the `with_msg` callback repeatedly with - /// our internal region constraints. These are dumped into the + /// our internal region constraints. These are dumped into the /// -Zdump-mir file so that we can figure out why the region /// inference resulted in the values that it did when debugging. fn for_each_constraint( diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs index 32aaa0590d2f9..917e383cae827 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/mod.rs @@ -1,18 +1,9 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::constraints::OutlivesConstraint; -use borrow_check::nll::region_infer::RegionInferenceContext; -use borrow_check::nll::type_check::Locations; -use borrow_check::nll::universal_regions::DefiningTy; -use borrow_check::nll::ConstraintDescription; +use crate::borrow_check::nll::constraints::OutlivesConstraint; +use crate::borrow_check::nll::region_infer::RegionInferenceContext; +use crate::borrow_check::nll::type_check::Locations; +use crate::borrow_check::nll::universal_regions::DefiningTy; +use crate::borrow_check::nll::ConstraintDescription; +use crate::util::borrowck_errors::{BorrowckErrors, Origin}; use rustc::hir::def_id::DefId; use rustc::infer::error_reporting::nice_region_error::NiceRegionError; use rustc::infer::InferCtxt; @@ -25,7 +16,6 @@ use std::collections::VecDeque; use syntax::errors::Applicability; use syntax::symbol::keywords; use syntax_pos::Span; -use util::borrowck_errors::{BorrowckErrors, Origin}; mod region_name; mod var_name; @@ -142,6 +132,15 @@ impl<'tcx> RegionInferenceContext<'tcx> { } }); if let Some(i) = best_choice { + if let Some(next) = categorized_path.get(i + 1) { + if categorized_path[i].0 == ConstraintCategory::Return + && next.0 == ConstraintCategory::OpaqueType + { + // The return expression is being influenced by the return type being + // impl Trait, point at the return type and not the return expr. + return *next; + } + } return categorized_path[i]; } @@ -215,7 +214,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { for constraint in self.constraint_graph .outgoing_edges(r, &self.constraints, fr_static) { - assert_eq!(constraint.sup, r); + debug_assert_eq!(constraint.sup, r); let sub_region = constraint.sub; if let Trace::NotVisited = context[sub_region] { context[sub_region] = Trace::FromOutlivesConstraint(constraint); @@ -250,11 +249,13 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.provides_universal_region(r, fr, outlived_fr) }); + debug!("report_error: category={:?} {:?}", category, span); // Check if we can use one of the "nice region errors". if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) { let tables = infcx.tcx.typeck_tables_of(mir_def_id); - let nice = NiceRegionError::new_from_span(infcx.tcx, span, o, f, Some(tables)); - if let Some(_error_reported) = nice.try_report_from_nll() { + let nice = NiceRegionError::new_from_span(infcx, span, o, f, Some(tables)); + if let Some(diag) = nice.try_report_from_nll() { + diag.buffer(errors_buffer); return; } } @@ -515,7 +516,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { ) { let mut diag = infcx.tcx.sess.struct_span_err( span, - "unsatisfied lifetime constraints", // FIXME + "lifetime may not live long enough" ); let counter = &mut 1; @@ -582,7 +583,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { (self.to_error_region(fr), self.to_error_region(outlived_fr)) { if let Some(ty::TyS { - sty: ty::TyKind::Opaque(did, substs), + sty: ty::Opaque(did, substs), .. }) = infcx .tcx @@ -636,7 +637,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { "'_".to_string() }; - diag.span_suggestion_with_applicability( + diag.span_suggestion( span, &format!( "to allow this impl Trait to capture borrowed data with lifetime \ @@ -748,8 +749,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { } /// If `r2` represents a placeholder region, then this returns - /// true if `r1` cannot name that placeholder in its - /// value. Otherwise, returns false. 
+ /// `true` if `r1` cannot name that placeholder in its + /// value; otherwise, returns `false`. fn cannot_name_placeholder(&self, r1: RegionVid, r2: RegionVid) -> bool { debug!("cannot_name_value_of(r1={:?}, r2={:?})", r1, r2); diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs index a41d27e8ff1a6..362214d325712 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/region_name.rs @@ -1,26 +1,16 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fmt::{self, Display}; -use borrow_check::nll::region_infer::RegionInferenceContext; -use borrow_check::nll::universal_regions::DefiningTy; -use borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::region_infer::RegionInferenceContext; +use crate::borrow_check::nll::universal_regions::DefiningTy; +use crate::borrow_check::nll::ToRegionVid; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::infer::InferCtxt; use rustc::mir::Mir; -use rustc::ty::subst::{Substs, UnpackedKind}; +use rustc::ty::subst::{SubstsRef, UnpackedKind}; use rustc::ty::{self, RegionKind, RegionVid, Ty, TyCtxt}; -use rustc::util::ppaux::with_highlight_region_for_regionvid; +use rustc::ty::print::RegionHighlightMode; use rustc_errors::DiagnosticBuilder; -use syntax::ast::{Name, DUMMY_NODE_ID}; +use syntax::ast::Name; use syntax::symbol::keywords; use syntax_pos::Span; use syntax_pos::symbol::InternedString; @@ -119,7 +109,7 @@ impl RegionName { } impl Display for RegionName { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.name) } } @@ -183,7 +173,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { value } - /// Check for the case where `fr` maps to something that the + /// Checks for the case where `fr` maps to something that the /// *user* has a name for. In that case, we'll be able to map /// `fr` to a `Region<'tcx>`, and that region will be one of /// named variants. @@ -282,7 +272,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } } - /// Get a span of a named region to provide context for error messages that + /// Gets a span of a named region to provide context for error messages that /// mention that span, for example: /// /// ``` @@ -303,9 +293,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { name: &InternedString, ) -> Span { let scope = error_region.free_region_binding_scope(tcx); - let node = tcx.hir().as_local_node_id(scope).unwrap_or(DUMMY_NODE_ID); + let node = tcx.hir().as_local_hir_id(scope).unwrap_or(hir::DUMMY_HIR_ID); - let span = tcx.sess.source_map().def_span(tcx.hir().span(node)); + let span = tcx.sess.source_map().def_span(tcx.hir().span_by_hir_id(node)); if let Some(param) = tcx.hir() .get_generics(scope) .and_then(|generics| generics.get_named(name)) @@ -316,7 +306,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } } - /// Find an argument that contains `fr` and label it with a fully + /// Finds an argument that contains `fr` and label it with a fully /// elaborated type, returning something like `'1`. 
Result looks /// like: /// @@ -406,9 +396,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { argument_ty: Ty<'tcx>, counter: &mut usize, ) -> Option { - let type_name = with_highlight_region_for_regionvid(needle_fr, *counter, || { - infcx.extract_type_name(&argument_ty) - }); + let mut highlight = RegionHighlightMode::default(); + highlight.highlighting_region_vid(needle_fr, *counter); + let type_name = infcx.extract_type_name(&argument_ty, Some(highlight)); debug!( "give_name_if_we_cannot_match_hir_ty: type_name={:?} needle_fr={:?}", @@ -438,7 +428,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// to. For example, we might produce an annotation like this: /// /// ``` - /// | fn a(items: &[T]) -> Box> { + /// | fn a(items: &[T]) -> Box> { /// | - let's call the lifetime of this reference `'1` /// ``` /// @@ -447,7 +437,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// `argument_hir_ty`, a `hir::Ty` (the syntax of the type /// annotation). We are descending through the types stepwise, /// looking in to find the region `needle_fr` in the internal - /// type. Once we find that, we can use the span of the `hir::Ty` + /// type. Once we find that, we can use the span of the `hir::Ty` /// to add the highlight. /// /// This is a somewhat imperfect process, so long the way we also @@ -551,7 +541,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// types+hir to search through). fn match_adt_and_segment<'hir>( &self, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, needle_fr: RegionVid, last_segment: &'hir hir::PathSegment, counter: &mut usize, @@ -597,7 +587,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// `search_stack` the types+hir to search through. fn try_match_adt_and_generic_args<'hir>( &self, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, needle_fr: RegionVid, args: &'hir hir::GenericArgs, search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty)>, @@ -614,7 +604,14 @@ impl<'tcx> RegionInferenceContext<'tcx> { search_stack.push((ty, hir_ty)); } - (UnpackedKind::Lifetime(_), _) | (UnpackedKind::Type(_), _) => { + (UnpackedKind::Const(_ct), hir::GenericArg::Const(_hir_ct)) => { + // Lifetimes cannot be found in consts, so we don't need + // to search anything here. + } + + (UnpackedKind::Lifetime(_), _) + | (UnpackedKind::Type(_), _) + | (UnpackedKind::Const(_), _) => { // I *think* that HIR lowering should ensure this // doesn't happen, even in erroneous // programs. Else we should use delay-span-bug. @@ -631,7 +628,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { None } - /// Find a closure upvar that contains `fr` and label it with a + /// Finds a closure upvar that contains `fr` and label it with a /// fully elaborated type, returning something like `'1`. Result /// looks like: /// @@ -657,7 +654,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { }) } - /// Check for arguments appearing in the (closure) return type. It + /// Checks for arguments appearing in the (closure) return type. It /// must be a closure since, in a free fn, such an argument would /// have to either also appear in an argument (if using elision) /// or be early bound (named, not in argument). 
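The `UnpackedKind::Const` arms added above keep the match exhaustive now that const generic arguments exist, and record explicitly that a const argument can never contain a region to search for. A minimal sketch of that exhaustive-match discipline over an invented generic-argument enum (not rustc's real `UnpackedKind`):

enum GenericArg {
    Lifetime(&'static str),
    Type(&'static str),
    Const(u64),
}

fn regions_in(arg: &GenericArg) -> Vec<&'static str> {
    match arg {
        GenericArg::Lifetime(r) => vec![*r],
        // Types may mention regions; a real implementation would walk the type.
        GenericArg::Type(_) => vec![],
        // Consts cannot contain regions, so there is nothing to search here.
        GenericArg::Const(_) => vec![],
    }
}

fn main() {
    let args = [
        GenericArg::Lifetime("'a"),
        GenericArg::Type("Vec<u8>"),
        GenericArg::Const(3),
    ];
    for arg in &args {
        println!("{:?}", regions_in(arg));
    }
}
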
@@ -683,17 +680,21 @@ impl<'tcx> RegionInferenceContext<'tcx> { return None; } - let type_name = with_highlight_region_for_regionvid( - fr, *counter, || infcx.extract_type_name(&return_ty)); + let mut highlight = RegionHighlightMode::default(); + highlight.highlighting_region_vid(fr, *counter); + let type_name = infcx.extract_type_name(&return_ty, Some(highlight)); let mir_node_id = tcx.hir().as_local_node_id(mir_def_id).expect("non-local mir"); let (return_span, mir_description) = match tcx.hir().get(mir_node_id) { hir::Node::Expr(hir::Expr { - node: hir::ExprKind::Closure(_, _, _, span, gen_move), + node: hir::ExprKind::Closure(_, return_ty, _, span, gen_move), .. }) => ( - tcx.sess.source_map().end_point(*span), + match return_ty.output { + hir::FunctionRetTy::DefaultReturn(_) => tcx.sess.source_map().end_point(*span), + hir::FunctionRetTy::Return(_) => return_ty.output.span(), + }, if gen_move.is_some() { " of generator" } else { @@ -720,7 +721,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { }) } - /// Create a synthetic region named `'1`, incrementing the + /// Creates a synthetic region named `'1`, incrementing the /// counter. fn synthesize_region_name(&self, counter: &mut usize) -> InternedString { let c = *counter; diff --git a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/var_name.rs b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/var_name.rs index 0c0504b7b316d..f6bbaf2db0383 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/var_name.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/error_reporting/var_name.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::region_infer::RegionInferenceContext; -use borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::region_infer::RegionInferenceContext; +use crate::borrow_check::nll::ToRegionVid; use rustc::mir::{Local, Mir}; use rustc::ty::{RegionVid, TyCtxt}; use rustc_data_structures::indexed_vec::Idx; @@ -81,11 +71,10 @@ impl<'tcx> RegionInferenceContext<'tcx> { upvar_index: usize, ) -> (Symbol, Span) { let upvar_hir_id = mir.upvar_decls[upvar_index].var_hir_id.assert_crate_local(); - let upvar_node_id = tcx.hir().hir_to_node_id(upvar_hir_id); - debug!("get_upvar_name_and_span_for_region: upvar_node_id={:?}", upvar_node_id); + debug!("get_upvar_name_and_span_for_region: upvar_hir_id={:?}", upvar_hir_id); - let upvar_name = tcx.hir().name(upvar_node_id); - let upvar_span = tcx.hir().span(upvar_node_id); + let upvar_name = tcx.hir().name_by_hir_id(upvar_hir_id); + let upvar_span = tcx.hir().span_by_hir_id(upvar_hir_id); debug!("get_upvar_name_and_span_for_region: upvar_name={:?} upvar_span={:?}", upvar_name, upvar_span); diff --git a/src/librustc_mir/borrow_check/nll/region_infer/graphviz.rs b/src/librustc_mir/borrow_check/nll/region_infer/graphviz.rs index e2e19a85bec87..cffc66ac7ddfd 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/graphviz.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/graphviz.rs @@ -1,20 +1,9 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module provides linkage between RegionInferenceContext and //! libgraphviz traits, specialized to attaching borrowck analysis //! data to rendered labels. use super::*; -use borrow_check::nll::constraints::OutlivesConstraint; -use dot; +use crate::borrow_check::nll::constraints::OutlivesConstraint; use std::borrow::Cow; use std::io::{self, Write}; diff --git a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs index cbd1e666284cf..ac10683598aa7 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/mod.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/mod.rs @@ -1,19 +1,11 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::universal_regions::UniversalRegions; -use borrow_check::nll::constraints::graph::NormalConstraintGraph; -use borrow_check::nll::constraints::{ConstraintSccIndex, ConstraintSet, OutlivesConstraint}; -use borrow_check::nll::region_infer::values::{PlaceholderIndices, RegionElement, ToElementIndex}; -use borrow_check::nll::type_check::free_region_relations::UniversalRegionRelations; -use borrow_check::nll::type_check::Locations; +use crate::borrow_check::nll::constraints::graph::NormalConstraintGraph; +use crate::borrow_check::nll::constraints::{ConstraintSccIndex, ConstraintSet, OutlivesConstraint}; +use crate::borrow_check::nll::region_infer::values::{ + PlaceholderIndices, RegionElement, ToElementIndex +}; +use crate::borrow_check::nll::type_check::free_region_relations::UniversalRegionRelations; +use crate::borrow_check::nll::type_check::Locations; use rustc::hir::def_id::DefId; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::region_constraints::{GenericKind, VarInfos, VerifyBound}; @@ -22,8 +14,8 @@ use rustc::mir::{ ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements, ConstraintCategory, Local, Location, Mir, }; -use rustc::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable}; -use rustc::util::common; +use rustc::ty::{self, subst::SubstsRef, RegionVid, Ty, TyCtxt, TypeFoldable}; +use rustc::util::common::{self, ErrorReported}; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::graph::scc::Sccs; @@ -43,7 +35,7 @@ use self::values::{LivenessValues, RegionValueElements, RegionValues}; use super::ToRegionVid; pub struct RegionInferenceContext<'tcx> { - /// Contains the definition for every region variable. Region + /// Contains the definition for every region variable. Region /// variables are identified by their index (`RegionVid`). The /// definition contains information about where the region came /// from as well as its final inferred value. @@ -132,7 +124,7 @@ pub(crate) enum Cause { } /// A "type test" corresponds to an outlives constraint between a type -/// and a lifetime, like `T: 'x` or `::Bar: 'x`. They are +/// and a lifetime, like `T: 'x` or `::Bar: 'x`. They are /// translated from the `Verify` region constraints in the ordinary /// inference context. 
/// @@ -145,10 +137,10 @@ pub(crate) enum Cause { /// /// In some cases, however, there are outlives relationships that are /// not converted into a region constraint, but rather into one of -/// these "type tests". The distinction is that a type test does not +/// these "type tests". The distinction is that a type test does not /// influence the inference result, but instead just examines the /// values that we ultimately inferred for each region variable and -/// checks that they meet certain extra criteria. If not, an error +/// checks that they meet certain extra criteria. If not, an error /// can be issued. /// /// One reason for this is that these type tests typically boil down @@ -294,7 +286,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// Initializes the region variables for each universally /// quantified region (lifetime parameter). The first N variables /// always correspond to the regions appearing in the function - /// signature (both named and anonymous) and where clauses. This + /// signature (both named and anonymous) and where-clauses. This /// function iterates over those regions and initializes them with /// minimum values. /// @@ -376,12 +368,12 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.universal_regions.to_region_vid(r) } - /// Add annotations for `#[rustc_regions]`; see `UniversalRegions::annotate`. + /// Adds annotations for `#[rustc_regions]`; see `UniversalRegions::annotate`. crate fn annotate(&self, tcx: TyCtxt<'_, '_, 'tcx>, err: &mut DiagnosticBuilder<'_>) { self.universal_regions.annotate(tcx, err) } - /// Returns true if the region `r` contains the point `p`. + /// Returns `true` if the region `r` contains the point `p`. /// /// Panics if called before `solve()` executes, crate fn region_contains(&self, r: impl ToRegionVid, p: impl ToElementIndex) -> bool { @@ -401,7 +393,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.scc_universes[scc] } - /// Perform region inference and report errors if we see any + /// Performs region inference and report errors if we see any /// unsatisfiable constraints. If this is a closure, returns the /// region requirements to propagate to our creator, if any. pub(super) fn solve<'gcx>( @@ -411,8 +403,9 @@ impl<'tcx> RegionInferenceContext<'tcx> { mir_def_id: DefId, errors_buffer: &mut Vec, ) -> Option> { - common::time( - infcx.tcx.sess, + common::time_ext( + infcx.tcx.sess.time_extended(), + Some(infcx.tcx.sess), &format!("solve_nll_region_constraints({:?})", mir_def_id), || self.solve_inner(infcx, mir, mir_def_id, errors_buffer), ) @@ -541,7 +534,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { ); } - /// True if all the elements in the value of `scc_b` are nameable + /// Returns `true` if all the elements in the value of `scc_b` are nameable /// in `scc_a`. Used during constraint propagation, and only once /// the value of `scc_b` has been computed. 
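In the hunk above, `solve` switches from `common::time` to `common::time_ext`, passing `sess.time_extended()` so the timing work is gated on a session flag. A rough sketch of a helper with that shape (invented here; rustc's real `time_ext` also reports into the `Session`):

use std::time::Instant;

// Times the closure only when `do_it` is set; otherwise just runs it.
fn time_ext<T>(do_it: bool, what: &str, f: impl FnOnce() -> T) -> T {
    if !do_it {
        return f();
    }
    let start = Instant::now();
    let result = f();
    eprintln!("time: {:?}\t{}", start.elapsed(), what);
    result
}

fn main() {
    let answer = time_ext(true, "solve_nll_region_constraints(example)", || {
        (0..1_000u64).sum::<u64>()
    });
    println!("{}", answer);
}
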
fn universe_compatible(&self, scc_b: ConstraintSccIndex, scc_a: ConstraintSccIndex) -> bool { @@ -771,20 +764,26 @@ impl<'tcx> RegionInferenceContext<'tcx> { debug!("try_promote_type_test: ur={:?}", ur); - let non_local_ub = self.universal_region_relations.non_local_upper_bound(ur); + let non_local_ub = self.universal_region_relations.non_local_upper_bounds(&ur); debug!("try_promote_type_test: non_local_ub={:?}", non_local_ub); - assert!(self.universal_regions.is_universal_region(non_local_ub)); - assert!(!self.universal_regions.is_local_free_region(non_local_ub)); - - let requirement = ClosureOutlivesRequirement { - subject, - outlived_free_region: non_local_ub, - blame_span: locations.span(mir), - category: ConstraintCategory::Boring, - }; - debug!("try_promote_type_test: pushing {:#?}", requirement); - propagated_outlives_requirements.push(requirement); + // This is slightly too conservative. To show T: '1, given `'2: '1` + // and `'3: '1` we only need to prove that T: '2 *or* T: '3, but to + // avoid potential non-determinism we approximate this by requiring + // T: '1 and T: '2. + for &upper_bound in non_local_ub { + debug_assert!(self.universal_regions.is_universal_region(upper_bound)); + debug_assert!(!self.universal_regions.is_local_free_region(upper_bound)); + + let requirement = ClosureOutlivesRequirement { + subject, + outlived_free_region: upper_bound, + blame_span: locations.span(mir), + category: ConstraintCategory::Boring, + }; + debug!("try_promote_type_test: pushing {:#?}", requirement); + propagated_outlives_requirements.push(requirement); + } } true } @@ -936,8 +935,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { lub } - /// Test if `test` is true when applied to `lower_bound` at - /// `point`, and returns true or false. + /// Tests if `test` is true when applied to `lower_bound` at + /// `point`. fn eval_verify_bound( &self, tcx: TyCtxt<'_, '_, 'tcx>, @@ -998,7 +997,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// different results. (For example, there might be two regions /// with the same value that are not in the same SCC). /// - /// NB. This is not an ideal approach and I would like to revisit + /// N.B., this is not an ideal approach and I would like to revisit /// it. However, it works pretty well in practice. In particular, /// this is needed to deal with projection outlives bounds like /// @@ -1006,7 +1005,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// /// In particular, this routine winds up being important when /// there are bounds like `where >::Item: 'b` in the - /// environment. In this case, if we can show that `'0 == 'a`, + /// environment. In this case, if we can show that `'0 == 'a`, /// and that `'b: '1`, then we know that the clause is /// satisfied. In such cases, particularly due to limitations of /// the trait solver =), we usually wind up with a where-clause like @@ -1085,7 +1084,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { /// Once regions have been propagated, this method is used to see /// whether any of the constraints were too strong. In particular, /// we want to check for a case where a universally quantified - /// region exceeded its bounds. Consider: + /// region exceeded its bounds. Consider: /// /// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x } /// @@ -1134,7 +1133,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } } - /// Check the final value for the free region `fr` to see if it + /// Checks the final value for the free region `fr` to see if it /// grew too large. 
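The rewritten `try_promote_type_test` above is deliberately conservative: instead of reducing to a single postdominating upper bound, it now pushes one `ClosureOutlivesRequirement` per non-local upper bound, trading a little precision for determinism. A small sketch of that per-bound fan-out with invented types:

#[derive(Debug)]
struct Requirement {
    subject: &'static str,
    outlived_free_region: u32,
}

// Emit one requirement per upper bound; proving `T: 'x` for every bound is
// stronger than necessary (any single bound would do) but deterministic.
fn promote(subject: &'static str, non_local_upper_bounds: &[u32]) -> Vec<Requirement> {
    non_local_upper_bounds
        .iter()
        .map(|&upper_bound| Requirement {
            subject,
            outlived_free_region: upper_bound,
        })
        .collect()
}

fn main() {
    // With bounds `'2` and `'3`, we end up requiring both `T: '2` and `T: '3`.
    for req in promote("T", &[2, 3]) {
        println!("{:?}", req);
    }
}
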
In particular, examine what `end(X)` points /// wound up in `fr`'s final value; for each `end(X)` where `X != /// fr`, we want to check that `fr: X`. If not, that's either an @@ -1165,63 +1164,109 @@ impl<'tcx> RegionInferenceContext<'tcx> { .is_none() ); + // Only check all of the relations for the main representative of each + // SCC, otherwise just check that we outlive said representative. This + // reduces the number of redundant relations propagated out of + // closures. + // Note that the representative will be a universal region if there is + // one in this SCC, so we will always check the representative here. + let representative = self.scc_representatives[longer_fr_scc]; + if representative != longer_fr { + self.check_universal_region_relation( + longer_fr, + representative, + infcx, + mir, + mir_def_id, + propagated_outlives_requirements, + errors_buffer, + ); + return; + } + // Find every region `o` such that `fr: o` // (because `fr` includes `end(o)`). for shorter_fr in self.scc_values.universal_regions_outlived_by(longer_fr_scc) { - // If it is known that `fr: o`, carry on. - if self.universal_region_relations - .outlives(longer_fr, shorter_fr) - { - continue; + if let Some(ErrorReported) = self.check_universal_region_relation( + longer_fr, + shorter_fr, + infcx, + mir, + mir_def_id, + propagated_outlives_requirements, + errors_buffer, + ) { + // continuing to iterate just reports more errors than necessary + return; } + } + } - debug!( - "check_universal_region: fr={:?} does not outlive shorter_fr={:?}", - longer_fr, shorter_fr, - ); + fn check_universal_region_relation( + &self, + longer_fr: RegionVid, + shorter_fr: RegionVid, + infcx: &InferCtxt<'_, 'gcx, 'tcx>, + mir: &Mir<'tcx>, + mir_def_id: DefId, + propagated_outlives_requirements: &mut Option<&mut Vec>>, + errors_buffer: &mut Vec, + ) -> Option { + // If it is known that `fr: o`, carry on. + if self.universal_region_relations + .outlives(longer_fr, shorter_fr) + { + return None; + } - let blame_span_category = self.find_outlives_blame_span(mir, longer_fr, shorter_fr); - - if let Some(propagated_outlives_requirements) = propagated_outlives_requirements { - // Shrink `fr` until we find a non-local region (if we do). - // We'll call that `fr-` -- it's ever so slightly smaller than `fr`. - if let Some(fr_minus) = self.universal_region_relations - .non_local_lower_bound(longer_fr) - { - debug!("check_universal_region: fr_minus={:?}", fr_minus); - - // Grow `shorter_fr` until we find a non-local - // region. (We always will.) We'll call that - // `shorter_fr+` -- it's ever so slightly larger than - // `fr`. - let shorter_fr_plus = self.universal_region_relations - .non_local_upper_bound(shorter_fr); - debug!( - "check_universal_region: shorter_fr_plus={:?}", - shorter_fr_plus - ); + debug!( + "check_universal_region_relation: fr={:?} does not outlive shorter_fr={:?}", + longer_fr, shorter_fr, + ); + + if let Some(propagated_outlives_requirements) = propagated_outlives_requirements { + // Shrink `longer_fr` until we find a non-local region (if we do). + // We'll call it `fr-` -- it's ever so slightly smaller than + // `longer_fr`. + if let Some(fr_minus) = self + .universal_region_relations + .non_local_lower_bound(longer_fr) + { + debug!("check_universal_region: fr_minus={:?}", fr_minus); + + let blame_span_category = self.find_outlives_blame_span(mir, longer_fr, shorter_fr); + + // Grow `shorter_fr` until we find some non-local regions. (We + // always will.) 
We'll call them `shorter_fr+` -- they're ever + // so slightly larger than `shorter_fr`. + let shorter_fr_plus = self.universal_region_relations + .non_local_upper_bounds(&shorter_fr); + debug!( + "check_universal_region: shorter_fr_plus={:?}", + shorter_fr_plus + ); + for &&fr in &shorter_fr_plus { // Push the constraint `fr-: shorter_fr+` propagated_outlives_requirements.push(ClosureOutlivesRequirement { subject: ClosureOutlivesSubject::Region(fr_minus), - outlived_free_region: shorter_fr_plus, + outlived_free_region: fr, blame_span: blame_span_category.1, category: blame_span_category.0, }); - continue; } + return None; } - - // If we are not in a context where we can propagate - // errors, or we could not shrink `fr` to something - // smaller, then just report an error. - // - // Note: in this case, we use the unapproximated regions - // to report the error. This gives better error messages - // in some cases. - self.report_error(mir, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer); - return; // continuing to iterate just reports more errors than necessary } + + // If we are not in a context where we can't propagate errors, or we + // could not shrink `fr` to something smaller, then just report an + // error. + // + // Note: in this case, we use the unapproximated regions to report the + // error. This gives better error messages in some cases. + self.report_error(mir, infcx, mir_def_id, longer_fr, shorter_fr, errors_buffer); + Some(ErrorReported) } fn check_bound_universal_region<'gcx>( @@ -1313,9 +1358,8 @@ pub trait ClosureRegionRequirementsExt<'gcx, 'tcx> { fn apply_requirements( &self, tcx: TyCtxt<'_, 'gcx, 'tcx>, - location: Location, closure_def_id: DefId, - closure_substs: &'tcx ty::subst::Substs<'tcx>, + closure_substs: SubstsRef<'tcx>, ) -> Vec>; fn subst_closure_mapping( @@ -1344,13 +1388,12 @@ impl<'gcx, 'tcx> ClosureRegionRequirementsExt<'gcx, 'tcx> for ClosureRegionRequi fn apply_requirements( &self, tcx: TyCtxt<'_, 'gcx, 'tcx>, - location: Location, closure_def_id: DefId, - closure_substs: &'tcx ty::subst::Substs<'tcx>, + closure_substs: SubstsRef<'tcx>, ) -> Vec> { debug!( - "apply_requirements(location={:?}, closure_def_id={:?}, closure_substs={:?})", - location, closure_def_id, closure_substs + "apply_requirements(closure_def_id={:?}, closure_substs={:?})", + closure_def_id, closure_substs ); // Extract the values of the free regions in `closure_substs` diff --git a/src/librustc_mir/borrow_check/nll/region_infer/values.rs b/src/librustc_mir/borrow_check/nll/region_infer/values.rs index 4f5829f34069c..2101447965a15 100644 --- a/src/librustc_mir/borrow_check/nll/region_infer/values.rs +++ b/src/librustc_mir/borrow_check/nll/region_infer/values.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::{BasicBlock, Location, Mir}; use rustc::ty::{self, RegionVid}; use rustc_data_structures::bit_set::{HybridBitSet, SparseBitMatrix}; @@ -126,14 +116,14 @@ impl RegionValueElements { } } -/// A single integer representing a `Location` in the MIR control-flow -/// graph. Constructed efficiently from `RegionValueElements`. newtype_index! { + /// A single integer representing a `Location` in the MIR control-flow + /// graph. 
Constructed efficiently from `RegionValueElements`. pub struct PointIndex { DEBUG_FORMAT = "PointIndex({})" } } -/// A single integer representing a `ty::Placeholder`. newtype_index! { + /// A single integer representing a `ty::Placeholder`. pub struct PlaceholderIndex { DEBUG_FORMAT = "PlaceholderIndex({})" } } @@ -164,10 +154,10 @@ impl LivenessValues { /// Creates a new set of "region values" that tracks causal information. /// Each of the regions in num_region_variables will be initialized with an /// empty set of points and no causal information. - crate fn new(elements: &Rc) -> Self { + crate fn new(elements: Rc) -> Self { Self { - elements: elements.clone(), points: SparseBitMatrix::new(elements.num_points), + elements: elements, } } @@ -176,7 +166,7 @@ impl LivenessValues { self.points.rows() } - /// Adds the given element to the value for the given region. Returns true if + /// Adds the given element to the value for the given region. Returns whether /// the element is newly added (i.e., was not already present). crate fn add_element(&mut self, row: N, location: Location) -> bool { debug!("LivenessValues::add(r={:?}, location={:?})", row, location); @@ -185,7 +175,7 @@ impl LivenessValues { } /// Adds all the elements in the given bit array into the given - /// region. Returns true if any of them are newly added. + /// region. Returns whether any of them are newly added. crate fn add_elements(&mut self, row: N, locations: &HybridBitSet) -> bool { debug!( "LivenessValues::add_elements(row={:?}, locations={:?})", @@ -199,7 +189,7 @@ impl LivenessValues { self.points.insert_all_into_row(row); } - /// True if the region `r` contains the given element. + /// Returns `true` if the region `r` contains the given element. crate fn contains(&self, row: N, location: Location) -> bool { let index = self.elements.point_from_location(location); self.points.contains(row, index) @@ -301,7 +291,7 @@ impl RegionValues { } } - /// Adds the given element to the value for the given region. Returns true if + /// Adds the given element to the value for the given region. Returns whether /// the element is newly added (i.e., was not already present). crate fn add_element(&mut self, r: N, elem: impl ToElementIndex) -> bool { debug!("add(r={:?}, elem={:?})", r, elem); @@ -313,7 +303,7 @@ impl RegionValues { self.points.insert_all_into_row(r); } - /// Add all elements in `r_from` to `r_to` (because e.g., `r_to: + /// Adds all elements in `r_from` to `r_to` (because e.g., `r_to: /// r_from`). crate fn add_region(&mut self, r_to: N, r_from: N) -> bool { self.points.union_rows(r_from, r_to) @@ -321,7 +311,7 @@ impl RegionValues { | self.placeholders.union_rows(r_from, r_to) } - /// True if the region `r` contains the given element. + /// Returns `true` if the region `r` contains the given element. crate fn contains(&self, r: N, elem: impl ToElementIndex) -> bool { elem.contained_in_row(self, r) } @@ -335,7 +325,7 @@ impl RegionValues { } } - /// True if `sup_region` contains all the CFG points that + /// Returns `true` if `sup_region` contains all the CFG points that /// `sub_region` contains. Ignores universal regions. 
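`LivenessValues::new` above now takes its `Rc<RegionValueElements>` by value and simply stores the handle, leaving any cloning to the caller instead of cloning out of a `&Rc` inside the constructor. A standalone sketch of that constructor shape, with stand-in types:

use std::rc::Rc;

struct Elements {
    num_points: usize,
}

struct LivenessValues {
    elements: Rc<Elements>,
    points: Vec<bool>,
}

impl LivenessValues {
    // The caller decides whether to move or clone its `Rc`; the constructor
    // just stores whatever handle it is given.
    fn new(elements: Rc<Elements>) -> Self {
        LivenessValues {
            points: vec![false; elements.num_points],
            elements,
        }
    }
}

fn main() {
    let elements = Rc::new(Elements { num_points: 4 });
    let values = LivenessValues::new(elements.clone());
    println!("{} points, {} handles", values.points.len(), Rc::strong_count(&elements));
}
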
crate fn contains_points(&self, sup_region: N, sub_region: N) -> bool { if let Some(sub_row) = self.points.row(sub_region) { diff --git a/src/librustc_mir/borrow_check/nll/renumber.rs b/src/librustc_mir/borrow_check/nll/renumber.rs index e9f749ac092d2..58e567c39a9bb 100644 --- a/src/librustc_mir/borrow_check/nll/renumber.rs +++ b/src/librustc_mir/borrow_check/nll/renumber.rs @@ -1,16 +1,6 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::{self, ClosureSubsts, GeneratorSubsts, Ty, TypeFoldable}; -use rustc::mir::{Location, Mir, UserTypeAnnotation}; +use rustc::mir::{Location, Mir}; use rustc::mir::visit::{MutVisitor, TyContext}; use rustc::infer::{InferCtxt, NLLRegionVariableOrigin}; @@ -57,6 +47,14 @@ impl<'a, 'gcx, 'tcx> NLLVisitor<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'gcx, 'tcx> { + fn visit_mir(&mut self, mir: &mut Mir<'tcx>) { + for promoted in mir.promoted.iter_mut() { + self.visit_mir(promoted); + } + + self.super_mir(mir); + } + fn visit_ty(&mut self, ty: &mut Ty<'tcx>, ty_context: TyContext) { debug!("visit_ty(ty={:?}, ty_context={:?})", ty, ty_context); @@ -65,15 +63,7 @@ impl<'a, 'gcx, 'tcx> MutVisitor<'tcx> for NLLVisitor<'a, 'gcx, 'tcx> { debug!("visit_ty: ty={:?}", ty); } - fn visit_user_type_annotation(&mut self, _ty: &mut UserTypeAnnotation<'tcx>) { - // User type annotations represent the types that the user - // wrote in the progarm. We don't want to erase the regions - // from these types: rather, we want to add them as - // constraints at type-check time. - debug!("visit_user_type_annotation: skipping renumber"); - } - - fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, location: Location) { + fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, location: Location) { debug!("visit_substs(substs={:?}, location={:?})", substs, location); *substs = self.renumber_regions(&{ *substs }); diff --git a/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs b/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs index 35ec478143546..bef159e996b87 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/constraint_conversion.rs @@ -1,18 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
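The renumbering visitor above gains a `visit_mir` override so that promoted bodies get their regions renumbered too, not just the main body. A rough sketch of that recursion with an invented body/visitor pair (not rustc's real `MutVisitor`):

struct Body {
    name: &'static str,
    promoted: Vec<Body>,
}

trait Visitor {
    fn visit_body(&mut self, body: &mut Body) {
        // Recurse into every promoted body before handling the main one.
        for promoted in body.promoted.iter_mut() {
            self.visit_body(promoted);
        }
        self.super_body(body);
    }
    fn super_body(&mut self, body: &mut Body);
}

struct Renumberer {
    visited: Vec<&'static str>,
}

impl Visitor for Renumberer {
    fn super_body(&mut self, body: &mut Body) {
        self.visited.push(body.name);
    }
}

fn main() {
    let mut body = Body {
        name: "main body",
        promoted: vec![Body { name: "promoted#0", promoted: vec![] }],
    };
    let mut v = Renumberer { visited: vec![] };
    v.visit_body(&mut body);
    assert_eq!(v.visited, ["promoted#0", "main body"]);
    println!("{:?}", v.visited);
}
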
- -use borrow_check::nll::constraints::OutlivesConstraint; -use borrow_check::nll::region_infer::TypeTest; -use borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints}; -use borrow_check::nll::universal_regions::UniversalRegions; -use borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::constraints::OutlivesConstraint; +use crate::borrow_check::nll::region_infer::TypeTest; +use crate::borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints}; +use crate::borrow_check::nll::universal_regions::UniversalRegions; +use crate::borrow_check::nll::ToRegionVid; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::outlives::env::RegionBoundPairs; use rustc::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate}; @@ -109,6 +99,11 @@ impl<'a, 'gcx, 'tcx> ConstraintConversion<'a, 'gcx, 'tcx> { param_env, ).type_must_outlive(origin, t1, r2); } + + UnpackedKind::Const(_) => { + // Consts cannot outlive one another, so we + // don't need to handle any relations here. + } } } diff --git a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs index 3d0f3d9fc7d8d..3b663ef6dad61 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/free_region_relations.rs @@ -1,17 +1,7 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::type_check::constraint_conversion; -use borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints}; -use borrow_check::nll::universal_regions::UniversalRegions; -use borrow_check::nll::ToRegionVid; +use crate::borrow_check::nll::type_check::constraint_conversion; +use crate::borrow_check::nll::type_check::{Locations, MirTypeckRegionConstraints}; +use crate::borrow_check::nll::universal_regions::UniversalRegions; +use crate::borrow_check::nll::ToRegionVid; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::outlives::free_region_map::FreeRegionRelations; use rustc::infer::region_constraints::GenericKind; @@ -29,7 +19,7 @@ crate struct UniversalRegionRelations<'tcx> { universal_regions: Rc>, /// Stores the outlives relations that are known to hold from the - /// implied bounds, in-scope where clauses, and that sort of + /// implied bounds, in-scope where-clauses, and that sort of /// thing. outlives: TransitiveRelation, @@ -45,7 +35,7 @@ crate struct UniversalRegionRelations<'tcx> { /// added via implicit bounds. /// /// Each region here is guaranteed to be a key in the `indices` -/// map. We use the "original" regions (i.e., the keys from the +/// map. We use the "original" regions (i.e., the keys from the /// map, and not the values) because the code in /// `process_registered_region_obligations` has some special-cased /// logic expecting to see (e.g.) `ReStatic`, and if we supplied @@ -54,7 +44,7 @@ type RegionBoundPairs<'tcx> = Vec<(ty::Region<'tcx>, GenericKind<'tcx>)>; /// As part of computing the free region relations, we also have to /// normalize the input-output types, which we then need later. So we -/// return those. This vector consists of first the input types and +/// return those. 
This vector consists of first the input types and /// then the output type as the last element. type NormalizedInputsAndOutput<'tcx> = Vec>; @@ -115,44 +105,89 @@ impl UniversalRegionRelations<'tcx> { /// Finds an "upper bound" for `fr` that is not local. In other /// words, returns the smallest (*) known region `fr1` that (a) - /// outlives `fr` and (b) is not local. This cannot fail, because - /// we will always find `'static` at worst. + /// outlives `fr` and (b) is not local. /// - /// (*) If there are multiple competing choices, we pick the "postdominating" - /// one. See `TransitiveRelation::postdom_upper_bound` for details. - crate fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid { + /// (*) If there are multiple competing choices, we return all of them. + crate fn non_local_upper_bounds(&'a self, fr: &'a RegionVid) -> Vec<&'a RegionVid> { debug!("non_local_upper_bound(fr={:?})", fr); - self.non_local_bound(&self.inverse_outlives, fr) + let res = self.non_local_bounds(&self.inverse_outlives, fr); + assert!(!res.is_empty(), "can't find an upper bound!?"); + res + } + + /// Returns the "postdominating" bound of the set of + /// `non_local_upper_bounds` for the given region. + crate fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid { + let upper_bounds = self.non_local_upper_bounds(&fr); + + // In case we find more than one, reduce to one for + // convenience. This is to prevent us from generating more + // complex constraints, but it will cause spurious errors. + let post_dom = self + .inverse_outlives + .mutual_immediate_postdominator(upper_bounds); + + debug!("non_local_bound: post_dom={:?}", post_dom); + + post_dom + .and_then(|&post_dom| { + // If the mutual immediate postdom is not local, then + // there is no non-local result we can return. + if !self.universal_regions.is_local_free_region(post_dom) { + Some(post_dom) + } else { + None + } + }) .unwrap_or(self.universal_regions.fr_static) } + /// Finds a "lower bound" for `fr` that is not local. In other /// words, returns the largest (*) known region `fr1` that (a) is - /// outlived by `fr` and (b) is not local. This cannot fail, - /// because we will always find `'static` at worst. + /// outlived by `fr` and (b) is not local. /// /// (*) If there are multiple competing choices, we pick the "postdominating" /// one. See `TransitiveRelation::postdom_upper_bound` for details. crate fn non_local_lower_bound(&self, fr: RegionVid) -> Option { debug!("non_local_lower_bound(fr={:?})", fr); - self.non_local_bound(&self.outlives, fr) + let lower_bounds = self.non_local_bounds(&self.outlives, &fr); + + // In case we find more than one, reduce to one for + // convenience. This is to prevent us from generating more + // complex constraints, but it will cause spurious errors. + let post_dom = self + .outlives + .mutual_immediate_postdominator(lower_bounds); + + debug!("non_local_bound: post_dom={:?}", post_dom); + + post_dom + .and_then(|&post_dom| { + // If the mutual immediate postdom is not local, then + // there is no non-local result we can return. + if !self.universal_regions.is_local_free_region(post_dom) { + Some(post_dom) + } else { + None + } + }) } - /// Helper for `non_local_upper_bound` and - /// `non_local_lower_bound`. Repeatedly invokes `postdom_parent` - /// until we find something that is not local. Returns None if we - /// never do so. - fn non_local_bound( + /// Helper for `non_local_upper_bounds` and `non_local_lower_bounds`. 
+ /// Repeatedly invokes `postdom_parent` until we find something that is not + /// local. Returns `None` if we never do so. + fn non_local_bounds<'a>( &self, - relation: &TransitiveRelation, - fr0: RegionVid, - ) -> Option { + relation: &'a TransitiveRelation, + fr0: &'a RegionVid, + ) -> Vec<&'a RegionVid> { // This method assumes that `fr0` is one of the universally // quantified region variables. - assert!(self.universal_regions.is_universal_region(fr0)); + assert!(self.universal_regions.is_universal_region(*fr0)); let mut external_parents = vec![]; - let mut queue = vec![&fr0]; + let mut queue = vec![fr0]; // Keep expanding `fr` into its parents until we reach // non-local regions. @@ -167,27 +202,10 @@ impl UniversalRegionRelations<'tcx> { debug!("non_local_bound: external_parents={:?}", external_parents); - // In case we find more than one, reduce to one for - // convenience. This is to prevent us from generating more - // complex constraints, but it will cause spurious errors. - let post_dom = relation - .mutual_immediate_postdominator(external_parents) - .cloned(); - - debug!("non_local_bound: post_dom={:?}", post_dom); - - post_dom.and_then(|post_dom| { - // If the mutual immediate postdom is not local, then - // there is no non-local result we can return. - if !self.universal_regions.is_local_free_region(post_dom) { - Some(post_dom) - } else { - None - } - }) + external_parents } - /// True if fr1 is known to outlive fr2. + /// Returns `true` if fr1 is known to outlive fr2. /// /// This will only ever be true for universally quantified regions. crate fn outlives(&self, fr1: RegionVid, fr2: RegionVid) -> bool { diff --git a/src/librustc_mir/borrow_check/nll/type_check/input_output.rs b/src/librustc_mir/borrow_check/nll/type_check/input_output.rs index bb890e65b53e6..50828c294fa1b 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/input_output.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/input_output.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module contains code to equate the input/output types appearing //! in the MIR with the expected input/output types from the function //! signature. This requires a bit of processing, as the expected types @@ -17,7 +7,7 @@ //! `RETURN_PLACE` the MIR arguments) are always fully normalized (and //! contain revealed `impl Trait` values). -use borrow_check::nll::universal_regions::UniversalRegions; +use crate::borrow_check::nll::universal_regions::UniversalRegions; use rustc::infer::LateBoundRegionConversionTime; use rustc::mir::*; use rustc::ty::Ty; diff --git a/src/librustc_mir/borrow_check/nll/type_check/liveness/liveness_map.rs b/src/librustc_mir/borrow_check/nll/type_check/liveness/liveness_map.rs deleted file mode 100644 index cc176cbc40392..0000000000000 --- a/src/librustc_mir/borrow_check/nll/type_check/liveness/liveness_map.rs +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -//! For the NLL computation, we need to compute liveness, but only for those -//! local variables whose types contain regions. The others are not of interest -//! to us. This file defines a new index type (LiveVar) that indexes into -//! a list of "variables whose type contain regions". It also defines a map from -//! Local to LiveVar and vice versa -- this map can be given to the -//! liveness code so that it only operates over variables with regions in their -//! types, instead of all variables. - -use borrow_check::nll::ToRegionVid; -use borrow_check::nll::facts::{AllFacts, AllFactsExt}; -use rustc::mir::{Local, Mir}; -use rustc::ty::{RegionVid, TyCtxt}; -use rustc_data_structures::fx::FxHashSet; -use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use util::liveness::LiveVariableMap; - -/// Map between Local and LiveVar indices: the purpose of this -/// map is to define the subset of local variables for which we need -/// to do a liveness computation. We only need to compute whether a -/// variable `X` is live if that variable contains some region `R` in -/// its type where `R` is not known to outlive a free region (i.e., -/// where `R` may be valid for just a subset of the fn body). -crate struct NllLivenessMap { - /// For each local variable, contains `Some(i)` if liveness is - /// needed for this variable. - pub from_local: IndexVec>, - - /// For each `LiveVar`, maps back to the original `Local` index. - pub to_local: IndexVec, -} - -impl LiveVariableMap for NllLivenessMap { - fn from_local(&self, local: Local) -> Option { - self.from_local[local] - } - - type LiveVar = LiveVar; - - fn from_live_var(&self, local: Self::LiveVar) -> Local { - self.to_local[local] - } - - fn num_variables(&self) -> usize { - self.to_local.len() - } -} - -impl NllLivenessMap { - crate fn compute( - tcx: TyCtxt<'_, '_, 'tcx>, - free_regions: &FxHashSet, - mir: &Mir<'tcx>, - ) -> Self { - let mut to_local = IndexVec::default(); - let facts_enabled = AllFacts::enabled(tcx); - let from_local: IndexVec> = mir.local_decls - .iter_enumerated() - .map(|(local, local_decl)| { - if tcx.all_free_regions_meet(&local_decl.ty, |r| { - free_regions.contains(&r.to_region_vid()) - }) && !facts_enabled { - // If all the regions in the type are free regions - // (or there are no regions), then we don't need - // to track liveness for this variable. - None - } else { - Some(to_local.push(local)) - } - }) - .collect(); - - debug!("{} total variables", mir.local_decls.len()); - debug!("{} variables need liveness", to_local.len()); - debug!("{} regions outlive free regions", free_regions.len()); - - Self { - from_local, - to_local, - } - } - - /// True if there are no local variables that need liveness computation. - crate fn is_empty(&self) -> bool { - self.to_local.is_empty() - } -} - -/// Index given to each local variable for which we need to -/// compute liveness information. For many locals, we are able to -/// skip liveness information: for example, those variables whose -/// types contain no regions. -newtype_index! { - pub struct LiveVar { .. 
} -} diff --git a/src/librustc_mir/borrow_check/nll/type_check/liveness/local_use_map.rs b/src/librustc_mir/borrow_check/nll/type_check/liveness/local_use_map.rs index 320422c9d3322..9b8940098852c 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/liveness/local_use_map.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/liveness/local_use_map.rs @@ -1,44 +1,40 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::nll::region_infer::values::{PointIndex, RegionValueElements}; -use borrow_check::nll::type_check::liveness::liveness_map::{LiveVar, NllLivenessMap}; +use crate::borrow_check::nll::region_infer::values::{PointIndex, RegionValueElements}; +use crate::util::liveness::{categorize, DefUse}; use rustc::mir::visit::{PlaceContext, Visitor}; use rustc::mir::{Local, Location, Mir}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::vec_linked_list as vll; -use util::liveness::{categorize, DefUse, LiveVariableMap}; /// A map that cross references each local with the locations where it /// is defined (assigned), used, or dropped. Used during liveness /// computation. -crate struct LocalUseMap<'me> { - liveness_map: &'me NllLivenessMap, - +/// +/// We keep track only of `Local`s we'll do the liveness analysis later, +/// this means that our internal `IndexVec`s will only be sparsely populated. +/// In the time-memory trade-off between keeping compact vectors with new +/// indexes (and needing to continuously map the `Local` index to its compact +/// counterpart) and having `IndexVec`s that we only use a fraction of, time +/// (and code simplicity) was favored. The rationale is that we only keep +/// a small number of `IndexVec`s throughout the entire analysis while, in +/// contrast, we're accessing each `Local` *many* times. +crate struct LocalUseMap { /// Head of a linked list of **definitions** of each variable -- /// definition in this context means assignment, e.g., `x` is /// defined in `x = y` but not `y`; that first def is the head of /// a linked list that lets you enumerate all places the variable /// is assigned. - first_def_at: IndexVec>, + first_def_at: IndexVec>, /// Head of a linked list of **uses** of each variable -- use in /// this context means that the existing value of the variable is /// read or modified. e.g., `y` is used in `x = y` but not `x`. /// Note that `DROP(x)` terminators are excluded from this list. - first_use_at: IndexVec>, + first_use_at: IndexVec>, /// Head of a linked list of **drops** of each variable -- these /// are a special category of uses corresponding to the drop that /// we add for each local variable. 
- first_drop_at: IndexVec>, + first_drop_at: IndexVec>, appearances: IndexVec, } @@ -60,52 +56,68 @@ impl vll::LinkElem for Appearance { } } -impl LocalUseMap<'me> { +impl LocalUseMap { crate fn build( - liveness_map: &'me NllLivenessMap, + live_locals: &Vec, elements: &RegionValueElements, mir: &Mir<'_>, ) -> Self { - let nones = IndexVec::from_elem_n(None, liveness_map.num_variables()); + let nones = IndexVec::from_elem_n(None, mir.local_decls.len()); let mut local_use_map = LocalUseMap { - liveness_map, first_def_at: nones.clone(), first_use_at: nones.clone(), first_drop_at: nones, appearances: IndexVec::new(), }; + let mut locals_with_use_data: IndexVec = + IndexVec::from_elem_n(false, mir.local_decls.len()); + live_locals + .iter() + .for_each(|&local| locals_with_use_data[local] = true); + LocalUseMapBuild { local_use_map: &mut local_use_map, elements, - }.visit_mir(mir); + locals_with_use_data, + } + .visit_mir(mir); local_use_map } - crate fn defs(&self, local: LiveVar) -> impl Iterator + '_ { + crate fn defs(&self, local: Local) -> impl Iterator + '_ { vll::iter(self.first_def_at[local], &self.appearances) .map(move |aa| self.appearances[aa].point_index) } - crate fn uses(&self, local: LiveVar) -> impl Iterator + '_ { + crate fn uses(&self, local: Local) -> impl Iterator + '_ { vll::iter(self.first_use_at[local], &self.appearances) .map(move |aa| self.appearances[aa].point_index) } - crate fn drops(&self, local: LiveVar) -> impl Iterator + '_ { + crate fn drops(&self, local: Local) -> impl Iterator + '_ { vll::iter(self.first_drop_at[local], &self.appearances) .map(move |aa| self.appearances[aa].point_index) } } -struct LocalUseMapBuild<'me, 'map: 'me> { - local_use_map: &'me mut LocalUseMap<'map>, +struct LocalUseMapBuild<'me> { + local_use_map: &'me mut LocalUseMap, elements: &'me RegionValueElements, + + // Vector used in `visit_local` to signal which `Local`s do we need + // def/use/drop information on, constructed from `live_locals` (that + // contains the variables we'll do the liveness analysis for). + // This vector serves optimization purposes only: we could have + // obtained the same information from `live_locals` but we want to + // avoid repeatedly calling `Vec::contains()` (see `LocalUseMap` for + // the rationale on the time-memory trade-off we're favoring here). 
+ locals_with_use_data: IndexVec, } -impl LocalUseMapBuild<'_, '_> { - fn insert_def(&mut self, local: LiveVar, location: Location) { +impl LocalUseMapBuild<'_> { + fn insert_def(&mut self, local: Local, location: Location) { Self::insert( self.elements, &mut self.local_use_map.first_def_at[local], @@ -114,7 +126,7 @@ impl LocalUseMapBuild<'_, '_> { ); } - fn insert_use(&mut self, local: LiveVar, location: Location) { + fn insert_use(&mut self, local: Local, location: Location) { Self::insert( self.elements, &mut self.local_use_map.first_use_at[local], @@ -123,7 +135,7 @@ impl LocalUseMapBuild<'_, '_> { ); } - fn insert_drop(&mut self, local: LiveVar, location: Location) { + fn insert_drop(&mut self, local: Local, location: Location) { Self::insert( self.elements, &mut self.local_use_map.first_drop_at[local], @@ -147,13 +159,13 @@ impl LocalUseMapBuild<'_, '_> { } } -impl Visitor<'tcx> for LocalUseMapBuild<'_, '_> { +impl Visitor<'tcx> for LocalUseMapBuild<'_> { fn visit_local(&mut self, &local: &Local, context: PlaceContext<'tcx>, location: Location) { - if let Some(local_with_region) = self.local_use_map.liveness_map.from_local(local) { + if self.locals_with_use_data[local] { match categorize(context) { - Some(DefUse::Def) => self.insert_def(local_with_region, location), - Some(DefUse::Use) => self.insert_use(local_with_region, location), - Some(DefUse::Drop) => self.insert_drop(local_with_region, location), + Some(DefUse::Def) => self.insert_def(local, location), + Some(DefUse::Use) => self.insert_use(local, location), + Some(DefUse::Drop) => self.insert_drop(local, location), _ => (), } } diff --git a/src/librustc_mir/borrow_check/nll/type_check/liveness/mod.rs b/src/librustc_mir/borrow_check/nll/type_check/liveness/mod.rs index 9ccdc84db1561..960e75048fa16 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/liveness/mod.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/liveness/mod.rs @@ -1,29 +1,19 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::location::LocationTable; -use borrow_check::nll::region_infer::values::RegionValueElements; -use borrow_check::nll::constraints::ConstraintSet; -use borrow_check::nll::NllLivenessMap; -use borrow_check::nll::universal_regions::UniversalRegions; -use dataflow::move_paths::MoveData; -use dataflow::MaybeInitializedPlaces; -use dataflow::FlowAtLocation; -use rustc::mir::Mir; -use rustc::ty::RegionVid; +use crate::borrow_check::location::LocationTable; +use crate::borrow_check::nll::constraints::ConstraintSet; +use crate::borrow_check::nll::facts::{AllFacts, AllFactsExt}; +use crate::borrow_check::nll::region_infer::values::RegionValueElements; +use crate::borrow_check::nll::universal_regions::UniversalRegions; +use crate::borrow_check::nll::ToRegionVid; +use crate::dataflow::move_paths::MoveData; +use crate::dataflow::FlowAtLocation; +use crate::dataflow::MaybeInitializedPlaces; +use rustc::mir::{Local, Mir}; +use rustc::ty::{RegionVid, TyCtxt}; use rustc_data_structures::fx::FxHashSet; use std::rc::Rc; use super::TypeChecker; -crate mod liveness_map; mod local_use_map; mod trace; @@ -33,30 +23,85 @@ mod trace; /// that indicate which types must be live at which point in the CFG. 
/// This vector is consumed by `constraint_generation`. /// -/// NB. This computation requires normalization; therefore, it must be +/// N.B., this computation requires normalization; therefore, it must be /// performed before pub(super) fn generate<'gcx, 'tcx>( typeck: &mut TypeChecker<'_, 'gcx, 'tcx>, mir: &Mir<'tcx>, elements: &Rc, - flow_inits: &mut FlowAtLocation>, + flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'_, 'gcx, 'tcx>>, move_data: &MoveData<'tcx>, location_table: &LocationTable, ) { debug!("liveness::generate"); - let free_regions = { - let borrowck_context = typeck.borrowck_context.as_ref().unwrap(); - regions_that_outlive_free_regions( - typeck.infcx.num_region_vars(), - &borrowck_context.universal_regions, - &borrowck_context.constraints.outlives_constraints, - ) + + let live_locals: Vec = if AllFacts::enabled(typeck.tcx()) { + // If "dump facts from NLL analysis" was requested perform + // the liveness analysis for all `Local`s. This case opens + // the possibility of the variables being analyzed in `trace` + // to be *any* `Local`, not just the "live" ones, so we can't + // make any assumptions past this point as to the characteristics + // of the `live_locals`. + // FIXME: Review "live" terminology past this point, we should + // not be naming the `Local`s as live. + mir.local_decls.indices().collect() + } else { + let free_regions = { + let borrowck_context = typeck.borrowck_context.as_ref().unwrap(); + regions_that_outlive_free_regions( + typeck.infcx.num_region_vars(), + &borrowck_context.universal_regions, + &borrowck_context.constraints.outlives_constraints, + ) + }; + compute_live_locals(typeck.tcx(), &free_regions, mir) }; - let liveness_map = NllLivenessMap::compute(typeck.tcx(), &free_regions, mir); - trace::trace(typeck, mir, elements, flow_inits, move_data, &liveness_map, location_table); + + if !live_locals.is_empty() { + trace::trace( + typeck, + mir, + elements, + flow_inits, + move_data, + live_locals, + location_table, + ); + } +} + +// The purpose of `compute_live_locals` is to define the subset of `Local` +// variables for which we need to do a liveness computation. We only need +// to compute whether a variable `X` is live if that variable contains +// some region `R` in its type where `R` is not known to outlive a free +// region (i.e., where `R` may be valid for just a subset of the fn body). +fn compute_live_locals( + tcx: TyCtxt<'_, '_, 'tcx>, + free_regions: &FxHashSet, + mir: &Mir<'tcx>, +) -> Vec { + let live_locals: Vec = mir + .local_decls + .iter_enumerated() + .filter_map(|(local, local_decl)| { + if tcx.all_free_regions_meet(&local_decl.ty, |r| { + free_regions.contains(&r.to_region_vid()) + }) { + None + } else { + Some(local) + } + }) + .collect(); + + debug!("{} total variables", mir.local_decls.len()); + debug!("{} variables need liveness", live_locals.len()); + debug!("{} regions outlive free regions", free_regions.len()); + + live_locals } -/// Compute all regions that are (currently) known to outlive free +/// Computes all regions that are (currently) known to outlive free /// regions. For these regions, we do not need to compute /// liveness, since the outlives constraints will ensure that they /// are live over the whole fn body anyhow. 
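The hunk above replaces the old `NllLivenessMap` indirection with a plain `Vec<Local>` of live locals plus a dense boolean vector for O(1) membership tests. As a rough illustration of the filtering rule in `compute_live_locals` and of the time-memory trade-off behind `locals_with_use_data` (this is a hand-rolled sketch, not the compiler's actual types: `usize` stands in for both `Local` and `RegionVid`, and the region sets are plain `HashSet`s):

// Simplified stand-ins: `usize` plays the role of `Local` / `RegionVid` here.
use std::collections::HashSet;

/// Returns the locals whose type mentions at least one region that is
/// not already known to outlive a free region; only these need the
/// per-local liveness walk.
fn compute_live_locals(
    regions_of_local: &[Vec<usize>],          // regions appearing in each local's type
    regions_outliving_free: &HashSet<usize>,  // regions known to outlive a free region
) -> Vec<usize> {
    regions_of_local
        .iter()
        .enumerate()
        .filter(|(_, regions)| {
            !regions.iter().all(|r| regions_outliving_free.contains(r))
        })
        .map(|(local, _)| local)
        .collect()
}

fn main() {
    let regions_of_local = vec![
        vec![],   // _0: no regions at all, so the check passes vacuously -> skipped
        vec![0],  // _1: region '0 outlives a free region -> skipped
        vec![1],  // _2: region '1 is not known to outlive -> needs liveness
    ];
    let outliving: HashSet<usize> = [0].into_iter().collect();

    let live_locals = compute_live_locals(&regions_of_local, &outliving);
    assert_eq!(live_locals, vec![2]);

    // Dense flag vector analogous to `locals_with_use_data`: constant-time
    // membership checks in `visit_local` instead of `live_locals.contains(&local)`.
    let mut locals_with_use_data = vec![false; regions_of_local.len()];
    for &local in &live_locals {
        locals_with_use_data[local] = true;
    }
    assert!(locals_with_use_data[2] && !locals_with_use_data[1]);
}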
diff --git a/src/librustc_mir/borrow_check/nll/type_check/liveness/trace.rs b/src/librustc_mir/borrow_check/nll/type_check/liveness/trace.rs index bc4e0ca235139..f0df7070e6b5a 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/liveness/trace.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/liveness/trace.rs @@ -1,22 +1,11 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::location::LocationTable; -use borrow_check::nll::region_infer::values::{self, PointIndex, RegionValueElements}; -use borrow_check::nll::type_check::liveness::liveness_map::{LiveVar, NllLivenessMap}; -use borrow_check::nll::type_check::liveness::local_use_map::LocalUseMap; -use borrow_check::nll::type_check::NormalizeLocation; -use borrow_check::nll::type_check::TypeChecker; -use dataflow::move_paths::indexes::MovePathIndex; -use dataflow::move_paths::MoveData; -use dataflow::{FlowAtLocation, FlowsAtLocation, MaybeInitializedPlaces}; +use crate::borrow_check::location::LocationTable; +use crate::borrow_check::nll::region_infer::values::{self, PointIndex, RegionValueElements}; +use crate::borrow_check::nll::type_check::liveness::local_use_map::LocalUseMap; +use crate::borrow_check::nll::type_check::NormalizeLocation; +use crate::borrow_check::nll::type_check::TypeChecker; +use crate::dataflow::move_paths::indexes::MovePathIndex; +use crate::dataflow::move_paths::MoveData; +use crate::dataflow::{FlowAtLocation, FlowsAtLocation, MaybeInitializedPlaces}; use rustc::infer::canonical::QueryRegionConstraint; use rustc::mir::{BasicBlock, ConstraintCategory, Local, Location, Mir}; use rustc::traits::query::dropck_outlives::DropckOutlivesResult; @@ -26,7 +15,6 @@ use rustc::ty::{Ty, TypeFoldable}; use rustc_data_structures::bit_set::HybridBitSet; use rustc_data_structures::fx::FxHashMap; use std::rc::Rc; -use util::liveness::LiveVariableMap; /// This is the heart of the liveness computation. For each variable X /// that requires a liveness computation, it walks over all the uses @@ -46,18 +34,14 @@ pub(super) fn trace( typeck: &mut TypeChecker<'_, 'gcx, 'tcx>, mir: &Mir<'tcx>, elements: &Rc, - flow_inits: &mut FlowAtLocation>, + flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'_, 'gcx, 'tcx>>, move_data: &MoveData<'tcx>, - liveness_map: &NllLivenessMap, + live_locals: Vec, location_table: &LocationTable, ) { debug!("trace()"); - if liveness_map.is_empty() { - return; - } - - let local_use_map = &LocalUseMap::build(liveness_map, elements, mir); + let local_use_map = &LocalUseMap::build(&live_locals, elements, mir); let cx = LivenessContext { typeck, @@ -66,12 +50,11 @@ pub(super) fn trace( elements, local_use_map, move_data, - liveness_map, drop_data: FxHashMap::default(), location_table, }; - LivenessResults::new(cx).compute_for_all_locals(); + LivenessResults::new(cx).compute_for_all_locals(live_locals); } /// Contextual state for the type-liveness generator. @@ -99,14 +82,11 @@ where /// Results of dataflow tracking which variables (and paths) have been /// initialized. - flow_inits: &'me mut FlowAtLocation>, + flow_inits: &'me mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'flow, 'gcx, 'tcx>>, /// Index indicating where each variable is assigned, used, or /// dropped. 
- local_use_map: &'me LocalUseMap<'me>, - - /// Map tracking which variables need liveness computation. - liveness_map: &'me NllLivenessMap, + local_use_map: &'me LocalUseMap, /// Maps between a MIR Location and a LocationIndex location_table: &'me LocationTable, @@ -158,15 +138,12 @@ impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> { } } - fn compute_for_all_locals(&mut self) { - for live_local in self.cx.liveness_map.to_local.indices() { - let local = self.cx.liveness_map.from_live_var(live_local); - debug!("local={:?} live_local={:?}", local, live_local); - + fn compute_for_all_locals(&mut self, live_locals: Vec) { + for local in live_locals { self.reset_local_state(); - self.add_defs_for(live_local); - self.compute_use_live_points_for(live_local); - self.compute_drop_live_points_for(live_local); + self.add_defs_for(local); + self.compute_use_live_points_for(local); + self.compute_drop_live_points_for(local); let local_ty = self.cx.mir.local_decls[local].ty; @@ -195,23 +172,23 @@ impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> { } /// Adds the definitions of `local` into `self.defs`. - fn add_defs_for(&mut self, live_local: LiveVar) { - for def in self.cx.local_use_map.defs(live_local) { + fn add_defs_for(&mut self, local: Local) { + for def in self.cx.local_use_map.defs(local) { debug!("- defined at {:?}", def); self.defs.insert(def); } } - /// Compute all points where local is "use live" -- meaning its + /// Computes all points where local is "use live" -- meaning its /// current value may be used later (except by a drop). This is - /// done by walking backwards from each use of `live_local` until we + /// done by walking backwards from each use of `local` until we /// find a `def` of local. /// - /// Requires `add_defs_for(live_local)` to have been executed. - fn compute_use_live_points_for(&mut self, live_local: LiveVar) { - debug!("compute_use_live_points_for(live_local={:?})", live_local); + /// Requires `add_defs_for(local)` to have been executed. + fn compute_use_live_points_for(&mut self, local: Local) { + debug!("compute_use_live_points_for(local={:?})", local); - self.stack.extend(self.cx.local_use_map.uses(live_local)); + self.stack.extend(self.cx.local_use_map.uses(local)); while let Some(p) = self.stack.pop() { if self.defs.contains(p) { continue; @@ -225,7 +202,7 @@ impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> { } } - /// Compute all points where local is "drop live" -- meaning its + /// Computes all points where local is "drop live" -- meaning its /// current value may be dropped later (but not used). This is /// done by iterating over the drops of `local` where `local` (or /// some subpart of `local`) is initialized. For each such drop, @@ -234,15 +211,14 @@ impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> { /// /// Requires `compute_use_live_points_for` and `add_defs_for` to /// have been executed. - fn compute_drop_live_points_for(&mut self, live_local: LiveVar) { - debug!("compute_drop_live_points_for(live_local={:?})", live_local); + fn compute_drop_live_points_for(&mut self, local: Local) { + debug!("compute_drop_live_points_for(local={:?})", local); - let local = self.cx.liveness_map.from_live_var(live_local); let mpi = self.cx.move_data.rev_lookup.find_local(local); debug!("compute_drop_live_points_for: mpi = {:?}", mpi); // Find the drops where `local` is initialized. 
- for drop_point in self.cx.local_use_map.drops(live_local) { + for drop_point in self.cx.local_use_map.drops(local) { let location = self.cx.elements.to_location(drop_point); debug_assert_eq!(self.cx.mir.terminator_loc(location.block), location,); @@ -417,7 +393,7 @@ impl LivenessResults<'me, 'typeck, 'flow, 'gcx, 'tcx> { } impl LivenessContext<'_, '_, '_, '_, 'tcx> { - /// True if the local variable (or some part of it) is initialized in + /// Returns `true` if the local variable (or some part of it) is initialized in /// the terminator of `block`. We need to check this to determine if a /// DROP of some local variable will have an effect -- note that /// drops, as they may unwind, are always terminators. @@ -439,7 +415,7 @@ impl LivenessContext<'_, '_, '_, '_, 'tcx> { self.flow_inits.has_any_child_of(mpi).is_some() } - /// True if the path `mpi` (or some part of it) is initialized at + /// Returns `true` if the path `mpi` (or some part of it) is initialized at /// the exit of `block`. /// /// **Warning:** Does not account for the result of `Call` @@ -449,7 +425,7 @@ impl LivenessContext<'_, '_, '_, '_, 'tcx> { self.flow_inits.has_any_child_of(mpi).is_some() } - /// Store the result that all regions in `value` are live for the + /// Stores the result that all regions in `value` are live for the /// points `live_at`. fn add_use_live_facts_for( &mut self, diff --git a/src/librustc_mir/borrow_check/nll/type_check/mod.rs b/src/librustc_mir/borrow_check/nll/type_check/mod.rs index 4807abe2bdd19..ec5637d17072d 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/mod.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/mod.rs @@ -1,42 +1,34 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass type-checks the MIR to ensure it is not broken. 
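The `compute_use_live_points_for` walk documented in trace.rs above is a plain backwards worklist over CFG points: start from every use, stop whenever a def is reached. A minimal sketch of that walk under simplifying assumptions (`usize` stands in for `PointIndex`, and the CFG is a single straight-line block, so the only predecessor of point `p` is `p - 1`; the real code walks predecessors through `RegionValueElements`):

use std::collections::HashSet;

/// Walks backwards from every use until a def (or the block start) is hit,
/// collecting the points where the variable is "use live".
fn use_live_points(defs: &HashSet<usize>, uses: &[usize]) -> HashSet<usize> {
    let mut live = HashSet::new();
    let mut stack: Vec<usize> = uses.to_vec();
    while let Some(p) = stack.pop() {
        if defs.contains(&p) {
            // The walk stops at a definition: earlier points see a
            // different value of the variable.
            continue;
        }
        if live.insert(p) {
            if let Some(prev) = p.checked_sub(1) {
                stack.push(prev); // the sole "predecessor" in this toy CFG
            }
        }
    }
    live
}

fn main() {
    // A def at point 2 and uses at points 5 and 7:
    let defs: HashSet<usize> = [2].into_iter().collect();
    let live = use_live_points(&defs, &[5, 7]);
    // Everything from just after the def up to the last use is use-live.
    assert_eq!(live, (3..=7).collect());
}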
#![allow(unreachable_code)] -use borrow_check::borrow_set::BorrowSet; -use borrow_check::location::LocationTable; -use borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; -use borrow_check::nll::facts::AllFacts; -use borrow_check::nll::region_infer::values::LivenessValues; -use borrow_check::nll::region_infer::values::PlaceholderIndex; -use borrow_check::nll::region_infer::values::PlaceholderIndices; -use borrow_check::nll::region_infer::values::RegionValueElements; -use borrow_check::nll::region_infer::{ClosureRegionRequirementsExt, TypeTest}; -use borrow_check::nll::renumber; -use borrow_check::nll::type_check::free_region_relations::{ +use crate::borrow_check::borrow_set::BorrowSet; +use crate::borrow_check::location::LocationTable; +use crate::borrow_check::nll::constraints::{ConstraintSet, OutlivesConstraint}; +use crate::borrow_check::nll::facts::AllFacts; +use crate::borrow_check::nll::region_infer::values::LivenessValues; +use crate::borrow_check::nll::region_infer::values::PlaceholderIndex; +use crate::borrow_check::nll::region_infer::values::PlaceholderIndices; +use crate::borrow_check::nll::region_infer::values::RegionValueElements; +use crate::borrow_check::nll::region_infer::{ClosureRegionRequirementsExt, TypeTest}; +use crate::borrow_check::nll::renumber; +use crate::borrow_check::nll::type_check::free_region_relations::{ CreateResult, UniversalRegionRelations, }; -use borrow_check::nll::universal_regions::{DefiningTy, UniversalRegions}; -use borrow_check::nll::ToRegionVid; -use dataflow::move_paths::MoveData; -use dataflow::FlowAtLocation; -use dataflow::MaybeInitializedPlaces; +use crate::borrow_check::nll::universal_regions::{DefiningTy, UniversalRegions}; +use crate::borrow_check::nll::ToRegionVid; +use crate::dataflow::move_paths::MoveData; +use crate::dataflow::FlowAtLocation; +use crate::dataflow::MaybeInitializedPlaces; +use crate::transform::{MirPass, MirSource}; use either::Either; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::infer::canonical::QueryRegionConstraint; use rustc::infer::outlives::env::RegionBoundPairs; use rustc::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime, NLLRegionVariableOrigin}; -use rustc::mir::interpret::EvalErrorKind::BoundsCheck; +use rustc::infer::type_variable::TypeVariableOrigin; +use rustc::mir::interpret::{InterpError::BoundsCheck, ConstValue}; use rustc::mir::tcx::PlaceTy; use rustc::mir::visit::{PlaceContext, Visitor, MutatingUseContext, NonMutatingUseContext}; use rustc::mir::*; @@ -45,15 +37,18 @@ use rustc::traits::query::type_op::custom::CustomTypeOp; use rustc::traits::query::{Fallible, NoSolution}; use rustc::traits::{ObligationCause, PredicateObligations}; use rustc::ty::fold::TypeFoldable; -use rustc::ty::subst::{Subst, Substs, UnpackedKind}; -use rustc::ty::{self, RegionVid, ToPolyTraitRef, Ty, TyCtxt, TyKind}; +use rustc::ty::subst::{Subst, SubstsRef, UnpackedKind, UserSubsts}; +use rustc::ty::{ + self, RegionVid, ToPolyTraitRef, Ty, TyCtxt, UserType, + CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, + UserTypeAnnotationIndex, +}; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc::ty::layout::VariantIdx; use std::rc::Rc; -use std::{fmt, iter}; +use std::{fmt, iter, mem}; use syntax_pos::{Span, DUMMY_SP}; -use transform::{MirPass, MirSource}; macro_rules! 
span_mirbug { ($context:expr, $elem:expr, $($message:tt)*) => ({ @@ -123,7 +118,7 @@ pub(crate) fn type_check<'gcx, 'tcx>( location_table: &LocationTable, borrow_set: &BorrowSet<'tcx>, all_facts: &mut Option, - flow_inits: &mut FlowAtLocation>, + flow_inits: &mut FlowAtLocation<'tcx, MaybeInitializedPlaces<'_, 'gcx, 'tcx>>, move_data: &MoveData<'tcx>, elements: &Rc, ) -> MirTypeckResults<'tcx> { @@ -131,7 +126,7 @@ pub(crate) fn type_check<'gcx, 'tcx>( let mut constraints = MirTypeckRegionConstraints { placeholder_indices: PlaceholderIndices::default(), placeholder_index_to_region: IndexVec::default(), - liveness_constraints: LivenessValues::new(elements), + liveness_constraints: LivenessValues::new(elements.clone()), outlives_constraints: ConstraintSet::default(), closure_bounds_mapping: Default::default(), type_tests: Vec::default(), @@ -217,7 +212,7 @@ fn type_check_internal<'a, 'gcx, 'tcx, R>( extra(&mut checker) } -fn translate_outlives_facts(cx: &mut BorrowCheckContext) { +fn translate_outlives_facts(cx: &mut BorrowCheckContext<'_, '_>) { if let Some(facts) = cx.all_facts { let location_table = cx.location_table; facts @@ -242,7 +237,7 @@ fn translate_outlives_facts(cx: &mut BorrowCheckContext) { } } -fn mirbug(tcx: TyCtxt, span: Span, msg: &str) { +fn mirbug(tcx: TyCtxt<'_, '_, '_>, span: Span, msg: &str) { // We sometimes see MIR failures (notably predicate failures) due to // the fact that we check rvalue sized predicates here. So use `delay_span_bug` // to avoid reporting bugs in those cases. @@ -260,7 +255,7 @@ enum FieldAccessError { /// is a problem. struct TypeVerifier<'a, 'b: 'a, 'gcx: 'tcx, 'tcx: 'b> { cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, - mir: &'a Mir<'tcx>, + mir: &'b Mir<'tcx>, last_span: Span, mir_def_id: DefId, errors_reported: bool, @@ -273,7 +268,7 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { } } - fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) { + fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext<'_>, location: Location) { self.sanitize_place(place, location, context); } @@ -282,23 +277,53 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { self.sanitize_constant(constant, location); self.sanitize_type(constant, constant.ty); - if let Some(user_ty) = constant.user_ty { + if let Some(annotation_index) = constant.user_ty { if let Err(terr) = self.cx.relate_type_and_user_type( constant.ty, ty::Variance::Invariant, - &UserTypeProjection { base: user_ty, projs: vec![], }, + &UserTypeProjection { base: annotation_index, projs: vec![], }, location.to_locations(), ConstraintCategory::Boring, ) { + let annotation = &self.cx.user_type_annotations[annotation_index]; span_mirbug!( self, constant, "bad constant user type {:?} vs {:?}: {:?}", - user_ty, + annotation, constant.ty, terr, ); } + } else { + if let ConstValue::Unevaluated(def_id, substs) = constant.literal.val { + if let Err(terr) = self.cx.fully_perform_op( + location.to_locations(), + ConstraintCategory::Boring, + self.cx.param_env.and(type_op::ascribe_user_type::AscribeUserType::new( + constant.ty, def_id, UserSubsts { substs, user_self_ty: None }, + )), + ) { + span_mirbug!( + self, + constant, + "bad constant type {:?} ({:?})", + constant, + terr + ); + } + } + if let ty::FnDef(def_id, substs) = constant.literal.ty.sty { + let tcx = self.tcx(); + + let instantiated_predicates = tcx + .predicates_of(def_id) + .instantiate(tcx, substs); + 
self.cx.normalize_and_prove_instantiated_predicates( + instantiated_predicates, + location.to_locations(), + ); + } } } @@ -313,8 +338,20 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { self.sanitize_type(local_decl, local_decl.ty); for (user_ty, span) in local_decl.user_ty.projections_and_spans() { + let ty = if !local_decl.is_nonref_binding() { + // If we have a binding of the form `let ref x: T = ..` then remove the outermost + // reference so we can check the type annotation for the remaining type. + if let ty::Ref(_, rty, _) = local_decl.ty.sty { + rty + } else { + bug!("{:?} with ref binding has wrong type {}", local, local_decl.ty); + } + } else { + local_decl.ty + }; + if let Err(terr) = self.cx.relate_type_and_user_type( - local_decl.ty, + ty, ty::Variance::Invariant, user_ty, Locations::All(*span), @@ -346,7 +383,7 @@ impl<'a, 'b, 'gcx, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'gcx, 'tcx> { } impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { - fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'a Mir<'tcx>) -> Self { + fn new(cx: &'a mut TypeChecker<'b, 'gcx, 'tcx>, mir: &'b Mir<'tcx>) -> Self { TypeVerifier { mir, mir_def_id: cx.mir_def_id, @@ -368,47 +405,25 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { } } - /// Checks that the constant's `ty` field matches up with what - /// would be expected from its literal. + /// Checks that the constant's `ty` field matches up with what would be + /// expected from its literal. Unevaluated constants and well-formed + /// constraints are checked by `visit_constant`. fn sanitize_constant(&mut self, constant: &Constant<'tcx>, location: Location) { debug!( "sanitize_constant(constant={:?}, location={:?})", constant, location ); - // FIXME(#46702) -- We need some way to get the predicates - // associated with the "pre-evaluated" form of the - // constant. For example, consider that the constant - // may have associated constant projections (`>::SOME_CONST`) that impose - // constraints on `'a` and `'b`. These constraints - // would be lost if we just look at the normalized - // value. - if let ty::FnDef(def_id, substs) = constant.literal.ty.sty { - let tcx = self.tcx(); - let type_checker = &mut self.cx; - - // FIXME -- For now, use the substitutions from - // `value.ty` rather than `value.val`. The - // renumberer will rewrite them to independent - // sets of regions; in principle, we ought to - // derive the type of the `value.val` from "first - // principles" and equate with value.ty, but as we - // are transitioning to the miri-based system, we - // don't have a handy function for that, so for - // now we just ignore `value.val` regions. - - let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs); - type_checker.normalize_and_prove_instantiated_predicates( - instantiated_predicates, - location.to_locations(), - ); + let literal = constant.literal; + + if let ConstValue::Unevaluated(..) 
= literal.val { + return; } - debug!("sanitize_constant: expected_ty={:?}", constant.literal.ty); + debug!("sanitize_constant: expected_ty={:?}", literal.ty); if let Err(terr) = self.cx.eq_types( - constant.literal.ty, + literal.ty, constant.ty, location.to_locations(), ConstraintCategory::Boring, @@ -418,7 +433,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { constant, "constant {:?} should have type {:?} but has {:?} ({:?})", constant, - constant.literal.ty, + literal.ty, constant.ty, terr, ); @@ -431,46 +446,53 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { &mut self, place: &Place<'tcx>, location: Location, - context: PlaceContext, + context: PlaceContext<'_>, ) -> PlaceTy<'tcx> { debug!("sanitize_place: {:?}", place); - let place_ty = match *place { - Place::Local(index) => PlaceTy::Ty { - ty: self.mir.local_decls[index].ty, - }, - Place::Promoted(box (_index, sty)) => { - let sty = self.sanitize_type(place, sty); - // FIXME -- promoted MIR return types reference - // various "free regions" (e.g., scopes and things) - // that they ought not to do. We have to figure out - // how best to handle that -- probably we want treat - // promoted MIR much like closures, renumbering all - // their free regions and propagating constraints - // upwards. We have the same acyclic guarantees, so - // that should be possible. But for now, ignore them. - // - // let promoted_mir = &self.mir.promoted[index]; - // promoted_mir.return_ty() - PlaceTy::Ty { ty: sty } - } - Place::Static(box Static { def_id, ty: sty }) => { + let place_ty = match place { + Place::Base(PlaceBase::Local(index)) => + PlaceTy::from_ty(self.mir.local_decls[*index].ty), + Place::Base(PlaceBase::Static(box Static { kind, ty: sty })) => { let sty = self.sanitize_type(place, sty); - let ty = self.tcx().type_of(def_id); - let ty = self.cx.normalize(ty, location); - if let Err(terr) = - self.cx - .eq_types(ty, sty, location.to_locations(), ConstraintCategory::Boring) - { - span_mirbug!( - self, - place, - "bad static type ({:?}: {:?}): {:?}", - ty, - sty, - terr - ); + let check_err = + |verifier: &mut TypeVerifier<'a, 'b, 'gcx, 'tcx>, + place: &Place<'tcx>, + ty, + sty| { + if let Err(terr) = verifier.cx.eq_types( + sty, + ty, + location.to_locations(), + ConstraintCategory::Boring, + ) { + span_mirbug!( + verifier, + place, + "bad promoted type ({:?}: {:?}): {:?}", + ty, + sty, + terr + ); + }; + }; + match kind { + StaticKind::Promoted(promoted) => { + if !self.errors_reported { + let promoted_mir = &self.mir.promoted[*promoted]; + self.sanitize_promoted(promoted_mir, location); + + let promoted_ty = promoted_mir.return_ty(); + check_err(self, place, promoted_ty, sty); + } + } + StaticKind::Static(def_id) => { + let ty = self.tcx().type_of(*def_id); + let ty = self.cx.normalize(ty, location); + + check_err(self, place, ty, sty); + } } - PlaceTy::Ty { ty: sty } + PlaceTy::from_ty(sty) } Place::Projection(ref proj) => { let base_context = if context.is_mutating_use() { @@ -479,12 +501,10 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) }; let base_ty = self.sanitize_place(&proj.base, location, base_context); - if let PlaceTy::Ty { ty } = base_ty { - if ty.references_error() { + if base_ty.variant_index.is_none() { + if base_ty.ty.references_error() { assert!(self.errors_reported); - return PlaceTy::Ty { - ty: self.tcx().types.err, - }; + return PlaceTy::from_ty(self.tcx().types.err); } } self.sanitize_projection(base_ty, 
&proj.elem, place, location) @@ -494,16 +514,20 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { let tcx = self.tcx(); let trait_ref = ty::TraitRef { def_id: tcx.lang_items().copy_trait().unwrap(), - substs: tcx.mk_substs_trait(place_ty.to_ty(tcx), &[]), + substs: tcx.mk_substs_trait(place_ty.ty, &[]), }; - // In order to have a Copy operand, the type T of the value must be Copy. Note that we - // prove that T: Copy, rather than using the type_moves_by_default test. This is - // important because type_moves_by_default ignores the resulting region obligations and - // assumes they pass. This can result in bounds from Copy impls being unsoundly ignored - // (e.g., #29149). Note that we decide to use Copy before knowing whether the bounds - // fully apply: in effect, the rule is that if a value of some type could implement - // Copy, then it must. + // In order to have a Copy operand, the type T of the + // value must be Copy. Note that we prove that T: Copy, + // rather than using the `is_copy_modulo_regions` + // test. This is important because + // `is_copy_modulo_regions` ignores the resulting region + // obligations and assumes they pass. This can result in + // bounds from Copy impls being unsoundly ignored (e.g., + // #29149). Note that we decide to use Copy before knowing + // whether the bounds fully apply: in effect, the rule is + // that if a value of some type could implement Copy, then + // it must. self.cx.prove_trait_ref( trait_ref, location.to_locations(), @@ -513,6 +537,72 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { place_ty } + fn sanitize_promoted(&mut self, promoted_mir: &'b Mir<'tcx>, location: Location) { + // Determine the constraints from the promoted MIR by running the type + // checker on the promoted MIR, then transfer the constraints back to + // the main MIR, changing the locations to the provided location. + + let parent_mir = mem::replace(&mut self.mir, promoted_mir); + + let all_facts = &mut None; + let mut constraints = Default::default(); + let mut closure_bounds = Default::default(); + if let Some(ref mut bcx) = self.cx.borrowck_context { + // Don't try to add borrow_region facts for the promoted MIR + mem::swap(bcx.all_facts, all_facts); + + // Use a new sets of constraints and closure bounds so that we can + // modify their locations. + mem::swap(&mut bcx.constraints.outlives_constraints, &mut constraints); + mem::swap(&mut bcx.constraints.closure_bounds_mapping, &mut closure_bounds); + }; + + self.visit_mir(promoted_mir); + + if !self.errors_reported { + // if verifier failed, don't do further checks to avoid ICEs + self.cx.typeck_mir(promoted_mir); + } + + self.mir = parent_mir; + // Merge the outlives constraints back in, at the given location. + if let Some(ref mut base_bcx) = self.cx.borrowck_context { + mem::swap(base_bcx.all_facts, all_facts); + mem::swap(&mut base_bcx.constraints.outlives_constraints, &mut constraints); + mem::swap(&mut base_bcx.constraints.closure_bounds_mapping, &mut closure_bounds); + + let locations = location.to_locations(); + for constraint in constraints.iter() { + let mut constraint = *constraint; + constraint.locations = locations; + if let ConstraintCategory::Return + | ConstraintCategory::UseAsConst + | ConstraintCategory::UseAsStatic = constraint.category + { + // "Returning" from a promoted is an assigment to a + // temporary from the user's point of view. 
+ constraint.category = ConstraintCategory::Boring; + } + base_bcx.constraints.outlives_constraints.push(constraint) + } + + if !closure_bounds.is_empty() { + let combined_bounds_mapping = closure_bounds + .into_iter() + .flat_map(|(_, value)| value) + .collect(); + let existing = base_bcx + .constraints + .closure_bounds_mapping + .insert(location, combined_bounds_mapping); + assert!( + existing.is_none(), + "Multiple promoteds/closures at the same location." + ); + } + } + } + fn sanitize_projection( &mut self, base: PlaceTy<'tcx>, @@ -522,40 +612,40 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { ) -> PlaceTy<'tcx> { debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place); let tcx = self.tcx(); - let base_ty = base.to_ty(tcx); + let base_ty = base.ty; match *pi { ProjectionElem::Deref => { let deref_ty = base_ty.builtin_deref(true); - PlaceTy::Ty { - ty: deref_ty.map(|t| t.ty).unwrap_or_else(|| { + PlaceTy::from_ty( + deref_ty.map(|t| t.ty).unwrap_or_else(|| { span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty) - }), - } + }) + ) } ProjectionElem::Index(i) => { - let index_ty = Place::Local(i).ty(self.mir, tcx).to_ty(tcx); + let index_ty = Place::Base(PlaceBase::Local(i)).ty(self.mir, tcx).ty; if index_ty != tcx.types.usize { - PlaceTy::Ty { - ty: span_mirbug_and_err!(self, i, "index by non-usize {:?}", i), - } + PlaceTy::from_ty( + span_mirbug_and_err!(self, i, "index by non-usize {:?}", i), + ) } else { - PlaceTy::Ty { - ty: base_ty.builtin_index().unwrap_or_else(|| { + PlaceTy::from_ty( + base_ty.builtin_index().unwrap_or_else(|| { span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty) }), - } + ) } } ProjectionElem::ConstantIndex { .. } => { // consider verifying in-bounds - PlaceTy::Ty { - ty: base_ty.builtin_index().unwrap_or_else(|| { + PlaceTy::from_ty( + base_ty.builtin_index().unwrap_or_else(|| { span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty) }), - } + ) } - ProjectionElem::Subslice { from, to } => PlaceTy::Ty { - ty: match base_ty.sty { + ProjectionElem::Subslice { from, to } => PlaceTy::from_ty( + match base_ty.sty { ty::Array(inner, size) => { let size = size.unwrap_usize(tcx); let min_size = (from as u64) + (to as u64); @@ -573,35 +663,39 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { ty::Slice(..) 
=> base_ty, _ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty), }, - }, - ProjectionElem::Downcast(adt_def1, index) => match base_ty.sty { - ty::Adt(adt_def, substs) if adt_def.is_enum() && adt_def == adt_def1 => { + ), + ProjectionElem::Downcast(maybe_name, index) => match base_ty.sty { + ty::Adt(adt_def, _substs) if adt_def.is_enum() => { if index.as_usize() >= adt_def.variants.len() { - PlaceTy::Ty { - ty: span_mirbug_and_err!( + PlaceTy::from_ty( + span_mirbug_and_err!( self, place, "cast to variant #{:?} but enum only has {:?}", index, adt_def.variants.len() ), - } + ) } else { - PlaceTy::Downcast { - adt_def, - substs, - variant_index: index, + PlaceTy { + ty: base_ty, + variant_index: Some(index), } } } - _ => PlaceTy::Ty { - ty: span_mirbug_and_err!( - self, - place, - "can't downcast {:?} as {:?}", - base_ty, - adt_def1 - ), + _ => { + let ty = if let Some(name) = maybe_name { + span_mirbug_and_err!( + self, + place, + "can't downcast {:?} as {:?}", + base_ty, + name + ) + } else { + span_mirbug_and_err!(self, place, "can't downcast {:?}", base_ty) + }; + PlaceTy::from_ty(ty) }, }, ProjectionElem::Field(field, fty) => { @@ -630,7 +724,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { field_count ), } - PlaceTy::Ty { ty: fty } + PlaceTy::from_ty(fty) } } } @@ -650,12 +744,13 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> { let tcx = self.tcx(); let (variant, substs) = match base_ty { - PlaceTy::Downcast { - adt_def, - substs, - variant_index, - } => (&adt_def.variants[variant_index], substs), - PlaceTy::Ty { ty } => match ty.sty { + PlaceTy { ty, variant_index: Some(variant_index) } => { + match ty.sty { + ty::Adt(adt_def, substs) => (&adt_def.variants[variant_index], substs), + _ => bug!("can't have downcast of non-adt type"), + } + } + PlaceTy { ty, variant_index: None } => match ty.sty { ty::Adt(adt_def, substs) if !adt_def.is_enum() => (&adt_def.variants[VariantIdx::new(0)], substs), ty::Closure(def_id, substs) => { @@ -718,7 +813,9 @@ struct TypeChecker<'a, 'gcx: 'tcx, 'tcx: 'a> { infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'gcx>, last_span: Span, - mir: &'a Mir<'tcx>, + /// User type annotations are shared between the main MIR and the MIR of + /// all of the promoted items. + user_type_annotations: &'a CanonicalUserTypeAnnotations<'tcx>, mir_def_id: DefId, region_bound_pairs: &'a RegionBoundPairs<'tcx>, implicit_region_bound: Option>, @@ -819,7 +916,7 @@ pub enum Locations { /// older NLL analysis, we required this only at the entry point /// to the function. By the nature of the constraints, this wound /// up propagating to all points reachable from start (because - /// `'1` -- as a universal region -- is live everywhere). In the + /// `'1` -- as a universal region -- is live everywhere). In the /// newer analysis, though, this doesn't work: `_0` is considered /// dead at the start (it has no usable value) and hence this type /// equality is basically a no-op. 
Then, later on, when we do `_0 @@ -870,17 +967,79 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { borrowck_context: Option<&'a mut BorrowCheckContext<'a, 'tcx>>, universal_region_relations: Option<&'a UniversalRegionRelations<'tcx>>, ) -> Self { - TypeChecker { + let mut checker = Self { infcx, last_span: DUMMY_SP, - mir, mir_def_id, + user_type_annotations: &mir.user_type_annotations, param_env, region_bound_pairs, implicit_region_bound, borrowck_context, reported_errors: Default::default(), universal_region_relations, + }; + checker.check_user_type_annotations(); + checker + } + + /// Equate the inferred type and the annotated type for user type annotations + fn check_user_type_annotations(&mut self) { + debug!( + "check_user_type_annotations: user_type_annotations={:?}", + self.user_type_annotations + ); + for user_annotation in self.user_type_annotations { + let CanonicalUserTypeAnnotation { span, ref user_ty, inferred_ty } = *user_annotation; + let (annotation, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars( + span, user_ty + ); + match annotation { + UserType::Ty(mut ty) => { + ty = self.normalize(ty, Locations::All(span)); + + if let Err(terr) = self.eq_types( + ty, + inferred_ty, + Locations::All(span), + ConstraintCategory::BoringNoLocation, + ) { + span_mirbug!( + self, + user_annotation, + "bad user type ({:?} = {:?}): {:?}", + ty, + inferred_ty, + terr + ); + } + + self.prove_predicate( + ty::Predicate::WellFormed(inferred_ty), + Locations::All(span), + ConstraintCategory::TypeAnnotation, + ); + }, + UserType::TypeOf(def_id, user_substs) => { + if let Err(terr) = self.fully_perform_op( + Locations::All(span), + ConstraintCategory::BoringNoLocation, + self.param_env.and(type_op::ascribe_user_type::AscribeUserType::new( + inferred_ty, def_id, user_substs, + )), + ) { + span_mirbug!( + self, + user_annotation, + "bad user type AscribeUserType({:?}, {:?} {:?}): {:?}", + inferred_ty, + def_id, + user_substs, + terr + ); + } + }, + } } } @@ -977,7 +1136,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { category: ConstraintCategory, ) -> Fallible<()> { if let Err(terr) = self.sub_types(sub, sup, locations, category) { - if let TyKind::Opaque(..) = sup.sty { + if let ty::Opaque(..) = sup.sty { // When you have `let x: impl Foo = ...` in a closure, // the resulting inferend values are stored with the // def-id of the base function. @@ -1004,7 +1163,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { &mut self, a: Ty<'tcx>, v: ty::Variance, - user_ty: &UserTypeProjection<'tcx>, + user_ty: &UserTypeProjection, locations: Locations, category: ConstraintCategory, ) -> Fallible<()> { @@ -1013,68 +1172,23 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { a, v, user_ty, locations, ); - match user_ty.base { - UserTypeAnnotation::Ty(canonical_ty) => { - let (ty, _) = self.infcx - .instantiate_canonical_with_fresh_inference_vars(DUMMY_SP, &canonical_ty); + let annotated_type = self.user_type_annotations[user_ty.base].inferred_ty; + let mut curr_projected_ty = PlaceTy::from_ty(annotated_type); - // The `TypeRelating` code assumes that "unresolved inference - // variables" appear in the "a" side, so flip `Contravariant` - // ambient variance to get the right relationship. - let v1 = ty::Contravariant.xform(v); + let tcx = self.infcx.tcx; - let tcx = self.infcx.tcx; - let ty = self.normalize(ty, locations); - - // We need to follow any provided projetions into the type. 
- // - // if we hit a ty var as we descend, then just skip the - // attempt to relate the mir local with any type. - #[derive(Debug)] struct HitTyVar; - let mut curr_projected_ty: Result; - - curr_projected_ty = Ok(PlaceTy::from_ty(ty)); - for proj in &user_ty.projs { - let projected_ty = if let Ok(projected_ty) = curr_projected_ty { - projected_ty - } else { - break; - }; - curr_projected_ty = projected_ty.projection_ty_core( - tcx, proj, |this, field, &()| { - if this.to_ty(tcx).is_ty_var() { - Err(HitTyVar) - } else { - let ty = this.field_ty(tcx, field); - Ok(self.normalize(ty, locations)) - } - }); - } - debug!("user_ty base: {:?} freshened: {:?} projs: {:?} yields: {:?}", - user_ty.base, ty, user_ty.projs, curr_projected_ty); - - if let Ok(projected_ty) = curr_projected_ty { - let ty = projected_ty.to_ty(tcx); - self.relate_types(ty, v1, a, locations, category)?; - } - } - UserTypeAnnotation::TypeOf(def_id, canonical_substs) => { - let ( - user_substs, - _, - ) = self.infcx - .instantiate_canonical_with_fresh_inference_vars(DUMMY_SP, &canonical_substs); - - let projs = self.infcx.tcx.intern_projs(&user_ty.projs); - self.fully_perform_op( - locations, - category, - self.param_env.and(type_op::ascribe_user_type::AscribeUserType::new( - a, v, def_id, user_substs, projs, - )), - )?; - } + for proj in &user_ty.projs { + let projected_ty = curr_projected_ty.projection_ty_core(tcx, proj, |this, field, &()| { + let ty = this.field_ty(tcx, field); + self.normalize(ty, locations) + }); + curr_projected_ty = projected_ty; } + debug!("user_ty base: {:?} freshened: {:?} projs: {:?} yields: {:?}", + user_ty.base, annotated_type, user_ty.projs, curr_projected_ty); + + let ty = curr_projected_ty.ty; + self.relate_types(a, v, ty, locations, category)?; Ok(()) } @@ -1197,7 +1311,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { // of lowering. Assignments to other sorts of places *are* interesting // though. 
let category = match *place { - Place::Local(RETURN_PLACE) => if let Some(BorrowCheckContext { + Place::Base(PlaceBase::Local(RETURN_PLACE)) => if let Some(BorrowCheckContext { universal_regions: UniversalRegions { defining_ty: DefiningTy::Const(def_id, _), @@ -1214,13 +1328,14 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } else { ConstraintCategory::Return }, - Place::Local(l) if !mir.local_decls[l].is_user_variable.is_some() => { + Place::Base(PlaceBase::Local(l)) + if !mir.local_decls[l].is_user_variable.is_some() => { ConstraintCategory::Boring } _ => ConstraintCategory::Assignment, }; - let place_ty = place.ty(mir, tcx).to_ty(tcx); + let place_ty = place.ty(mir, tcx).ty; let rv_ty = rv.ty(mir, tcx); if let Err(terr) = self.sub_types_or_anon(rv_ty, place_ty, location.to_locations(), category) @@ -1235,19 +1350,20 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ); } - if let Some(user_ty) = self.rvalue_user_ty(rv) { + if let Some(annotation_index) = self.rvalue_user_ty(rv) { if let Err(terr) = self.relate_type_and_user_type( rv_ty, ty::Variance::Invariant, - &UserTypeProjection { base: user_ty, projs: vec![], }, + &UserTypeProjection { base: annotation_index, projs: vec![], }, location.to_locations(), ConstraintCategory::Boring, ) { + let annotation = &self.user_type_annotations[annotation_index]; span_mirbug!( self, stmt, "bad user type on rvalue ({:?} = {:?}): {:?}", - user_ty, + annotation, rv_ty, terr ); @@ -1271,9 +1387,9 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ref place, variant_index, } => { - let place_type = place.ty(mir, tcx).to_ty(tcx); + let place_type = place.ty(mir, tcx).ty; let adt = match place_type.sty { - TyKind::Adt(adt, _) if adt.is_enum() => adt, + ty::Adt(adt, _) if adt.is_enum() => adt, _ => { span_bug!( stmt.source_info.span, @@ -1292,21 +1408,23 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ); }; } - StatementKind::AscribeUserType(ref place, variance, box ref c_ty) => { - let place_ty = place.ty(mir, tcx).to_ty(tcx); + StatementKind::AscribeUserType(ref place, variance, box ref projection) => { + let place_ty = place.ty(mir, tcx).ty; if let Err(terr) = self.relate_type_and_user_type( place_ty, variance, - c_ty, + projection, Locations::All(stmt.source_info.span), ConstraintCategory::TypeAnnotation, ) { + let annotation = &self.user_type_annotations[projection.base]; span_mirbug!( self, stmt, - "bad type assert ({:?} <: {:?}): {:?}", + "bad type assert ({:?} <: {:?} with projections {:?}): {:?}", place_ty, - c_ty, + annotation, + projection.projs, terr ); } @@ -1316,7 +1434,6 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { | StatementKind::StorageDead(..) | StatementKind::InlineAsm { .. } | StatementKind::Retag { .. } - | StatementKind::EscapeToRaw { .. 
} | StatementKind::Nop => {} } } @@ -1348,7 +1465,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { target: _, unwind: _, } => { - let place_ty = location.ty(mir, tcx).to_ty(tcx); + let place_ty = location.ty(mir, tcx).ty; let rv_ty = value.ty(mir, tcx); let locations = term_location.to_locations(); @@ -1416,7 +1533,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { self.check_call_dest(mir, term, &sig, destination, term_location); self.prove_predicates( - sig.inputs().iter().map(|ty| ty::Predicate::WellFormed(ty)), + sig.inputs_and_output.iter().map(|ty| ty::Predicate::WellFormed(ty)), term_location.to_locations(), ConstraintCategory::Boring, ); @@ -1496,9 +1613,9 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { let tcx = self.tcx(); match *destination { Some((ref dest, _target_block)) => { - let dest_ty = dest.ty(mir, tcx).to_ty(tcx); + let dest_ty = dest.ty(mir, tcx).ty; let category = match *dest { - Place::Local(RETURN_PLACE) => { + Place::Base(PlaceBase::Local(RETURN_PLACE)) => { if let Some(BorrowCheckContext { universal_regions: UniversalRegions { @@ -1517,7 +1634,8 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ConstraintCategory::Return } } - Place::Local(l) if !mir.local_decls[l].is_user_variable.is_some() => { + Place::Base(PlaceBase::Local(l)) + if !mir.local_decls[l].is_user_variable.is_some() => { ConstraintCategory::Boring } _ => ConstraintCategory::Assignment, @@ -1546,8 +1664,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } None => { - // FIXME(canndrew): This is_never should probably be an is_uninhabited - if !sig.output().is_never() { + if !sig.output().conservative_is_privately_uninhabited(self.tcx()) { span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig); } } @@ -1564,10 +1681,17 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { from_hir_call: bool, ) { debug!("check_call_inputs({:?}, {:?})", sig, args); - if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.variadic) { + // Do not count the `VaList` argument as a "true" argument to + // a C-variadic function. + let inputs = if sig.c_variadic { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + &sig.inputs()[..] 
+ }; + if args.len() < inputs.len() || (args.len() > inputs.len() && !sig.c_variadic) { span_mirbug!(self, term, "call to {:?} with wrong # of args", sig); } - for (n, (fn_arg, op_arg)) in sig.inputs().iter().zip(args).enumerate() { + for (n, (fn_arg, op_arg)) in inputs.iter().zip(args).enumerate() { let op_arg_ty = op_arg.ty(mir, self.tcx()); let category = if from_hir_call { ConstraintCategory::CallArgument @@ -1877,14 +2001,14 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } - CastKind::ClosureFnPointer => { + CastKind::ClosureFnPointer(unsafety) => { let sig = match op.ty(mir, tcx).sty { ty::Closure(def_id, substs) => { substs.closure_sig_ty(def_id, tcx).fn_sig(tcx) } _ => bug!(), }; - let ty_fn_ptr_from = tcx.coerce_closure_fn_ty(sig); + let ty_fn_ptr_from = tcx.coerce_closure_fn_ty(sig, *unsafety); if let Err(terr) = self.eq_types( ty_fn_ptr_from, @@ -1946,15 +2070,163 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ); } - CastKind::Misc => {} + CastKind::MutToConstPointer => { + let ty_from = match op.ty(mir, tcx).sty { + ty::RawPtr(ty::TypeAndMut { + ty: ty_from, + mutbl: hir::MutMutable, + }) => ty_from, + _ => { + span_mirbug!( + self, + rvalue, + "unexpected base type for cast {:?}", + ty, + ); + return; + } + }; + let ty_to = match ty.sty { + ty::RawPtr(ty::TypeAndMut { + ty: ty_to, + mutbl: hir::MutImmutable, + }) => ty_to, + _ => { + span_mirbug!( + self, + rvalue, + "unexpected target type for cast {:?}", + ty, + ); + return; + } + }; + if let Err(terr) = self.sub_types( + ty_from, + ty_to, + location.to_locations(), + ConstraintCategory::Cast, + ) { + span_mirbug!( + self, + rvalue, + "relating {:?} with {:?} yields {:?}", + ty_from, + ty_to, + terr + ) + } + } + + CastKind::Misc => { + if let ty::Ref(_, mut ty_from, _) = op.ty(mir, tcx).sty { + let (mut ty_to, mutability) = if let ty::RawPtr(ty::TypeAndMut { + ty: ty_to, + mutbl, + }) = ty.sty { + (ty_to, mutbl) + } else { + span_mirbug!( + self, + rvalue, + "invalid cast types {:?} -> {:?}", + op.ty(mir, tcx), + ty, + ); + return; + }; + + // Handle the direct cast from `&[T; N]` to `*const T` by unwrapping + // any array we find. 
+ while let ty::Array(ty_elem_from, _) = ty_from.sty { + ty_from = ty_elem_from; + if let ty::Array(ty_elem_to, _) = ty_to.sty { + ty_to = ty_elem_to; + } else { + break; + } + } + + if let hir::MutMutable = mutability { + if let Err(terr) = self.eq_types( + ty_from, + ty_to, + location.to_locations(), + ConstraintCategory::Cast, + ) { + span_mirbug!( + self, + rvalue, + "equating {:?} with {:?} yields {:?}", + ty_from, + ty_to, + terr + ) + } + } else { + if let Err(terr) = self.sub_types( + ty_from, + ty_to, + location.to_locations(), + ConstraintCategory::Cast, + ) { + span_mirbug!( + self, + rvalue, + "relating {:?} with {:?} yields {:?}", + ty_from, + ty_to, + terr + ) + } + } + } + } } } Rvalue::Ref(region, _borrow_kind, borrowed_place) => { - self.add_reborrow_constraint(location, region, borrowed_place); + self.add_reborrow_constraint(mir, location, region, borrowed_place); + } + + Rvalue::BinaryOp(BinOp::Eq, left, right) + | Rvalue::BinaryOp(BinOp::Ne, left, right) + | Rvalue::BinaryOp(BinOp::Lt, left, right) + | Rvalue::BinaryOp(BinOp::Le, left, right) + | Rvalue::BinaryOp(BinOp::Gt, left, right) + | Rvalue::BinaryOp(BinOp::Ge, left, right) => { + let ty_left = left.ty(mir, tcx); + if let ty::RawPtr(_) | ty::FnPtr(_) = ty_left.sty { + let ty_right = right.ty(mir, tcx); + let common_ty = self.infcx.next_ty_var( + TypeVariableOrigin::MiscVariable(mir.source_info(location).span), + ); + self.sub_types( + common_ty, + ty_left, + location.to_locations(), + ConstraintCategory::Boring + ).unwrap_or_else(|err| { + bug!("Could not equate type variable with {:?}: {:?}", ty_left, err) + }); + if let Err(terr) = self.sub_types( + common_ty, + ty_right, + location.to_locations(), + ConstraintCategory::Boring + ) { + span_mirbug!( + self, + rvalue, + "unexpected comparison types {:?} and {:?} yields {:?}", + ty_left, + ty_right, + terr + ) + } + } } - // FIXME: These other cases have to be implemented in future PRs Rvalue::Use(..) | Rvalue::Len(..) | Rvalue::BinaryOp(..) @@ -1967,7 +2239,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { /// If this rvalue supports a user-given type annotation, then /// extract and return it. This represents the final type of the /// rvalue and will be unified with the inferred type. - fn rvalue_user_ty(&self, rvalue: &Rvalue<'tcx>) -> Option> { + fn rvalue_user_ty(&self, rvalue: &Rvalue<'tcx>) -> Option { match rvalue { Rvalue::Use(_) | Rvalue::Repeat(..) @@ -2041,7 +2313,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { } } - /// Add the constraints that arise from a borrow expression `&'a P` at the location `L`. + /// Adds the constraints that arise from a borrow expression `&'a P` at the location `L`. 
/// /// # Parameters /// @@ -2050,6 +2322,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { /// - `borrowed_place`: the place `P` being borrowed fn add_reborrow_constraint( &mut self, + mir: &Mir<'tcx>, location: Location, borrow_region: ty::Region<'tcx>, borrowed_place: &Place<'tcx>, @@ -2099,7 +2372,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { match *elem { ProjectionElem::Deref => { let tcx = self.infcx.tcx; - let base_ty = base.ty(self.mir, tcx).to_ty(tcx); + let base_ty = base.ty(mir, tcx).ty; debug!("add_reborrow_constraint - base_ty = {:?}", base_ty); match base_ty.sty { @@ -2223,12 +2496,12 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { &mut self, tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, location: Location, ) -> ty::InstantiatedPredicates<'tcx> { if let Some(closure_region_requirements) = tcx.mir_borrowck(def_id).closure_requirements { let closure_constraints = - closure_region_requirements.apply_requirements(tcx, location, def_id, substs); + closure_region_requirements.apply_requirements(tcx, def_id, substs); if let Some(ref mut borrowck_context) = self.borrowck_context { let bounds_mapping = closure_constraints @@ -2255,7 +2528,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { ), )) } - UnpackedKind::Type(_) => None, + UnpackedKind::Type(_) | UnpackedKind::Const(_) => None, } }) .collect(); @@ -2389,8 +2662,13 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> { pub struct TypeckMir; impl MirPass for TypeckMir { - fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, src: MirSource, mir: &mut Mir<'tcx>) { - let def_id = src.def_id; + fn run_pass<'a, 'tcx>( + &self, + tcx: TyCtxt<'a, 'tcx, 'tcx>, + src: MirSource<'tcx>, + mir: &mut Mir<'tcx>, + ) { + let def_id = src.def_id(); debug!("run_pass: {:?}", def_id); // When NLL is enabled, the borrow checker runs the typeck @@ -2405,8 +2683,8 @@ impl MirPass for TypeckMir { return; } - if tcx.is_struct_constructor(def_id) { - // We just assume that the automatically generated struct constructors are + if tcx.is_constructor(def_id) { + // We just assume that the automatically generated struct/variant constructors are // correct. See the comment in the `mir_borrowck` implementation for an // explanation why we need this. return; diff --git a/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs b/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs index 225e2841fb0ac..28835b959d76f 100644 --- a/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs +++ b/src/librustc_mir/borrow_check/nll/type_check/relate_tys.rs @@ -1,15 +1,5 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::nll::constraints::OutlivesConstraint; -use borrow_check::nll::type_check::{BorrowCheckContext, Locations}; +use crate::borrow_check::nll::constraints::OutlivesConstraint; +use crate::borrow_check::nll::type_check::{BorrowCheckContext, Locations}; use rustc::infer::nll_relate::{TypeRelating, TypeRelatingDelegate, NormalizationStrategy}; use rustc::infer::{InferCtxt, NLLRegionVariableOrigin}; use rustc::mir::ConstraintCategory; @@ -24,7 +14,7 @@ use rustc::ty::{self, Ty}; /// - "Invariant" `a == b` /// - "Contravariant" `a :> b` /// -/// NB. The type `a` is permitted to have unresolved inference +/// N.B., the type `a` is permitted to have unresolved inference /// variables, but not the type `b`. pub(super) fn relate_types<'tcx>( infcx: &InferCtxt<'_, '_, 'tcx>, diff --git a/src/librustc_mir/borrow_check/nll/universal_regions.rs b/src/librustc_mir/borrow_check/nll/universal_regions.rs index 97fdb80cc78a5..ae8dfa8144fd9 100644 --- a/src/librustc_mir/borrow_check/nll/universal_regions.rs +++ b/src/librustc_mir/borrow_check/nll/universal_regions.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code to extract the universally quantified regions declared on a //! function and the relationships between them. For example: //! @@ -27,13 +17,12 @@ use rustc::hir::def_id::DefId; use rustc::hir::{self, BodyOwnerKind, HirId}; use rustc::infer::{InferCtxt, NLLRegionVariableOrigin}; use rustc::ty::fold::TypeFoldable; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; use rustc::ty::{self, ClosureSubsts, GeneratorSubsts, RegionVid, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_errors::DiagnosticBuilder; use std::iter; -use syntax::ast; use super::ToRegionVid; @@ -45,15 +34,15 @@ pub struct UniversalRegions<'tcx> { pub fr_static: RegionVid, /// A special region vid created to represent the current MIR fn - /// body. It will outlive the entire CFG but it will not outlive + /// body. It will outlive the entire CFG but it will not outlive /// any other universal regions. pub fr_fn_body: RegionVid, /// We create region variables such that they are ordered by their /// `RegionClassification`. The first block are globals, then - /// externals, then locals. So things from: - /// - `FIRST_GLOBAL_INDEX..first_extern_index` are global; - /// - `first_extern_index..first_local_index` are external; and + /// externals, then locals. So, things from: + /// - `FIRST_GLOBAL_INDEX..first_extern_index` are global, + /// - `first_extern_index..first_local_index` are external, /// - `first_local_index..num_universals` are local. first_extern_index: usize, @@ -64,21 +53,21 @@ pub struct UniversalRegions<'tcx> { num_universals: usize, /// The "defining" type for this function, with all universal - /// regions instantiated. For a closure or generator, this is the + /// regions instantiated. For a closure or generator, this is the /// closure type, but for a top-level function it's the `FnDef`. pub defining_ty: DefiningTy<'tcx>, /// The return type of this function, with all regions replaced by /// their universal `RegionVid` equivalents. /// - /// NB. 
Associated types in this type have not been normalized, + /// N.B., associated types in this type have not been normalized, /// as the name suggests. =) pub unnormalized_output_ty: Ty<'tcx>, /// The fully liberated input types of this function, with all /// regions replaced by their universal `RegionVid` equivalents. /// - /// NB. Associated types in these types have not been normalized, + /// N.B., associated types in these types have not been normalized, /// as the name suggests. =) pub unnormalized_input_tys: &'tcx [Ty<'tcx>], @@ -102,14 +91,14 @@ pub enum DefiningTy<'tcx> { /// `ClosureSubsts::generator_return_ty`. Generator(DefId, ty::GeneratorSubsts<'tcx>, hir::GeneratorMovability), - /// The MIR is a fn item with the given def-id and substs. The signature + /// The MIR is a fn item with the given `DefId` and substs. The signature /// of the function can be bound then with the `fn_sig` query. - FnDef(DefId, &'tcx Substs<'tcx>), + FnDef(DefId, SubstsRef<'tcx>), /// The MIR represents some form of constant. The signature then /// is that it has no inputs and a single return value, which is /// the value of the constant. - Const(DefId, &'tcx Substs<'tcx>), + Const(DefId, SubstsRef<'tcx>), } impl<'tcx> DefiningTy<'tcx> { @@ -148,7 +137,7 @@ struct UniversalRegionIndices<'tcx> { /// used because trait matching and type-checking will feed us /// region constraints that reference those regions and we need to /// be able to map them our internal `RegionVid`. This is - /// basically equivalent to a `Substs`, except that it also + /// basically equivalent to a `InternalSubsts`, except that it also /// contains an entry for `ReStatic` -- it might be nice to just /// use a substs, and then handle `ReStatic` another way. indices: FxHashMap, RegionVid>, @@ -184,13 +173,13 @@ pub enum RegionClassification { /// A **local** lifetime is one about which we know the full set /// of relevant constraints (that is, relationships to other named - /// regions). For a closure, this includes any region bound in - /// the closure's signature. For a fn item, this includes all + /// regions). For a closure, this includes any region bound in + /// the closure's signature. For a fn item, this includes all /// regions other than global ones. /// /// Continuing with the example from `External`, if we were /// analyzing the closure, then `'x` would be local (and `'a` and - /// `'b` are external). If we are analyzing the function item + /// `'b` are external). If we are analyzing the function item /// `foo`, then `'a` and `'b` are local (and `'x` is not in /// scope). Local, @@ -210,12 +199,10 @@ impl<'tcx> UniversalRegions<'tcx> { param_env: ty::ParamEnv<'tcx>, ) -> Self { let tcx = infcx.tcx; - let mir_node_id = tcx.hir().as_local_node_id(mir_def_id).unwrap(); - let mir_hir_id = tcx.hir().node_to_hir_id(mir_node_id); + let mir_hir_id = tcx.hir().as_local_hir_id(mir_def_id).unwrap(); UniversalRegionsBuilder { infcx, mir_def_id, - mir_node_id, mir_hir_id, param_env, }.build() @@ -232,7 +219,7 @@ impl<'tcx> UniversalRegions<'tcx> { /// `V[1]: V[2]`. pub fn closure_mapping( tcx: TyCtxt<'_, '_, 'tcx>, - closure_substs: &'tcx Substs<'tcx>, + closure_substs: SubstsRef<'tcx>, expected_num_vars: usize, closure_base_def_id: DefId, ) -> IndexVec> { @@ -255,7 +242,7 @@ impl<'tcx> UniversalRegions<'tcx> { region_mapping } - /// True if `r` is a member of this set of universal regions. + /// Returns `true` if `r` is a member of this set of universal regions. 
pub fn is_universal_region(&self, r: RegionVid) -> bool { (FIRST_GLOBAL_INDEX..self.num_universals).contains(&r.index()) } @@ -281,7 +268,7 @@ impl<'tcx> UniversalRegions<'tcx> { (FIRST_GLOBAL_INDEX..self.num_universals).map(RegionVid::new) } - /// True if `r` is classified as an local region. + /// Returns `true` if `r` is classified as an local region. pub fn is_local_free_region(&self, r: RegionVid) -> bool { self.region_classification(r) == Some(RegionClassification::Local) } @@ -300,7 +287,7 @@ impl<'tcx> UniversalRegions<'tcx> { self.first_local_index } - /// Get an iterator over all the early-bound regions that have names. + /// Gets an iterator over all the early-bound regions that have names. pub fn named_universal_regions<'s>( &'s self, ) -> impl Iterator, ty::RegionVid)> + 's { @@ -380,7 +367,6 @@ struct UniversalRegionsBuilder<'cx, 'gcx: 'tcx, 'tcx: 'cx> { infcx: &'cx InferCtxt<'cx, 'gcx, 'tcx>, mir_def_id: DefId, mir_hir_id: HirId, - mir_node_id: ast::NodeId, param_env: ty::ParamEnv<'tcx>, } @@ -485,13 +471,14 @@ impl<'cx, 'gcx, 'tcx> UniversalRegionsBuilder<'cx, 'gcx, 'tcx> { let tcx = self.infcx.tcx; let closure_base_def_id = tcx.closure_base_def_id(self.mir_def_id); - match tcx.hir().body_owner_kind(self.mir_node_id) { + match tcx.hir().body_owner_kind_by_hir_id(self.mir_hir_id) { + BodyOwnerKind::Closure | BodyOwnerKind::Fn => { let defining_ty = if self.mir_def_id == closure_base_def_id { tcx.type_of(closure_base_def_id) } else { let tables = tcx.typeck_tables_of(self.mir_def_id); - tables.node_id_to_type(self.mir_hir_id) + tables.node_type(self.mir_hir_id) }; debug!("defining_ty (pre-replacement): {:?}", defining_ty); @@ -516,7 +503,7 @@ impl<'cx, 'gcx, 'tcx> UniversalRegionsBuilder<'cx, 'gcx, 'tcx> { BodyOwnerKind::Const | BodyOwnerKind::Static(..) => { assert_eq!(closure_base_def_id, self.mir_def_id); - let identity_substs = Substs::identity_for_item(tcx, closure_base_def_id); + let identity_substs = InternalSubsts::identity_for_item(tcx, closure_base_def_id); let substs = self.infcx .replace_free_regions_with_nll_infer_vars(FR, &identity_substs); DefiningTy::Const(self.mir_def_id, substs) @@ -536,7 +523,7 @@ impl<'cx, 'gcx, 'tcx> UniversalRegionsBuilder<'cx, 'gcx, 'tcx> { let tcx = self.infcx.tcx; let gcx = tcx.global_tcx(); let closure_base_def_id = tcx.closure_base_def_id(self.mir_def_id); - let identity_substs = Substs::identity_for_item(gcx, closure_base_def_id); + let identity_substs = InternalSubsts::identity_for_item(gcx, closure_base_def_id); let fr_substs = match defining_ty { DefiningTy::Closure(_, ClosureSubsts { ref substs }) | DefiningTy::Generator(_, GeneratorSubsts { ref substs }, _) => { @@ -701,7 +688,7 @@ impl<'cx, 'gcx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'gcx, 'tcx> { /// indices vector. Typically, we identify late-bound regions as we process the inputs and /// outputs of the closure/function. However, sometimes there are late-bound regions which do /// not appear in the fn parameters but which are nonetheless in scope. The simplest case of - /// this are unused functions, like fn foo<'a>() { } (see eg., #51351). Despite not being used, + /// this are unused functions, like fn foo<'a>() { } (see e.g., #51351). Despite not being used, /// users can still reference these regions (e.g., let x: &'a u32 = &22;), so we need to create /// entries for them and store them in the indices map. 
This code iterates over the complete /// set of late-bound regions and checks for any that we have not yet seen, adding them to the @@ -755,7 +742,7 @@ impl<'tcx> UniversalRegionIndices<'tcx> { } } - /// Replace all free regions in `value` with region vids, as + /// Replaces all free regions in `value` with region vids, as /// returned by `to_region_vid`. pub fn fold_to_region_vids(&self, tcx: TyCtxt<'_, '_, 'tcx>, value: &T) -> T where @@ -780,9 +767,8 @@ fn for_each_late_bound_region_defined_on<'tcx>( owner: fn_def_id.index, local_id: *late_bound, }; - let region_node_id = tcx.hir().hir_to_node_id(hir_id); - let name = tcx.hir().name(region_node_id).as_interned_str(); - let region_def_id = tcx.hir().local_def_id(region_node_id); + let name = tcx.hir().name_by_hir_id(hir_id).as_interned_str(); + let region_def_id = tcx.hir().local_def_id_from_hir_id(hir_id); let liberated_region = tcx.mk_region(ty::ReFree(ty::FreeRegion { scope: fn_def_id, bound_region: ty::BoundRegion::BrNamed(region_def_id, name), diff --git a/src/librustc_mir/borrow_check/path_utils.rs b/src/librustc_mir/borrow_check/path_utils.rs index 9250c04969f98..42eb502b9076d 100644 --- a/src/librustc_mir/borrow_check/path_utils.rs +++ b/src/librustc_mir/borrow_check/path_utils.rs @@ -1,24 +1,14 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::borrow_set::{BorrowSet, BorrowData, TwoPhaseActivation}; -use borrow_check::places_conflict; -use borrow_check::Context; -use borrow_check::AccessDepth; -use dataflow::indexes::BorrowIndex; -use rustc::mir::{BasicBlock, Location, Mir, Place}; +use crate::borrow_check::borrow_set::{BorrowSet, BorrowData, TwoPhaseActivation}; +use crate::borrow_check::places_conflict; +use crate::borrow_check::Context; +use crate::borrow_check::AccessDepth; +use crate::dataflow::indexes::BorrowIndex; +use rustc::mir::{BasicBlock, Location, Mir, Place, PlaceBase}; use rustc::mir::{ProjectionElem, BorrowKind}; use rustc::ty::TyCtxt; use rustc_data_structures::graph::dominators::Dominators; -/// Returns true if the borrow represented by `kind` is +/// Returns `true` if the borrow represented by `kind` is /// allowed to be split into separate Reservation and /// Activation phases. pub(super) fn allow_two_phase_borrow<'a, 'tcx, 'gcx: 'tcx>( @@ -68,6 +58,7 @@ pub(super) fn each_borrow_involving_path<'a, 'tcx, 'gcx: 'tcx, F, I, S> ( borrowed.kind, place, access, + places_conflict::PlaceConflictBias::Overlap, ) { debug!( "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}", @@ -147,9 +138,8 @@ pub(super) fn is_active<'tcx>( /// This is called for all Yield statements on movable generators pub(super) fn borrow_of_local_data<'tcx>(place: &Place<'tcx>) -> bool { match place { - Place::Promoted(_) | - Place::Static(..) => false, - Place::Local(..) 
=> true, + Place::Base(PlaceBase::Static(..)) => false, + Place::Base(PlaceBase::Local(..)) => true, Place::Projection(box proj) => { match proj.elem { // Reborrow of already borrowed data is ignored diff --git a/src/librustc_mir/borrow_check/place_ext.rs b/src/librustc_mir/borrow_check/place_ext.rs index 740cc64598438..8269b7b95f490 100644 --- a/src/librustc_mir/borrow_check/place_ext.rs +++ b/src/librustc_mir/borrow_check/place_ext.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir; use rustc::mir::ProjectionElem; -use rustc::mir::{Local, Mir, Place, Mutability}; +use rustc::mir::{Local, Mir, Place, PlaceBase, Mutability, Static, StaticKind}; use rustc::ty::{self, TyCtxt}; -use borrow_check::borrow_set::LocalsStateAtExit; +use crate::borrow_check::borrow_set::LocalsStateAtExit; /// Extension methods for the `Place` type. crate trait PlaceExt<'tcx> { - /// Returns true if we can safely ignore borrows of this place. + /// Returns `true` if we can safely ignore borrows of this place. /// This is true whenever there is no action that the user can do /// to the place `self` that would invalidate the borrow. This is true /// for borrows of raw pointer dereferents as well as shared references. @@ -40,8 +30,6 @@ impl<'tcx> PlaceExt<'tcx> for Place<'tcx> { locals_state_at_exit: &LocalsStateAtExit, ) -> bool { match self { - Place::Promoted(_) => false, - // If a local variable is immutable, then we only need to track borrows to guard // against two kinds of errors: // * The variable being dropped while still borrowed (e.g., because the fn returns @@ -50,7 +38,7 @@ impl<'tcx> PlaceExt<'tcx> for Place<'tcx> { // // In particular, the variable cannot be mutated -- the "access checks" will fail -- // so we don't have to worry about mutation while borrowed. - Place::Local(index) => { + Place::Base(PlaceBase::Local(index)) => { match locals_state_at_exit { LocalsStateAtExit::AllAreInvalidated => false, LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved } => { @@ -61,8 +49,10 @@ impl<'tcx> PlaceExt<'tcx> for Place<'tcx> { } } } - Place::Static(static_) => { - tcx.is_static(static_.def_id) == Some(hir::Mutability::MutMutable) + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. })) => + false, + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. })) => { + tcx.is_static(*def_id) == Some(hir::Mutability::MutMutable) } Place::Projection(proj) => match proj.elem { ProjectionElem::Field(..) 
@@ -73,7 +63,7 @@ impl<'tcx> PlaceExt<'tcx> for Place<'tcx> { tcx, mir, locals_state_at_exit), ProjectionElem::Deref => { - let ty = proj.base.ty(mir, tcx).to_ty(tcx); + let ty = proj.base.ty(mir, tcx).ty; match ty.sty { // For both derefs of raw pointers and `&T` // references, the original path is `Copy` and @@ -98,9 +88,8 @@ impl<'tcx> PlaceExt<'tcx> for Place<'tcx> { loop { match p { Place::Projection(pi) => p = &pi.base, - Place::Promoted(_) | - Place::Static(_) => return None, - Place::Local(l) => return Some(*l), + Place::Base(PlaceBase::Static(_)) => return None, + Place::Base(PlaceBase::Local(l)) => return Some(*l), } } } diff --git a/src/librustc_mir/borrow_check/places_conflict.rs b/src/librustc_mir/borrow_check/places_conflict.rs index e24586cca0929..fbe8b8485dda5 100644 --- a/src/librustc_mir/borrow_check/places_conflict.rs +++ b/src/librustc_mir/borrow_check/places_conflict.rs @@ -1,22 +1,48 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use borrow_check::ArtificialField; -use borrow_check::Overlap; -use borrow_check::{Deep, Shallow, AccessDepth}; +use crate::borrow_check::ArtificialField; +use crate::borrow_check::Overlap; +use crate::borrow_check::{Deep, Shallow, AccessDepth}; use rustc::hir; -use rustc::mir::{BorrowKind, Mir, Place}; -use rustc::mir::{Projection, ProjectionElem}; +use rustc::mir::{BorrowKind, Mir, Place, PlaceBase, Projection, ProjectionElem, StaticKind}; use rustc::ty::{self, TyCtxt}; use std::cmp::max; +/// When checking if a place conflicts with another place, this enum is used to influence decisions +/// where a place might be equal or disjoint with another place, such as if `a[i] == a[j]`. +/// `PlaceConflictBias::Overlap` would bias toward assuming that `i` might equal `j` and that these +/// places overlap. `PlaceConflictBias::NoOverlap` assumes that for the purposes of the predicate +/// being run in the calling context, the conservative choice is to assume the compared indices +/// are disjoint (and therefore, do not overlap). +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +crate enum PlaceConflictBias { + Overlap, + NoOverlap, +} + +/// Helper function for checking if places conflict with a mutable borrow and deep access depth. +/// This is used to check for places conflicting outside of the borrow checking code (such as in +/// dataflow). +crate fn places_conflict<'gcx, 'tcx>( + tcx: TyCtxt<'_, 'gcx, 'tcx>, + mir: &Mir<'tcx>, + borrow_place: &Place<'tcx>, + access_place: &Place<'tcx>, + bias: PlaceConflictBias, +) -> bool { + borrow_conflicts_with_place( + tcx, + mir, + borrow_place, + BorrowKind::Mut { allow_two_phase_borrow: true }, + access_place, + AccessDepth::Deep, + bias, + ) +} + +/// Checks whether the `borrow_place` conflicts with the `access_place` given a borrow kind and +/// access depth. The `bias` parameter is used to determine how the unknowable (comparing runtime +/// array indices, for example) should be interpreted - this depends on what the caller wants in +/// order to make the conservative choice and preserve soundness. 
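A minimal standalone sketch of the bias described above (illustrative names only; this is not the real MIR `Place`/`Overlap` machinery): the case the bias actually decides is two runtime array indices, where equality is unknowable at compile time, so the caller-supplied bias picks which conservative answer comes back.

```rust
// Toy model of the `a[i]` vs. `a[j]` case handled in `place_element_conflict`.
// The types and names here are simplified stand-ins, not rustc's own.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Bias {
    Overlap,
    NoOverlap,
}

#[derive(Debug, PartialEq)]
enum Overlap {
    EqualOrDisjoint,
    Disjoint,
}

fn runtime_indices_conflict(bias: Bias) -> Overlap {
    match bias {
        // A borrow-check-style caller assumes `i` may equal `j`.
        Bias::Overlap => Overlap::EqualOrDisjoint,
        // A dataflow-style caller may instead treat the indices as disjoint.
        Bias::NoOverlap => Overlap::Disjoint,
    }
}

fn main() {
    assert_eq!(runtime_indices_conflict(Bias::Overlap), Overlap::EqualOrDisjoint);
    assert_eq!(runtime_indices_conflict(Bias::NoOverlap), Overlap::Disjoint);
}
```

As the doc comment above says, which bias is "conservative" depends on the predicate the caller is computing; the sketch only makes that branch explicit.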
pub(super) fn borrow_conflicts_with_place<'gcx, 'tcx>( tcx: TyCtxt<'_, 'gcx, 'tcx>, mir: &Mir<'tcx>, @@ -24,16 +50,17 @@ pub(super) fn borrow_conflicts_with_place<'gcx, 'tcx>( borrow_kind: BorrowKind, access_place: &Place<'tcx>, access: AccessDepth, + bias: PlaceConflictBias, ) -> bool { debug!( - "borrow_conflicts_with_place({:?},{:?},{:?})", - borrow_place, access_place, access + "borrow_conflicts_with_place({:?}, {:?}, {:?}, {:?})", + borrow_place, access_place, access, bias, ); // This Local/Local case is handled by the more general code below, but // it's so common that it's a speed win to check for it first. - if let Place::Local(l1) = borrow_place { - if let Place::Local(l2) = access_place { + if let Place::Base(PlaceBase::Local(l1)) = borrow_place { + if let Place::Base(PlaceBase::Local(l2)) = access_place { return l1 == l2; } } @@ -46,7 +73,8 @@ pub(super) fn borrow_conflicts_with_place<'gcx, 'tcx>( borrow_components, borrow_kind, access_components, - access + access, + bias, ) }) }) @@ -59,6 +87,7 @@ fn place_components_conflict<'gcx, 'tcx>( borrow_kind: BorrowKind, mut access_components: PlaceComponentsIter<'_, 'tcx>, access: AccessDepth, + bias: PlaceConflictBias, ) -> bool { // The borrowck rules for proving disjointness are applied from the "root" of the // borrow forwards, iterating over "similar" projections in lockstep until @@ -121,7 +150,7 @@ fn place_components_conflict<'gcx, 'tcx>( // check whether the components being borrowed vs // accessed are disjoint (as in the second example, // but not the first). - match place_element_conflict(tcx, mir, borrow_c, access_c) { + match place_element_conflict(tcx, mir, borrow_c, access_c, bias) { Overlap::Arbitrary => { // We have encountered different fields of potentially // the same union - the borrow now partially overlaps. @@ -162,7 +191,7 @@ fn place_components_conflict<'gcx, 'tcx>( Place::Projection(box Projection { base, elem }) => (base, elem), _ => bug!("place has no base?"), }; - let base_ty = base.ty(mir, tcx).to_ty(tcx); + let base_ty = base.ty(mir, tcx).ty; match (elem, &base_ty.sty, access) { (_, _, Shallow(Some(ArtificialField::ArrayLength))) @@ -190,7 +219,7 @@ fn place_components_conflict<'gcx, 'tcx>( bug!("Tracking borrow behind shared reference."); } (ProjectionElem::Deref, ty::Ref(_, _, hir::MutMutable), AccessDepth::Drop) => { - // Values behind a mutatble reference are not access either by Dropping a + // Values behind a mutable reference are not access either by dropping a // value, or by StorageDead debug!("borrow_conflicts_with_place: drop access behind ptr"); return false; @@ -245,10 +274,10 @@ fn place_components_conflict<'gcx, 'tcx>( /// A linked list of places running up the stack; begins with the /// innermost place and extends to projections (e.g., `a.b` would have -/// the place `a` with a "next" pointer to `a.b`). Created by +/// the place `a` with a "next" pointer to `a.b`). Created by /// `unroll_place`. /// -/// N.B., this particular impl strategy is not the most obvious. It was +/// N.B., this particular impl strategy is not the most obvious. It was /// chosen because it makes a measurable difference to NLL /// performance, as this code (`borrow_conflicts_with_place`) is somewhat hot. 
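As a rough standalone sketch of that structure (a hypothetical, simplified `Place`, not the MIR one), unrolling a projection chain yields its components base-first, which is the order the lockstep comparison above walks them in.

```rust
// Simplified stand-in for MIR places: a local base plus field projections.
#[derive(Debug)]
enum Place {
    Local(&'static str),
    Projection(Box<Place>, &'static str), // (base, field)
}

// Visit components base-first, the way the `PlaceComponents` iterator does:
// for `a.b.c` we see "a", then "a.b", then "a.b.c".
fn unroll(place: &Place, out: &mut Vec<String>) {
    match place {
        Place::Local(name) => out.push((*name).to_string()),
        Place::Projection(base, field) => {
            unroll(base, out);
            let prefix = out.last().cloned().unwrap();
            out.push(format!("{}.{}", prefix, field));
        }
    }
}

fn main() {
    let a_b_c = Place::Projection(
        Box::new(Place::Projection(Box::new(Place::Local("a")), "b")),
        "c",
    );
    let mut components = Vec::new();
    unroll(&a_b_c, &mut components);
    assert_eq!(components, vec!["a", "a.b", "a.b.c"]);
}
```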
struct PlaceComponents<'p, 'tcx: 'p> { @@ -309,8 +338,7 @@ fn unroll_place<'tcx, R>( op, ), - Place::Promoted(_) | - Place::Local(_) | Place::Static(_) => { + Place::Base(PlaceBase::Local(_)) | Place::Base(PlaceBase::Static(_)) => { let list = PlaceComponents { component: place, next, @@ -328,9 +356,10 @@ fn place_element_conflict<'a, 'gcx: 'tcx, 'tcx>( mir: &Mir<'tcx>, elem1: &Place<'tcx>, elem2: &Place<'tcx>, + bias: PlaceConflictBias, ) -> Overlap { match (elem1, elem2) { - (Place::Local(l1), Place::Local(l2)) => { + (Place::Base(PlaceBase::Local(l1)), Place::Base(PlaceBase::Local(l2))) => { if l1 == l2 { // the same local - base case, equal debug!("place_element_conflict: DISJOINT-OR-EQ-LOCAL"); @@ -341,40 +370,47 @@ fn place_element_conflict<'a, 'gcx: 'tcx, 'tcx>( Overlap::Disjoint } } - (Place::Static(static1), Place::Static(static2)) => { - if static1.def_id != static2.def_id { - debug!("place_element_conflict: DISJOINT-STATIC"); - Overlap::Disjoint - } else if tcx.is_static(static1.def_id) == Some(hir::Mutability::MutMutable) { - // We ignore mutable statics - they can only be unsafe code. - debug!("place_element_conflict: IGNORE-STATIC-MUT"); - Overlap::Disjoint - } else { - debug!("place_element_conflict: DISJOINT-OR-EQ-STATIC"); - Overlap::EqualOrDisjoint - } - } - (Place::Promoted(p1), Place::Promoted(p2)) => { - if p1.0 == p2.0 { - if let ty::Array(_, size) = p1.1.sty { - if size.unwrap_usize(tcx) == 0 { - // Ignore conflicts with promoted [T; 0]. - debug!("place_element_conflict: IGNORE-LEN-0-PROMOTED"); - return Overlap::Disjoint; + (Place::Base(PlaceBase::Static(s1)), Place::Base(PlaceBase::Static(s2))) => { + match (&s1.kind, &s2.kind) { + (StaticKind::Static(def_id_1), StaticKind::Static(def_id_2)) => { + if def_id_1 != def_id_2 { + debug!("place_element_conflict: DISJOINT-STATIC"); + Overlap::Disjoint + } else if tcx.is_static(*def_id_1) == Some(hir::Mutability::MutMutable) { + // We ignore mutable statics - they can only be unsafe code. + debug!("place_element_conflict: IGNORE-STATIC-MUT"); + Overlap::Disjoint + } else { + debug!("place_element_conflict: DISJOINT-OR-EQ-STATIC"); + Overlap::EqualOrDisjoint + } + }, + (StaticKind::Promoted(promoted_1), StaticKind::Promoted(promoted_2)) => { + if promoted_1 == promoted_2 { + if let ty::Array(_, size) = s1.ty.sty { + if size.unwrap_usize(tcx) == 0 { + // Ignore conflicts with promoted [T; 0]. 
+ debug!("place_element_conflict: IGNORE-LEN-0-PROMOTED"); + return Overlap::Disjoint; + } + } + // the same promoted - base case, equal + debug!("place_element_conflict: DISJOINT-OR-EQ-PROMOTED"); + Overlap::EqualOrDisjoint + } else { + // different promoteds - base case, disjoint + debug!("place_element_conflict: DISJOINT-PROMOTED"); + Overlap::Disjoint } + }, + (_, _) => { + debug!("place_element_conflict: DISJOINT-STATIC-PROMOTED"); + Overlap::Disjoint } - // the same promoted - base case, equal - debug!("place_element_conflict: DISJOINT-OR-EQ-PROMOTED"); - Overlap::EqualOrDisjoint - } else { - // different promoteds - base case, disjoint - debug!("place_element_conflict: DISJOINT-PROMOTED"); - Overlap::Disjoint } } - (Place::Local(_), Place::Promoted(_)) | (Place::Promoted(_), Place::Local(_)) | - (Place::Promoted(_), Place::Static(_)) | (Place::Static(_), Place::Promoted(_)) | - (Place::Local(_), Place::Static(_)) | (Place::Static(_), Place::Local(_)) => { + (Place::Base(PlaceBase::Local(_)), Place::Base(PlaceBase::Static(_))) | + (Place::Base(PlaceBase::Static(_)), Place::Base(PlaceBase::Local(_))) => { debug!("place_element_conflict: DISJOINT-STATIC-LOCAL-PROMOTED"); Overlap::Disjoint } @@ -391,7 +427,7 @@ fn place_element_conflict<'a, 'gcx: 'tcx, 'tcx>( debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD"); Overlap::EqualOrDisjoint } else { - let ty = pi1.base.ty(mir, tcx).to_ty(tcx); + let ty = pi1.base.ty(mir, tcx).ty; match ty.sty { ty::Adt(def, _) if def.is_union() => { // Different fields of a union, we are basically stuck. @@ -445,10 +481,20 @@ fn place_element_conflict<'a, 'gcx: 'tcx, 'tcx>( | (ProjectionElem::ConstantIndex { .. }, ProjectionElem::Index(..)) | (ProjectionElem::Subslice { .. }, ProjectionElem::Index(..)) => { // Array indexes (`a[0]` vs. `a[i]`). These can either be disjoint - // (if the indexes differ) or equal (if they are the same), so this - // is the recursive case that gives "equal *or* disjoint" its meaning. - debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-INDEX"); - Overlap::EqualOrDisjoint + // (if the indexes differ) or equal (if they are the same). + match bias { + PlaceConflictBias::Overlap => { + // If we are biased towards overlapping, then this is the recursive + // case that gives "equal *or* disjoint" its meaning. + debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-INDEX"); + Overlap::EqualOrDisjoint + } + PlaceConflictBias::NoOverlap => { + // If we are biased towards no overlapping, then this is disjoint. + debug!("place_element_conflict: DISJOINT-ARRAY-INDEX"); + Overlap::Disjoint + } + } } (ProjectionElem::ConstantIndex { offset: o1, min_length: _, from_end: false }, ProjectionElem::ConstantIndex { offset: o2, min_length: _, from_end: false }) diff --git a/src/librustc_mir/borrow_check/prefixes.rs b/src/librustc_mir/borrow_check/prefixes.rs index b759e0416e566..866f1cf994e69 100644 --- a/src/librustc_mir/borrow_check/prefixes.rs +++ b/src/librustc_mir/borrow_check/prefixes.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! From the NLL RFC: "The deep [aka 'supporting'] prefixes for an //! place are formed by stripping away fields and derefs, except that //! 
we stop when we reach the deref of a shared reference. [...] " @@ -21,7 +11,7 @@ use super::MirBorrowckCtxt; use rustc::hir; use rustc::ty::{self, TyCtxt}; -use rustc::mir::{Mir, Place, ProjectionElem}; +use rustc::mir::{Mir, Place, PlaceBase, ProjectionElem}; pub trait IsPrefixOf<'tcx> { fn is_prefix_of(&self, other: &Place<'tcx>) -> bool; @@ -36,8 +26,8 @@ impl<'tcx> IsPrefixOf<'tcx> for Place<'tcx> { } match *cursor { - Place::Promoted(_) | - Place::Local(_) | Place::Static(_) => return false, + Place::Base(PlaceBase::Local(_)) | + Place::Base(PlaceBase::Static(_)) => return false, Place::Projection(ref proj) => { cursor = &proj.base; } @@ -96,9 +86,8 @@ impl<'cx, 'gcx, 'tcx> Iterator for Prefixes<'cx, 'gcx, 'tcx> { 'cursor: loop { let proj = match *cursor { - Place::Promoted(_) | - Place::Local(_) | // search yielded this leaf - Place::Static(_) => { + Place::Base(PlaceBase::Local(_)) | // search yielded this leaf + Place::Base(PlaceBase::Static(_)) => { self.next = None; return Some(cursor); } @@ -150,7 +139,7 @@ impl<'cx, 'gcx, 'tcx> Iterator for Prefixes<'cx, 'gcx, 'tcx> { // derefs, except we stop at the deref of a shared // reference. - let ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + let ty = proj.base.ty(self.mir, self.tcx).ty; match ty.sty { ty::RawPtr(_) | ty::Ref( diff --git a/src/librustc_mir/borrow_check/used_muts.rs b/src/librustc_mir/borrow_check/used_muts.rs index 7c75fb59917c0..b102bced0e335 100644 --- a/src/librustc_mir/borrow_check/used_muts.rs +++ b/src/librustc_mir/borrow_check/used_muts.rs @@ -1,19 +1,11 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::visit::{PlaceContext, Visitor}; -use rustc::mir::{BasicBlock, Local, Location, Place, Statement, StatementKind, TerminatorKind}; +use rustc::mir::{ + BasicBlock, Local, Location, Place, PlaceBase, Statement, StatementKind, TerminatorKind +}; use rustc_data_structures::fx::FxHashSet; -use borrow_check::MirBorrowckCtxt; +use crate::borrow_check::MirBorrowckCtxt; impl<'cx, 'gcx, 'tcx> MirBorrowckCtxt<'cx, 'gcx, 'tcx> { /// Walks the MIR adding to the set of `used_mut` locals that will be ignored for the purposes @@ -124,7 +116,7 @@ impl<'visit, 'cx, 'gcx, 'tcx> Visitor<'tcx> for GatherUsedMutsVisitor<'visit, 'c "assignment of {:?} to {:?}, adding {:?} to used mutable set", path.place, local, path.place ); - if let Place::Local(user_local) = path.place { + if let Place::Base(PlaceBase::Local(user_local)) = path.place { self.mbcx.used_mut.insert(user_local); } } diff --git a/src/librustc_mir/build/block.rs b/src/librustc_mir/build/block.rs index 4df8d66e2fb5d..7469aceee3a9e 100644 --- a/src/librustc_mir/build/block.rs +++ b/src/librustc_mir/build/block.rs @@ -1,23 +1,11 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; -use build::ForGuard::OutsideGuard; -use build::matches::ArmHasGuard; -use hair::*; +use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; +use crate::build::ForGuard::OutsideGuard; +use crate::build::matches::ArmHasGuard; +use crate::hair::*; use rustc::mir::*; use rustc::hir; use syntax_pos::Span; -use std::slice; - impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn ast_block(&mut self, destination: &Place<'tcx>, @@ -135,7 +123,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { None, remainder_span, lint_level, - slice::from_ref(&pattern), + &pattern, ArmHasGuard(false), Some((None, initializer_span)), ); @@ -148,12 +136,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { })); } else { scope = this.declare_bindings( - None, remainder_span, lint_level, slice::from_ref(&pattern), + None, remainder_span, lint_level, &pattern, ArmHasGuard(false), None); + debug!("ast_block_stmts: pattern={:?}", pattern); this.visit_bindings( &pattern, - &PatternTypeProjections::none(), + UserTypeProjections::none(), &mut |this, _, _, _, node, span, _, _| { this.storage_live_binding(block, node, span, OutsideGuard); this.schedule_drop_for_binding(node, span, OutsideGuard); @@ -174,7 +163,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Then, the block may have an optional trailing expression which is a “return” value // of the block, which is stored into `destination`. let tcx = this.hir.tcx(); - let destination_ty = destination.ty(&this.local_decls, tcx).to_ty(tcx); + let destination_ty = destination.ty(&this.local_decls, tcx).ty; if let Some(expr) = expr { let tail_result_is_ignored = destination_ty.is_unit() || this.block_context.currently_ignores_tail_results(); @@ -215,14 +204,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { debug!("update_source_scope_for({:?}, {:?})", span, safety_mode); let new_unsafety = match safety_mode { BlockSafety::Safe => None, - BlockSafety::ExplicitUnsafe(node_id) => { + BlockSafety::ExplicitUnsafe(hir_id) => { assert_eq!(self.push_unsafe_count, 0); match self.unpushed_unsafe { Safety::Safe => {} _ => return } - self.unpushed_unsafe = Safety::ExplicitUnsafe(node_id); - Some(Safety::ExplicitUnsafe(node_id)) + self.unpushed_unsafe = Safety::ExplicitUnsafe(hir_id); + Some(Safety::ExplicitUnsafe(hir_id)) } BlockSafety::PushUnsafe => { self.push_unsafe_count += 1; diff --git a/src/librustc_mir/build/cfg.rs b/src/librustc_mir/build/cfg.rs index 2efb75c232d2e..778d1e71cedfc 100644 --- a/src/librustc_mir/build/cfg.rs +++ b/src/librustc_mir/build/cfg.rs @@ -1,19 +1,6 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - - - //! Routines for manipulating the control-flow graph. -use build::CFG; +use crate::build::CFG; use rustc::mir::*; impl<'tcx> CFG<'tcx> { diff --git a/src/librustc_mir/build/expr/as_constant.rs b/src/librustc_mir/build/expr/as_constant.rs index 606bd2978b642..614668170d5be 100644 --- a/src/librustc_mir/build/expr/as_constant.rs +++ b/src/librustc_mir/build/expr/as_constant.rs @@ -1,18 +1,9 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See docs in build/expr/mod.rs -use build::Builder; -use hair::*; +use crate::build::Builder; +use crate::hair::*; use rustc::mir::*; +use rustc::ty::CanonicalUserTypeAnnotation; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Compile `expr`, yielding a compile-time constant. Assumes that @@ -39,11 +30,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { lint_level: _, value, } => this.as_constant(value), - ExprKind::Literal { literal, user_ty } => Constant { - span, - ty, - user_ty, - literal, + ExprKind::Literal { literal, user_ty } => { + let user_ty = user_ty.map(|user_ty| { + this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation { + span, + user_ty, + inferred_ty: ty, + }) + }); + Constant { + span, + ty, + user_ty, + literal, + } }, _ => span_bug!(span, "expression is not a valid constant {:?}", kind), } diff --git a/src/librustc_mir/build/expr/as_operand.rs b/src/librustc_mir/build/expr/as_operand.rs index 8046d898e0a88..e354a2ee8160b 100644 --- a/src/librustc_mir/build/expr/as_operand.rs +++ b/src/librustc_mir/build/expr/as_operand.rs @@ -1,18 +1,8 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See docs in build/expr/mod.rs -use build::expr::category::Category; -use build::{BlockAnd, BlockAndExtension, Builder}; -use hair::*; +use crate::build::expr::category::Category; +use crate::build::{BlockAnd, BlockAndExtension, Builder}; +use crate::hair::*; use rustc::middle::region; use rustc::mir::*; @@ -84,7 +74,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } Category::Place | Category::Rvalue(..) => { let operand = unpack!(block = this.as_temp(block, scope, expr, Mutability::Mut)); - block.and(Operand::Move(Place::Local(operand))) + block.and(Operand::Move(Place::Base(PlaceBase::Local(operand)))) } } } diff --git a/src/librustc_mir/build/expr/as_place.rs b/src/librustc_mir/build/expr/as_place.rs index cb3c88876a3a8..f7cf09020138c 100644 --- a/src/librustc_mir/build/expr/as_place.rs +++ b/src/librustc_mir/build/expr/as_place.rs @@ -1,22 +1,12 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
See docs in build/expr/mod.rs -use build::expr::category::Category; -use build::ForGuard::{OutsideGuard, RefWithinGuard}; -use build::{BlockAnd, BlockAndExtension, Builder}; -use hair::*; -use rustc::mir::interpret::EvalErrorKind::BoundsCheck; +use crate::build::expr::category::Category; +use crate::build::ForGuard::{OutsideGuard, RefWithinGuard}; +use crate::build::{BlockAnd, BlockAndExtension, Builder}; +use crate::hair::*; +use rustc::mir::interpret::InterpError::BoundsCheck; use rustc::mir::*; -use rustc::ty::Variance; +use rustc::ty::{CanonicalUserTypeAnnotation, Variance}; use rustc_data_structures::indexed_vec::Idx; @@ -108,19 +98,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { <, Rvalue::BinaryOp( BinOp::Lt, - Operand::Copy(Place::Local(idx)), + Operand::Copy(Place::Base(PlaceBase::Local(idx))), Operand::Copy(len.clone()), ), ); let msg = BoundsCheck { len: Operand::Move(len), - index: Operand::Copy(Place::Local(idx)), + index: Operand::Copy(Place::Base(PlaceBase::Local(idx))), }; let success = this.assert(block, Operand::Move(lt), true, msg, expr_span); success.and(slice.index(idx)) } - ExprKind::SelfRef => block.and(Place::Local(Local::new(1))), + ExprKind::SelfRef => block.and(Place::Base(PlaceBase::Local(Local::new(1)))), ExprKind::VarRef { id } => { let place = if this.is_bound_var_in_guard(id) && this .hir @@ -128,21 +118,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .all_pat_vars_are_implicit_refs_within_guards() { let index = this.var_local_id(id, RefWithinGuard); - Place::Local(index).deref() + Place::Base(PlaceBase::Local(index)).deref() } else { let index = this.var_local_id(id, OutsideGuard); - Place::Local(index) + Place::Base(PlaceBase::Local(index)) }; block.and(place) } - ExprKind::StaticRef { id } => block.and(Place::Static(Box::new(Static { - def_id: id, + ExprKind::StaticRef { id } => block.and(Place::Base(PlaceBase::Static(Box::new(Static { ty: expr.ty, - }))), + kind: StaticKind::Static(id), + })))), ExprKind::PlaceTypeAscription { source, user_ty } => { let place = unpack!(block = this.as_place(block, source)); if let Some(user_ty) = user_ty { + let annotation_index = this.canonical_user_type_annotations.push( + CanonicalUserTypeAnnotation { + span: source_info.span, + user_ty, + inferred_ty: expr.ty, + } + ); this.cfg.push( block, Statement { @@ -150,7 +147,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { kind: StatementKind::AscribeUserType( place.clone(), Variance::Invariant, - box UserTypeProjection { base: user_ty, projs: vec![], }, + box UserTypeProjection { base: annotation_index, projs: vec![], }, ), }, ); @@ -163,19 +160,26 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block = this.as_temp(block, source.temp_lifetime, source, mutability) ); if let Some(user_ty) = user_ty { + let annotation_index = this.canonical_user_type_annotations.push( + CanonicalUserTypeAnnotation { + span: source_info.span, + user_ty, + inferred_ty: expr.ty, + } + ); this.cfg.push( block, Statement { source_info, kind: StatementKind::AscribeUserType( - Place::Local(temp.clone()), + Place::Base(PlaceBase::Local(temp.clone())), Variance::Invariant, - box UserTypeProjection { base: user_ty, projs: vec![], }, + box UserTypeProjection { base: annotation_index, projs: vec![], }, ), }, ); } - block.and(Place::Local(temp)) + block.and(Place::Base(PlaceBase::Local(temp))) } ExprKind::Array { .. } @@ -192,6 +196,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { | ExprKind::ReifyFnPointer { .. } | ExprKind::ClosureFnPointer { .. } | ExprKind::UnsafeFnPointer { .. 
} + | ExprKind::MutToConstPointer { .. } | ExprKind::Unsize { .. } | ExprKind::Repeat { .. } | ExprKind::Borrow { .. } @@ -215,7 +220,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }); let temp = unpack!(block = this.as_temp(block, expr.temp_lifetime, expr, mutability)); - block.and(Place::Local(temp)) + block.and(Place::Base(PlaceBase::Local(temp))) } } } diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index a476165462a2f..7289dd96edb8d 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -1,25 +1,15 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See docs in `build/expr/mod.rs`. use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::Idx; -use build::expr::category::{Category, RvalueFunc}; -use build::{BlockAnd, BlockAndExtension, Builder}; -use hair::*; +use crate::build::expr::category::{Category, RvalueFunc}; +use crate::build::{BlockAnd, BlockAndExtension, Builder}; +use crate::hair::*; use rustc::middle::region; -use rustc::mir::interpret::EvalErrorKind; +use rustc::mir::interpret::InterpError; use rustc::mir::*; -use rustc::ty::{self, Ty, UpvarSubsts}; +use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty, UpvarSubsts}; use syntax_pos::Span; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { @@ -77,7 +67,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block.and(Rvalue::Repeat(value_operand, count)) } ExprKind::Borrow { - region, borrow_kind, arg, } => { @@ -85,7 +74,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { BorrowKind::Shared => unpack!(block = this.as_read_only_place(block, arg)), _ => unpack!(block = this.as_place(block, arg)), }; - block.and(Rvalue::Ref(region, borrow_kind, arg_place)) + block.and(Rvalue::Ref(this.hir.tcx().types.re_erased, borrow_kind, arg_place)) } ExprKind::Binary { op, lhs, rhs } => { let lhs = unpack!(block = this.as_operand(block, scope, lhs)); @@ -112,7 +101,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block, Operand::Move(is_min), false, - EvalErrorKind::OverflowNeg, + InterpError::OverflowNeg, expr_span, ); } @@ -138,7 +127,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { this.schedule_drop_storage_and_value( expr_span, scope, - &Place::Local(result), + &Place::Base(PlaceBase::Local(result)), value.ty, ); } @@ -146,15 +135,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // malloc some memory of suitable type (thus far, uninitialized): let box_ = Rvalue::NullaryOp(NullOp::Box, value.ty); this.cfg - .push_assign(block, source_info, &Place::Local(result), box_); + .push_assign(block, source_info, &Place::Base(PlaceBase::Local(result)), box_); // initialize the box contents: - unpack!(block = this.into(&Place::Local(result).deref(), block, value)); - block.and(Rvalue::Use(Operand::Move(Place::Local(result)))) + unpack!( + block = this.into( + &Place::Base(PlaceBase::Local(result)).deref(), + block, value + ) + ); + block.and(Rvalue::Use(Operand::Move(Place::Base(PlaceBase::Local(result))))) } ExprKind::Cast { source } => { - let source = this.hir.mirror(source); - let source = unpack!(block = this.as_operand(block, scope, source)); block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty)) } 
@@ -170,9 +162,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let source = unpack!(block = this.as_operand(block, scope, source)); block.and(Rvalue::Cast(CastKind::UnsafeFnPointer, source, expr.ty)) } - ExprKind::ClosureFnPointer { source } => { + ExprKind::ClosureFnPointer { source, unsafety } => { let source = unpack!(block = this.as_operand(block, scope, source)); - block.and(Rvalue::Cast(CastKind::ClosureFnPointer, source, expr.ty)) + block.and(Rvalue::Cast(CastKind::ClosureFnPointer(unsafety), source, expr.ty)) + } + ExprKind::MutToConstPointer { source } => { + let source = unpack!(block = this.as_operand(block, scope, source)); + block.and(Rvalue::Cast(CastKind::MutToConstPointer, source, expr.ty)) } ExprKind::Unsize { source } => { let source = unpack!(block = this.as_operand(block, scope, source)); @@ -259,11 +255,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { BorrowKind::Mut { allow_two_phase_borrow: false, }, - region, arg, } => unpack!( block = this.limit_capture_mutability( - upvar.span, upvar.ty, scope, block, arg, region, + upvar.span, upvar.ty, scope, block, arg, ) ), _ => unpack!(block = this.as_operand(block, scope, upvar)), @@ -280,10 +275,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { span: expr_span, ty: this.hir.tcx().types.u32, user_ty: None, - literal: ty::Const::from_bits( - this.hir.tcx(), - 0, - ty::ParamEnv::empty().and(this.hir.tcx().types.u32), + literal: this.hir.tcx().mk_const( + ty::Const::from_bits( + this.hir.tcx(), + 0, + ty::ParamEnv::empty().and(this.hir.tcx().types.u32), + ), ), })); box AggregateKind::Generator(closure_id, substs, movability) @@ -341,6 +338,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .collect() }; + let inferred_ty = expr.ty; + let user_ty = user_ty.map(|ty| { + this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation { + span: source_info.span, + user_ty: ty, + inferred_ty, + }) + }); let adt = box AggregateKind::Adt( adt_def, variant_index, @@ -428,7 +433,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let val = result_value.clone().field(val_fld, ty); let of = result_value.field(of_fld, bool_ty); - let err = EvalErrorKind::Overflow(op); + let err = InterpError::Overflow(op); block = self.assert(block, Operand::Move(of), false, err, span); @@ -439,9 +444,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // and 2. there are two possible failure cases, divide-by-zero and overflow. let (zero_err, overflow_err) = if op == BinOp::Div { - (EvalErrorKind::DivisionByZero, EvalErrorKind::Overflow(op)) + (InterpError::DivisionByZero, InterpError::Overflow(op)) } else { - (EvalErrorKind::RemainderByZero, EvalErrorKind::Overflow(op)) + (InterpError::RemainderByZero, InterpError::Overflow(op)) }; // Check for / 0 @@ -505,7 +510,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { temp_lifetime: Option, mut block: BasicBlock, arg: ExprRef<'tcx>, - region: &'tcx ty::RegionKind, ) -> BlockAnd> { let this = self; @@ -525,9 +529,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let arg_place = unpack!(block = this.as_place(block, arg)); let mutability = match arg_place { - Place::Local(local) => this.local_decls[local].mutability, + Place::Base(PlaceBase::Local(local)) => this.local_decls[local].mutability, Place::Projection(box Projection { - base: Place::Local(local), + base: Place::Base(PlaceBase::Local(local)), elem: ProjectionElem::Deref, }) => { debug_assert!( @@ -557,11 +561,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Not projected from the implicit `self` in a closure. 
debug_assert!( match *base { - Place::Local(local) => local == Local::new(1), + Place::Base(PlaceBase::Local(local)) => local == Local::new(1), Place::Projection(box Projection { ref base, elem: ProjectionElem::Deref, - }) => *base == Place::Local(Local::new(1)), + }) => *base == Place::Base(PlaceBase::Local(Local::new(1))), _ => false, }, "Unexpected capture place" @@ -586,8 +590,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { this.cfg.push_assign( block, source_info, - &Place::Local(temp), - Rvalue::Ref(region, borrow_kind, arg_place), + &Place::Base(PlaceBase::Local(temp)), + Rvalue::Ref(this.hir.tcx().types.re_erased, borrow_kind, arg_place), ); // In constants, temp_lifetime is None. We should not need to drop @@ -597,12 +601,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { this.schedule_drop_storage_and_value( upvar_span, temp_lifetime, - &Place::Local(temp), + &Place::Base(PlaceBase::Local(temp)), upvar_ty, ); } - block.and(Operand::Move(Place::Local(temp))) + block.and(Operand::Move(Place::Base(PlaceBase::Local(temp)))) } // Helper to get a `-1` value of the appropriate type diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index 2db9fb9cb99f4..cba771f27065d 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -1,17 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See docs in build/expr/mod.rs -use build::{BlockAnd, BlockAndExtension, Builder}; -use hair::*; +use crate::build::{BlockAnd, BlockAndExtension, Builder}; +use crate::hair::*; use rustc::middle::region; use rustc::mir::*; @@ -83,7 +73,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ); } - unpack!(block = this.into(&Place::Local(temp), block, expr)); + unpack!(block = this.into(&Place::Base(PlaceBase::Local(temp)), block, expr)); // In constants, temp_lifetime is None for temporaries that live for the // 'static lifetime. Thus we do not drop these temporaries and simply leak them. @@ -98,7 +88,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { this.schedule_drop_storage_and_value( expr_span, temp_lifetime, - &Place::Local(temp), + &Place::Base(PlaceBase::Local(temp)), expr_ty, ); } diff --git a/src/librustc_mir/build/expr/category.rs b/src/librustc_mir/build/expr/category.rs index 05a9079cdb1ef..c8c30ac3ce4d0 100644 --- a/src/librustc_mir/build/expr/category.rs +++ b/src/librustc_mir/build/expr/category.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hair::*; +use crate::hair::*; #[derive(Debug, PartialEq)] pub enum Category { @@ -72,6 +62,7 @@ impl Category { | ExprKind::ReifyFnPointer { .. } | ExprKind::ClosureFnPointer { .. } | ExprKind::UnsafeFnPointer { .. } + | ExprKind::MutToConstPointer { .. } | ExprKind::Unsize { .. } | ExprKind::Repeat { .. } | ExprKind::Borrow { .. 
} diff --git a/src/librustc_mir/build/expr/into.rs b/src/librustc_mir/build/expr/into.rs index 0e7305e076ede..4d0418beea4dd 100644 --- a/src/librustc_mir/build/expr/into.rs +++ b/src/librustc_mir/build/expr/into.rs @@ -1,18 +1,8 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! See docs in build/expr/mod.rs -use build::expr::category::{Category, RvalueFunc}; -use build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; -use hair::*; +use crate::build::expr::category::{Category, RvalueFunc}; +use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; +use crate::hair::*; use rustc::mir::*; use rustc::ty; @@ -63,8 +53,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::Block { body: ast_block } => { this.ast_block(destination, block, ast_block, source_info) } - ExprKind::Match { discriminant, arms } => { - this.match_expr(destination, expr_span, block, discriminant, arms) + ExprKind::Match { scrutinee, arms } => { + this.match_expr(destination, expr_span, block, scrutinee, arms) } ExprKind::NeverToAny { source } => { let source = this.hir.mirror(source); @@ -126,18 +116,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ExprKind::LogicalOp { op, lhs, rhs } => { // And: // - // [block: If(lhs)] -true-> [else_block: If(rhs)] -true-> [true_block] - // | | (false) - // +----------false-----------+------------------> [false_block] + // [block: If(lhs)] -true-> [else_block: dest = (rhs)] + // | (false) + // [shortcurcuit_block: dest = false] // // Or: // - // [block: If(lhs)] -false-> [else_block: If(rhs)] -true-> [true_block] - // | (true) | (false) - // [true_block] [false_block] + // [block: If(lhs)] -false-> [else_block: dest = (rhs)] + // | (true) + // [shortcurcuit_block: dest = true] - let (true_block, false_block, mut else_block, join_block) = ( - this.cfg.start_new_block(), + let (shortcircuit_block, mut else_block, join_block) = ( this.cfg.start_new_block(), this.cfg.start_new_block(), this.cfg.start_new_block(), @@ -145,47 +134,41 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let lhs = unpack!(block = this.as_local_operand(block, lhs)); let blocks = match op { - LogicalOp::And => (else_block, false_block), - LogicalOp::Or => (true_block, else_block), + LogicalOp::And => (else_block, shortcircuit_block), + LogicalOp::Or => (shortcircuit_block, else_block), }; let term = TerminatorKind::if_(this.hir.tcx(), lhs, blocks.0, blocks.1); this.cfg.terminate(block, source_info, term); - let rhs = unpack!(else_block = this.as_local_operand(else_block, rhs)); - let term = TerminatorKind::if_(this.hir.tcx(), rhs, true_block, false_block); - this.cfg.terminate(else_block, source_info, term); - this.cfg.push_assign_constant( - true_block, + shortcircuit_block, source_info, destination, Constant { span: expr_span, ty: this.hir.bool_ty(), user_ty: None, - literal: this.hir.true_literal(), + literal: match op { + LogicalOp::And => this.hir.false_literal(), + LogicalOp::Or => this.hir.true_literal(), + }, }, ); - - this.cfg.push_assign_constant( - false_block, + this.cfg.terminate( + shortcircuit_block, source_info, - destination, - Constant { - span: expr_span, - ty: this.hir.bool_ty(), - user_ty: None, - literal: this.hir.false_literal(), - }, + 
TerminatorKind::Goto { target: join_block }, ); - this.cfg.terminate( - true_block, + let rhs = unpack!(else_block = this.as_local_operand(else_block, rhs)); + this.cfg.push_assign( + else_block, source_info, - TerminatorKind::Goto { target: join_block }, + destination, + Rvalue::Use(rhs), ); this.cfg.terminate( - false_block, + else_block, source_info, TerminatorKind::Goto { target: join_block }, ); @@ -275,8 +258,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { exit_block.unit() } ExprKind::Call { ty, fun, args, from_hir_call } => { - // FIXME(canndrew): This is_never should probably be an is_uninhabited - let diverges = expr.ty.is_never(); let intrinsic = match ty.sty { ty::FnDef(def_id, _) => { let f = ty.fn_sig(this.hir.tcx()); @@ -314,7 +295,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { is_user_variable: None, is_block_tail: None, }); - let ptr_temp = Place::Local(ptr_temp); + let ptr_temp = Place::Base(PlaceBase::Local(ptr_temp)); let block = unpack!(this.into(&ptr_temp, block, ptr)); this.into(&ptr_temp.deref(), block, val) } else { @@ -332,7 +313,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { func: fun, args, cleanup: Some(cleanup), - destination: if diverges { + // FIXME(varkor): replace this with an uninhabitedness-based check. + // This requires getting access to the current module to call + // `tcx.is_ty_uninhabited_from`, which is currently tricky to do. + destination: if expr.ty.is_never() { None } else { Some((destination.clone(), success)) @@ -399,6 +383,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { | ExprKind::ReifyFnPointer { .. } | ExprKind::ClosureFnPointer { .. } | ExprKind::UnsafeFnPointer { .. } + | ExprKind::MutToConstPointer { .. } | ExprKind::Unsize { .. } | ExprKind::Repeat { .. } | ExprKind::Borrow { .. } @@ -421,8 +406,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }); let rvalue = unpack!(block = this.as_local_rvalue(block, expr)); - this.cfg - .push_assign(block, source_info, destination, rvalue); + this.cfg.push_assign(block, source_info, destination, rvalue); block.unit() } }; diff --git a/src/librustc_mir/build/expr/mod.rs b/src/librustc_mir/build/expr/mod.rs index 6442ba34da4b2..ac8c7e725e1b4 100644 --- a/src/librustc_mir/build/expr/mod.rs +++ b/src/librustc_mir/build/expr/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Builds MIR from expressions. As a caller into this module, you //! have many options, but the first thing you have to decide is //! whether you are evaluating this expression for its *value*, its diff --git a/src/librustc_mir/build/expr/stmt.rs b/src/librustc_mir/build/expr/stmt.rs index 0e9f81bbe9560..b58914b017fd4 100644 --- a/src/librustc_mir/build/expr/stmt.rs +++ b/src/librustc_mir/build/expr/stmt.rs @@ -1,16 +1,6 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use build::scope::BreakableScope; -use build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; -use hair::*; +use crate::build::scope::BreakableScope; +use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder}; +use crate::hair::*; use rustc::mir::*; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { @@ -149,13 +139,22 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { Some(value) => { debug!("stmt_expr Return val block_context.push(SubExpr) : {:?}", expr2); this.block_context.push(BlockFrame::SubExpr); - let result = unpack!(this.into(&Place::Local(RETURN_PLACE), block, value)); + let result = unpack!( + this.into( + &Place::RETURN_PLACE, + block, + value + ) + ); this.block_context.pop(); result } None => { - this.cfg - .push_assign_unit(block, source_info, &Place::Local(RETURN_PLACE)); + this.cfg.push_assign_unit( + block, + source_info, + &Place::RETURN_PLACE, + ); block } }; @@ -189,11 +188,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { block, Statement { source_info, - kind: StatementKind::InlineAsm { - asm: box asm.clone(), + kind: StatementKind::InlineAsm(box InlineAsm { + asm: asm.clone(), outputs, inputs, - }, + }), }, ); this.block_context.pop(); @@ -236,7 +235,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } let temp = this.local_decls.push(local_decl); - let place = Place::Local(temp); + let place = Place::Base(PlaceBase::Local(temp)); debug!("created temp {:?} for expr {:?} in block_context: {:?}", temp, expr, this.block_context); place diff --git a/src/librustc_mir/build/into.rs b/src/librustc_mir/build/into.rs index 3e57c4acb4288..67b6540febea8 100644 --- a/src/librustc_mir/build/into.rs +++ b/src/librustc_mir/build/into.rs @@ -1,24 +1,14 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! In general, there are a number of things for which it's convenient //! to just call `builder.into` and have it emit its result into a //! given location. This is basically for expressions or things that can be //! wrapped up as expressions (e.g., blocks). To make this ergonomic, we use this //! latter `EvalInto` trait. -use build::{BlockAnd, Builder}; -use hair::*; +use crate::build::{BlockAnd, Builder}; +use crate::hair::*; use rustc::mir::*; -pub(in build) trait EvalInto<'tcx> { +pub(in crate::build) trait EvalInto<'tcx> { fn eval_into<'a, 'gcx>(self, builder: &mut Builder<'a, 'gcx, 'tcx>, destination: &Place<'tcx>, diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 4d61bf8dae681..566f1790f8f7f 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -1,30 +1,22 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Code related to match expressions. These are sufficiently complex -//! to warrant their own module and submodules. :) This main module -//! includes the high-level algorithm, the submodules contain the -//! details. 
- -use build::scope::{CachedBlock, DropKind}; -use build::ForGuard::{self, OutsideGuard, RefWithinGuard, ValWithinGuard}; -use build::{BlockAnd, BlockAndExtension, Builder}; -use build::{GuardFrame, GuardFrameLocal, LocalsForNode}; -use hair::*; -use hair::pattern::PatternTypeProjections; +//! Code related to match expressions. These are sufficiently complex to +//! warrant their own module and submodules. :) This main module includes the +//! high-level algorithm, the submodules contain the details. +//! +//! This also includes code for pattern bindings in `let` statements and +//! function parameters. + +use crate::build::scope::{CachedBlock, DropKind}; +use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard}; +use crate::build::{BlockAnd, BlockAndExtension, Builder}; +use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode}; +use crate::hair::{self, *}; +use rustc::hir::HirId; use rustc::mir::*; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, CanonicalUserTypeAnnotation, Ty}; use rustc::ty::layout::VariantIdx; use rustc_data_structures::bit_set::BitSet; -use rustc_data_structures::fx::FxHashMap; -use syntax::ast::{Name, NodeId}; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use syntax::ast::Name; use syntax_pos::Span; // helper functions, broken out by category: @@ -34,25 +26,94 @@ mod util; use std::convert::TryFrom; -/// ArmHasGuard is isomorphic to a boolean flag. It indicates whether -/// a match arm has a guard expression attached to it. -#[derive(Copy, Clone, Debug)] -pub(crate) struct ArmHasGuard(pub bool); - impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { + /// Generates MIR for a `match` expression. + /// + /// The MIR that we generate for a match looks like this. + /// + /// ```text + /// [ 0. Pre-match ] + /// | + /// [ 1. Evaluate Scrutinee (expression being matched on) ] + /// [ (fake read of scrutinee) ] + /// | + /// [ 2. Decision tree -- check discriminants ] <--------+ + /// | | + /// | (once a specific arm is chosen) | + /// | | + /// [pre_binding_block] [otherwise_block] + /// | | + /// [ 3. Create "guard bindings" for arm ] | + /// [ (create fake borrows) ] | + /// | | + /// [ 4. Execute guard code ] | + /// [ (read fake borrows) ] --(guard is false)-----------+ + /// | + /// | (guard results in true) + /// | + /// [ 5. Create real bindings and execute arm ] + /// | + /// [ Exit match ] + /// ``` + /// + /// All of the different arms have been stacked on top of each other to + /// simplify the diagram. For an arm with no guard the blocks marked 3 and + /// 4 and the fake borrows are omitted. + /// + /// We generate MIR in the following steps: + /// + /// 1. Evaluate the scrutinee and add the fake read of it. + /// 2. Create the prebinding and otherwise blocks. + /// 3. Create the decision tree and record the places that we bind or test. + /// 4. Determine the fake borrows that are needed from the above places. + /// Create the required temporaries for them. + /// 5. Create everything else: the guards and the + /// arms. + /// + /// ## Fake Reads and borrows + /// + /// Match exhaustiveness checking is not able to handle the case where the + /// place being matched on is mutated in the guards. There is an AST check + /// that tries to stop this but it is buggy and overly restrictive. Instead + /// we add "fake borrows" to the guards that prevent any mutation of the + /// place being matched. There are some subtleties: + /// + /// 1. Borrowing `*x` doesn't prevent assigning to `x`.
If `x` is a shared + /// refence, the borrow isn't even tracked. As such we have to add fake + /// borrows of any prefixes of a place + /// 2. We don't want `match x { _ => (), }` to conflict with mutable + /// borrows of `x`, so we only add fake borrows for places which are + /// bound or tested by the match. + /// 3. We don't want the fake borrows to conflict with `ref mut` bindings, + /// so we use a special BorrowKind for them. + /// 4. The fake borrows may be of places in inactive variants, so it would + /// be UB to generate code for them. They therefore have to be removed + /// by a MIR pass run after borrow checking. + /// + /// ## False edges + /// + /// We don't want to have the exact structure of the decision tree be + /// visible through borrow checking. False edges ensure that the CFG as + /// seen by borrow checking doesn't encode this. False edges are added: + /// + /// * From each prebinding block to the next prebinding block. + /// * From each otherwise block to the next prebinding block. pub fn match_expr( &mut self, destination: &Place<'tcx>, span: Span, mut block: BasicBlock, - discriminant: ExprRef<'tcx>, + scrutinee: ExprRef<'tcx>, arms: Vec>, ) -> BlockAnd<()> { let tcx = self.hir.tcx(); - let discriminant_span = discriminant.span(); - let discriminant_place = unpack!(block = self.as_place(block, discriminant)); - // Matching on a `discriminant_place` with an uninhabited type doesn't + // Step 1. Evaluate the scrutinee and add the fake read of it. + + let scrutinee_span = scrutinee.span(); + let scrutinee_place = unpack!(block = self.as_place(block, scrutinee)); + + // Matching on a `scrutinee_place` with an uninhabited type doesn't // generate any memory reads by itself, and so if the place "expression" // contains unsafe operations like raw pointer dereferences or union // field projections, we wouldn't know to require an `unsafe` block @@ -60,43 +121,24 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // See issue #47412 for this hole being discovered in the wild. // // HACK(eddyb) Work around the above issue by adding a dummy inspection - // of `discriminant_place`, specifically by applying `ReadForMatch`. + // of `scrutinee_place`, specifically by applying `ReadForMatch`. // - // NOTE: ReadForMatch also checks that the discriminant is initialized. + // NOTE: ReadForMatch also checks that the scrutinee is initialized. // This is currently needed to not allow matching on an uninitialized, // uninhabited value. If we get never patterns, those will check that // the place is initialized, and so this read would only be used to // check safety. - let source_info = self.source_info(discriminant_span); + let source_info = self.source_info(scrutinee_span); self.cfg.push(block, Statement { source_info, kind: StatementKind::FakeRead( FakeReadCause::ForMatchedPlace, - discriminant_place.clone(), + scrutinee_place.clone(), ), }); - let mut arm_blocks = ArmBlocks { - blocks: arms.iter().map(|_| self.cfg.start_new_block()).collect(), - }; - - // Get the arm bodies and their scopes, while declaring bindings. - let arm_bodies: Vec<_> = arms.iter() - .map(|arm| { - // BUG: use arm lint level - let body = self.hir.mirror(arm.body.clone()); - let scope = self.declare_bindings( - None, - body.span, - LintLevel::Inherited, - &arm.patterns[..], - ArmHasGuard(arm.guard.is_some()), - Some((Some(&discriminant_place), discriminant_span)), - ); - (body, scope.unwrap_or(self.source_scope)) - }) - .collect(); + // Step 2. Create the otherwise and prebinding blocks. 
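The dummy `FakeRead(FakeReadCause::ForMatchedPlace)` pushed in step 1 above exists so that scrutinee places with no other reads still reach the unsafety and initialization checks, which is the hole from issue #47412 described in the comment. A minimal surface-level illustration; the `Bits` union and its values are invented for the example.

```rust
// Reading a union field is an unsafe operation, so even a match that binds
// nothing from the scrutinee must not be able to skip the read entirely; the
// fake read of the scrutinee place is what the unsafety check gets to see.
union Bits {
    int: u32,
    float: f32,
}

fn main() {
    let b = Bits { int: 0x3f80_0000 }; // the bit pattern of 1.0f32
    // Something like `match b.float { _ => () }` is expected to require an
    // `unsafe` block for the same reason this explicit read does.
    let as_float = unsafe { b.float };
    println!("{}", as_float);
}
```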
// create binding start block for link them by false edges let candidate_count = arms.iter().map(|c| c.patterns.len()).sum::(); @@ -104,70 +146,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { .map(|_| self.cfg.start_new_block()) .collect(); - let mut has_guard = false; - - // assemble a list of candidates: there is one candidate per - // pattern, which means there may be more than one candidate - // *per arm*. These candidates are kept sorted such that the - // highest priority candidate comes first in the list. - // (i.e., same order as in source) - - let candidates: Vec<_> = arms.iter() - .enumerate() - .flat_map(|(arm_index, arm)| { - arm.patterns - .iter() - .enumerate() - .map(move |(pat_index, pat)| (arm_index, pat_index, pat, arm.guard.clone())) - }) - .zip( - pre_binding_blocks - .iter() - .zip(pre_binding_blocks.iter().skip(1)), - ) - .map( - |( - (arm_index, pat_index, pattern, guard), - (pre_binding_block, next_candidate_pre_binding_block) - )| { - has_guard |= guard.is_some(); - - // One might ask: why not build up the match pair such that it - // matches via `borrowed_input_temp.deref()` instead of - // using the `discriminant_place` directly, as it is doing here? - // - // The basic answer is that if you do that, then you end up with - // accceses to a shared borrow of the input and that conflicts with - // any arms that look like e.g. - // - // match Some(&4) { - // ref mut foo => { - // ... /* mutate `foo` in arm body */ ... - // } - // } - // - // (Perhaps we could further revise the MIR - // construction here so that it only does a - // shared borrow at the outset and delays doing - // the mutable borrow until after the pattern is - // matched *and* the guard (if any) for the arm - // has been run.) - - Candidate { - span: pattern.span, - match_pairs: vec![MatchPair::new(discriminant_place.clone(), pattern)], - bindings: vec![], - ascriptions: vec![], - guard, - arm_index, - pat_index, - pre_binding_block: *pre_binding_block, - next_candidate_pre_binding_block: *next_candidate_pre_binding_block, - } - }, - ) - .collect(); - + // There's one more pre_binding block than there are candidates so that + // every candidate can have a `next_candidate_pre_binding_block`. let outer_source_info = self.source_info(span); self.cfg.terminate( *pre_binding_blocks.last().unwrap(), @@ -175,27 +155,70 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { TerminatorKind::Unreachable, ); - // Maps a place to the kind of Fake borrow that we want to perform on - // it: either Shallow or Shared, depending on whether the place is - // bound in the match, or just switched on. - // If there are no match guards then we don't need any fake borrows, - // so don't track them. - let mut fake_borrows = if has_guard && tcx.generate_borrow_of_any_match_input() { - Some(FxHashMap::default()) + let mut match_has_guard = false; + + let mut candidate_pre_binding_blocks = pre_binding_blocks.iter(); + let mut next_candidate_pre_binding_blocks = pre_binding_blocks.iter().skip(1); + + // Assemble a list of candidates: there is one candidate per pattern, + // which means there may be more than one candidate *per arm*. 
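Because one arm can list several `|`-separated patterns, the candidate count and the arm count differ, which is what "more than one candidate *per arm*" means above. A small invented illustration:

```rust
// One arm, two patterns: lowering produces two candidates that both branch to
// the same arm body, plus one candidate for the wildcard arm.
fn classify(n: u32) -> &'static str {
    match n {
        0 | 1 => "tiny",
        _ => "bigger",
    }
}

fn main() {
    assert_eq!(classify(1), "tiny");
    assert_eq!(classify(7), "bigger");
    println!("ok");
}
```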
+ let mut arm_candidates: Vec<_> = arms + .iter() + .map(|arm| { + let arm_has_guard = arm.guard.is_some(); + match_has_guard |= arm_has_guard; + let arm_candidates: Vec<_> = arm.patterns + .iter() + .zip(candidate_pre_binding_blocks.by_ref()) + .zip(next_candidate_pre_binding_blocks.by_ref()) + .map( + |((pattern, pre_binding_block), next_candidate_pre_binding_block)| { + Candidate { + span: pattern.span, + match_pairs: vec![ + MatchPair::new(scrutinee_place.clone(), pattern), + ], + bindings: vec![], + ascriptions: vec![], + otherwise_block: if arm_has_guard { + Some(self.cfg.start_new_block()) + } else { + None + }, + pre_binding_block: *pre_binding_block, + next_candidate_pre_binding_block: + *next_candidate_pre_binding_block, + } + }, + ) + .collect(); + (arm, arm_candidates) + }) + .collect(); + + // Step 3. Create the decision tree and record the places that we bind or test. + + // The set of places that we are creating fake borrows of. If there are + // no match guards then we don't need any fake borrows, so don't track + // them. + let mut fake_borrows = if match_has_guard && tcx.generate_borrow_of_any_match_input() { + Some(FxHashSet::default()) } else { None }; - let pre_binding_blocks: Vec<_> = candidates - .iter() - .map(|cand| (cand.pre_binding_block, cand.span)) - .collect(); + // These candidates are kept sorted such that the highest priority + // candidate comes first in the list. (i.e., same order as in source) + // As we gnerate the decision tree, + let candidates = &mut arm_candidates + .iter_mut() + .flat_map(|(_, candidates)| candidates) + .collect::>(); - // this will generate code to test discriminant_place and + // this will generate code to test scrutinee_place and // branch to the appropriate arm block let otherwise = self.match_candidates( - discriminant_span, - &mut arm_blocks, + scrutinee_span, candidates, block, &mut fake_borrows, @@ -208,36 +231,70 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // // In that case, the inexhaustive tips of the decision tree // can't be reached - terminate them with an `unreachable`. - let source_info = self.source_info(span); - let mut otherwise = otherwise; otherwise.sort(); otherwise.dedup(); // variant switches can introduce duplicate target blocks for block in otherwise { self.cfg - .terminate(block, source_info, TerminatorKind::Unreachable); + .terminate(block, outer_source_info, TerminatorKind::Unreachable); } } - if let Some(fake_borrows) = fake_borrows { - self.add_fake_borrows(&pre_binding_blocks, fake_borrows, source_info, block); - } + // Step 4. Determine the fake borrows that are needed from the above + // places. Create the required temporaries for them. - // all the arm blocks will rejoin here - let end_block = self.cfg.start_new_block(); + let fake_borrow_temps = if let Some(ref borrows) = fake_borrows { + self.calculate_fake_borrows(borrows, scrutinee_span) + } else { + Vec::new() + }; + + // Step 5. Create everything else: the guards and the arms. let outer_source_info = self.source_info(span); - for (arm_index, (body, source_scope)) in arm_bodies.into_iter().enumerate() { - let mut arm_block = arm_blocks.blocks[arm_index]; - // Re-enter the source scope we created the bindings in. 
- self.source_scope = source_scope; + let arm_end_blocks: Vec<_> = arm_candidates.into_iter().map(|(arm, candidates)| { + let mut arm_block = self.cfg.start_new_block(); + + let body = self.hir.mirror(arm.body.clone()); + let scope = self.declare_bindings( + None, + body.span, + LintLevel::Inherited, + &arm.patterns[0], + ArmHasGuard(arm.guard.is_some()), + Some((Some(&scrutinee_place), scrutinee_span)), + ); + + for candidate in candidates { + self.bind_and_guard_matched_candidate( + candidate, + arm.guard.clone(), + arm_block, + &fake_borrow_temps, + scrutinee_span, + ); + } + + if let Some(source_scope) = scope { + self.source_scope = source_scope; + } + unpack!(arm_block = self.into(destination, arm_block, body)); + + arm_block + }).collect(); + + // all the arm blocks will rejoin here + let end_block = self.cfg.start_new_block(); + + for arm_block in arm_end_blocks { self.cfg.terminate( arm_block, outer_source_info, TerminatorKind::Goto { target: end_block }, ); } + self.source_scope = outer_source_info.scope; end_block.unit() @@ -294,8 +351,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }, .. }, - user_ty: pat_ascription_ty, - user_ty_span, + ascription: hair::pattern::Ascription { + user_ty: pat_ascription_ty, + variance: _, + user_ty_span, + }, } => { let place = self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard); @@ -312,14 +372,33 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ); let ty_source_info = self.source_info(user_ty_span); + let user_ty = box pat_ascription_ty.user_ty( + &mut self.canonical_user_type_annotations, + place.ty(&self.local_decls, self.hir.tcx()).ty, + ty_source_info.span, + ); self.cfg.push( block, Statement { source_info: ty_source_info, kind: StatementKind::AscribeUserType( place, + // We always use invariant as the variance here. This is because the + // variance field from the ascription refers to the variance to use + // when applying the type to the value being matched, but this + // ascription applies rather to the type of the binding. e.g., in this + // example: + // + // ``` + // let x: T = + // ``` + // + // We are creating an ascription that defines the type of `x` to be + // exactly `T` (i.e., with invariance). The variance field, in + // contrast, is intended to be used to relate `T` to the type of + // ``. ty::Variance::Invariant, - box pat_ascription_ty.user_ty(), + user_ty, ), }, ); @@ -327,6 +406,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard); block.unit() } + _ => { let place = unpack!(block = self.as_place(block, initializer)); self.place_into_pattern(block, irrefutable_pat, &place, true) @@ -347,11 +427,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { match_pairs: vec![MatchPair::new(initializer.clone(), &irrefutable_pat)], bindings: vec![], ascriptions: vec![], - guard: None, - // since we don't call `match_candidates`, next fields is unused - arm_index: 0, - pat_index: 0, + // since we don't call `match_candidates`, next fields are unused + otherwise_block: None, pre_binding_block: block, next_candidate_pre_binding_block: block, }; @@ -361,11 +439,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.simplify_candidate(&mut candidate); if !candidate.match_pairs.is_empty() { - span_bug!( + // ICE if no other errors have been emitted. This used to be a hard error that wouldn't + // be reached because `hair::pattern::check_match::check_match` wouldn't have let the + // compiler continue. 
In our tests this is only ever hit by + // `ui/consts/const-match-check.rs` with `--cfg eval1`, and that file already generates + // a different error before hand. + self.hir.tcx().sess.delay_span_bug( candidate.match_pairs[0].pattern.span, - "match pairs {:?} remaining after simplifying \ - irrefutable pattern", - candidate.match_pairs + &format!( + "match pairs {:?} remaining after simplifying irrefutable pattern", + candidate.match_pairs, + ), ); } @@ -406,7 +490,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mut visibility_scope: Option, scope_span: Span, lint_level: LintLevel, - patterns: &[Pattern<'tcx>], + pattern: &Pattern<'tcx>, has_guard: ArmHasGuard, opt_match_place: Option<(Option<&Place<'tcx>>, Span)>, ) -> Option { @@ -415,10 +499,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { "can't have both a visibility and a lint scope at the same time" ); let mut scope = self.source_scope; - let num_patterns = patterns.len(); + debug!("declare_bindings: pattern={:?}", pattern); self.visit_bindings( - &patterns[0], - &PatternTypeProjections::none(), + &pattern, + UserTypeProjections::none(), &mut |this, mutability, name, mode, var, span, ty, user_ty| { if visibility_scope.is_none() { visibility_scope = @@ -438,13 +522,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mutability, name, mode, - num_patterns, var, ty, user_ty, has_guard, opt_match_place.map(|(x, y)| (x.cloned(), y)), - patterns[0].span, + pattern.span, ); }, ); @@ -454,7 +537,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn storage_live_binding( &mut self, block: BasicBlock, - var: NodeId, + var: HirId, span: Span, for_guard: ForGuard, ) -> Place<'tcx> { @@ -467,23 +550,21 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { kind: StatementKind::StorageLive(local_id), }, ); - let place = Place::Local(local_id); + let place = Place::Base(PlaceBase::Local(local_id)); let var_ty = self.local_decls[local_id].ty; - let hir_id = self.hir.tcx().hir().node_to_hir_id(var); - let region_scope = self.hir.region_scope_tree.var_scope(hir_id.local_id); + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); self.schedule_drop(span, region_scope, &place, var_ty, DropKind::Storage); place } - pub fn schedule_drop_for_binding(&mut self, var: NodeId, span: Span, for_guard: ForGuard) { + pub fn schedule_drop_for_binding(&mut self, var: HirId, span: Span, for_guard: ForGuard) { let local_id = self.var_local_id(var, for_guard); let var_ty = self.local_decls[local_id].ty; - let hir_id = self.hir.tcx().hir().node_to_hir_id(var); - let region_scope = self.hir.region_scope_tree.var_scope(hir_id.local_id); + let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); self.schedule_drop( span, region_scope, - &Place::Local(local_id), + &Place::Base(PlaceBase::Local(local_id)), var_ty, DropKind::Value { cached_block: CachedBlock::default(), @@ -494,18 +575,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub(super) fn visit_bindings( &mut self, pattern: &Pattern<'tcx>, - pattern_user_ty: &PatternTypeProjections<'tcx>, + pattern_user_ty: UserTypeProjections, f: &mut impl FnMut( &mut Self, Mutability, Name, BindingMode, - NodeId, + HirId, Span, Ty<'tcx>, - &PatternTypeProjections<'tcx>, + UserTypeProjections, ), ) { + debug!("visit_bindings: pattern={:?} pattern_user_ty={:?}", pattern, pattern_user_ty); match *pattern.kind { PatternKind::Binding { mutability, @@ -516,23 +598,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ref subpattern, .. 
} => { - let pattern_ref_binding; // sidestep temp lifetime limitations. - let binding_user_ty = match mode { - BindingMode::ByValue => { pattern_user_ty } - BindingMode::ByRef(..) => { - // If this is a `ref` binding (e.g., `let ref - // x: T = ..`), then the type of `x` is not - // `T` but rather `&T`. - pattern_ref_binding = pattern_user_ty.ref_binding(); - &pattern_ref_binding - } - }; - - f(self, mutability, name, mode, var, pattern.span, ty, binding_user_ty); + f(self, mutability, name, mode, var, pattern.span, ty, pattern_user_ty.clone()); if let Some(subpattern) = subpattern.as_ref() { self.visit_bindings(subpattern, pattern_user_ty, f); } } + PatternKind::Array { ref prefix, ref slice, @@ -546,54 +617,71 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let from = u32::try_from(prefix.len()).unwrap(); let to = u32::try_from(suffix.len()).unwrap(); for subpattern in prefix { - self.visit_bindings(subpattern, &pattern_user_ty.index(), f); + self.visit_bindings(subpattern, pattern_user_ty.clone().index(), f); } for subpattern in slice { - self.visit_bindings(subpattern, &pattern_user_ty.subslice(from, to), f); + self.visit_bindings(subpattern, pattern_user_ty.clone().subslice(from, to), f); } for subpattern in suffix { - self.visit_bindings(subpattern, &pattern_user_ty.index(), f); + self.visit_bindings(subpattern, pattern_user_ty.clone().index(), f); } } + PatternKind::Constant { .. } | PatternKind::Range { .. } | PatternKind::Wild => {} + PatternKind::Deref { ref subpattern } => { - self.visit_bindings(subpattern, &pattern_user_ty.deref(), f); + self.visit_bindings(subpattern, pattern_user_ty.deref(), f); } - PatternKind::AscribeUserType { ref subpattern, ref user_ty, user_ty_span } => { + + PatternKind::AscribeUserType { + ref subpattern, + ascription: hair::pattern::Ascription { + ref user_ty, + user_ty_span, + variance: _, + }, + } => { // This corresponds to something like // // ``` // let A::<'a>(_): A<'static> = ...; // ``` - let subpattern_user_ty = pattern_user_ty.add_user_type(user_ty, user_ty_span); - self.visit_bindings(subpattern, &subpattern_user_ty, f) + // + // Note that the variance doesn't apply here, as we are tracking the effect + // of `user_ty` on any bindings contained with subpattern. 
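A surface-level picture of what the annotation plus projection built here represent (the `Pair` type is invented for the example): the ascription written on the `let` is recorded once as a user type annotation, and each binding inside the pattern refers back to it through a projection.

```rust
struct Pair<T>(T, T);

fn main() {
    // The user type annotation is `Pair<u64>`; the binding `second` is
    // related to it through the projection "field 1 of that annotation",
    // which is roughly what a `UserTypeProjection` records.
    let Pair(_, second): Pair<u64> = Pair(1, 2);
    let _: u64 = second;
    println!("{}", second);
}
```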
+ let annotation = CanonicalUserTypeAnnotation { + span: user_ty_span, + user_ty: user_ty.user_ty, + inferred_ty: subpattern.ty, + }; + let projection = UserTypeProjection { + base: self.canonical_user_type_annotations.push(annotation), + projs: Vec::new(), + }; + let subpattern_user_ty = pattern_user_ty.push_projection(&projection, user_ty_span); + self.visit_bindings(subpattern, subpattern_user_ty, f) } PatternKind::Leaf { ref subpatterns } => { for subpattern in subpatterns { - let subpattern_user_ty = pattern_user_ty.leaf(subpattern.field); - self.visit_bindings(&subpattern.pattern, &subpattern_user_ty, f); + let subpattern_user_ty = pattern_user_ty.clone().leaf(subpattern.field); + debug!("visit_bindings: subpattern_user_ty={:?}", subpattern_user_ty); + self.visit_bindings(&subpattern.pattern, subpattern_user_ty, f); } } PatternKind::Variant { adt_def, substs: _, variant_index, ref subpatterns } => { for subpattern in subpatterns { - let subpattern_user_ty = pattern_user_ty.variant( + let subpattern_user_ty = pattern_user_ty.clone().variant( adt_def, variant_index, subpattern.field); - self.visit_bindings(&subpattern.pattern, &subpattern_user_ty, f); + self.visit_bindings(&subpattern.pattern, subpattern_user_ty, f); } } } } } -/// List of blocks for each arm (and potentially other metadata in the -/// future). -struct ArmBlocks { - blocks: Vec, -} - -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct Candidate<'pat, 'tcx: 'pat> { // span of the original pattern that gave rise to this candidate span: Span, @@ -604,21 +692,15 @@ pub struct Candidate<'pat, 'tcx: 'pat> { // ...these bindings established... bindings: Vec>, - // ...these types asserted... + // ...and these types asserted... ascriptions: Vec>, - // ...and the guard must be evaluated... - guard: Option>, - - // ...and then we branch to arm with this index. - arm_index: usize, + // ...and the guard must be evaluated, if false branch to Block... + otherwise_block: Option, // ...and the blocks for add false edges between candidates pre_binding_block: BasicBlock, next_candidate_pre_binding_block: BasicBlock, - - // This uniquely identifies this candidate *within* the arm. - pat_index: usize, } #[derive(Clone, Debug)] @@ -626,10 +708,10 @@ struct Binding<'tcx> { span: Span, source: Place<'tcx>, name: Name, - var_id: NodeId, + var_id: HirId, var_ty: Ty<'tcx>, mutability: Mutability, - binding_mode: BindingMode<'tcx>, + binding_mode: BindingMode, } /// Indicates that the type of `source` must be a subtype of the @@ -640,6 +722,7 @@ struct Ascription<'tcx> { span: Span, source: Place<'tcx>, user_ty: PatternTypeProjection<'tcx>, + variance: ty::Variance, } #[derive(Clone, Debug)] @@ -649,13 +732,6 @@ pub struct MatchPair<'pat, 'tcx: 'pat> { // ... must match this pattern. pattern: &'pat Pattern<'tcx>, - - // HACK(eddyb) This is used to toggle whether a Slice pattern - // has had its length checked. This is only necessary because - // the "rest" part of the pattern right now has type &[T] and - // as such, it requires an Rvalue::Slice to be generated. - // See RFC 495 / issue #23121 for the eventual (proper) solution. 
- slice_len_checked: bool, } #[derive(Clone, Debug, PartialEq)] @@ -670,12 +746,12 @@ enum TestKind<'tcx> { SwitchInt { switch_ty: Ty<'tcx>, options: Vec, - indices: FxHashMap<&'tcx ty::Const<'tcx>, usize>, + indices: FxHashMap, usize>, }, // test for equality Eq { - value: &'tcx ty::Const<'tcx>, + value: ty::Const<'tcx>, ty: Ty<'tcx>, }, @@ -695,6 +771,11 @@ pub struct Test<'tcx> { kind: TestKind<'tcx>, } +/// ArmHasGuard is isomorphic to a boolean flag. It indicates whether +/// a match arm has a guard expression attached to it. +#[derive(Copy, Clone, Debug)] +pub(crate) struct ArmHasGuard(pub bool); + /////////////////////////////////////////////////////////////////////////// // Main matching algorithm @@ -705,11 +786,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// candidates are sorted such that the first item in the list /// has the highest priority. When a candidate is found to match /// the value, we will generate a branch to the appropriate - /// block found in `arm_blocks`. + /// prebinding block. /// /// The return value is a list of "otherwise" blocks. These are /// points in execution where we found that *NONE* of the - /// candidates apply. In principle, this means that the input + /// candidates apply. In principle, this means that the input /// list was not exhaustive, though at present we sometimes are /// not smart enough to recognize all exhaustive inputs. /// @@ -720,32 +801,30 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// list. This is important to keep the size of the generated code /// under control. See `test_candidates` for more details. /// - /// If `add_fake_borrows` is true, then places which need fake borrows + /// If `fake_borrows` is Some, then places which need fake borrows /// will be added to it. fn match_candidates<'pat>( &mut self, span: Span, - arm_blocks: &mut ArmBlocks, - mut candidates: Vec>, + candidates: &mut [&mut Candidate<'pat, 'tcx>], mut block: BasicBlock, - fake_borrows: &mut Option, BorrowKind>>, + fake_borrows: &mut Option>>, ) -> Vec { debug!( "matched_candidate(span={:?}, block={:?}, candidates={:?})", span, block, candidates ); - // Start by simplifying candidates. Once this process is - // complete, all the match pairs which remain require some - // form of test, whether it be a switch or pattern comparison. - for candidate in &mut candidates { + // Start by simplifying candidates. Once this process is complete, all + // the match pairs which remain require some form of test, whether it + // be a switch or pattern comparison. + for candidate in &mut *candidates { self.simplify_candidate(candidate); } - // The candidates are sorted by priority. Check to see - // whether the higher priority candidates (and hence at - // the front of the vec) have satisfied all their match - // pairs. + // The candidates are sorted by priority. Check to see whether the + // higher priority candidates (and hence at the front of the slice) + // have satisfied all their match pairs. let fully_matched = candidates .iter() .take_while(|c| c.match_pairs.is_empty()) @@ -754,87 +833,192 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { "match_candidates: {:?} candidates fully matched", fully_matched ); - let mut unmatched_candidates = candidates.split_off(fully_matched); - - // Insert a *Shared* borrow of any places that are bound. - if let Some(fake_borrows) = fake_borrows { - for Binding { source, .. 
} - in candidates.iter().flat_map(|candidate| &candidate.bindings) - { - fake_borrows.insert(source.clone(), BorrowKind::Shared); - } - } + let (matched_candidates, unmatched_candidates) = candidates.split_at_mut(fully_matched); - let fully_matched_with_guard = candidates.iter().take_while(|c| c.guard.is_some()).count(); - - let unreachable_candidates = if fully_matched_with_guard + 1 < candidates.len() { - candidates.split_off(fully_matched_with_guard + 1) - } else { - vec![] - }; - - for candidate in candidates { - // If so, apply any bindings, test the guard (if any), and - // branch to the arm. - if let Some(b) = self.bind_and_guard_matched_candidate(block, arm_blocks, candidate) { - block = b; + if !matched_candidates.is_empty() { + block = if let Some(last_otherwise_block) = self.select_matched_candidates( + matched_candidates, + block, + fake_borrows, + ) { + last_otherwise_block } else { - // if None is returned, then any remaining candidates - // are unreachable (at least not through this path). - // Link them with false edges. - debug!( - "match_candidates: add false edges for unreachable {:?} and unmatched {:?}", - unreachable_candidates, unmatched_candidates - ); - for candidate in unreachable_candidates { - let source_info = self.source_info(candidate.span); - let target = self.cfg.start_new_block(); - if let Some(otherwise) = - self.bind_and_guard_matched_candidate(target, arm_blocks, candidate) - { - self.cfg - .terminate(otherwise, source_info, TerminatorKind::Unreachable); - } - } - + // Any remaining candidates are unreachable. if unmatched_candidates.is_empty() { - return vec![]; + return Vec::new(); } else { - let target = self.cfg.start_new_block(); - return self.match_candidates( - span, - arm_blocks, - unmatched_candidates, - target, - &mut None, - ); + self.cfg.start_new_block() } - } + }; } - // If there are no candidates that still need testing, we're done. - // Since all matches are exhaustive, execution should never reach this point. + // If there are no candidates that still need testing, we're + // done. Since all matches are exhaustive, execution should + // never reach this point. if unmatched_candidates.is_empty() { return vec![block]; } // Test candidates where possible. - let (otherwise, tested_candidates) = - self.test_candidates(span, arm_blocks, &unmatched_candidates, block, fake_borrows); + let (otherwise, untested_candidates) = self.test_candidates( + span, + unmatched_candidates, + block, + fake_borrows, + ); // If the target candidates were exhaustive, then we are done. // But for borrowck continue build decision tree. - - // If all candidates were sorted into `target_candidates` somewhere, then - // the initial set was inexhaustive. - let untested_candidates = unmatched_candidates.split_off(tested_candidates); - if untested_candidates.len() == 0 { + if untested_candidates.is_empty() { return otherwise; } // Otherwise, let's process those remaining candidates. let join_block = self.join_otherwise_blocks(span, otherwise); - self.match_candidates(span, arm_blocks, untested_candidates, join_block, &mut None) + self.match_candidates( + span, + untested_candidates, + join_block, + &mut None, + ) + } + + /// Link up matched candidates. For example, if we have something like + /// this: + /// + /// ... + /// Some(x) if cond => ... + /// Some(x) => ... + /// Some(x) if cond => ... + /// ... 
+ /// + /// We generate real edges from: + /// * `block` to the prebinding_block of the first pattern, + /// * the otherwise block of the first pattern to the second pattern, + /// * the otherwise block of the third pattern to the a block with an + /// Unreachable terminator. + /// + /// As well as that we add fake edges from the otherwise blocks to the + /// prebinding block of the next candidate in the original set of + /// candidates. + fn select_matched_candidates( + &mut self, + matched_candidates: &mut [&mut Candidate<'_, 'tcx>], + block: BasicBlock, + fake_borrows: &mut Option>>, + ) -> Option { + debug_assert!( + !matched_candidates.is_empty(), + "select_matched_candidates called with no candidates", + ); + + // Insert a borrows of prefixes of places that are bound and are + // behind a dereference projection. + // + // These borrows are taken to avoid situations like the following: + // + // match x[10] { + // _ if { x = &[0]; false } => (), + // y => (), // Out of bounds array access! + // } + // + // match *x { + // // y is bound by reference in the guard and then by copy in the + // // arm, so y is 2 in the arm! + // y if { y == 1 && (x = &2) == () } => y, + // _ => 3, + // } + if let Some(fake_borrows) = fake_borrows { + for Binding { source, .. } + in matched_candidates.iter().flat_map(|candidate| &candidate.bindings) + { + let mut cursor = source; + while let Place::Projection(box Projection { base, elem }) = cursor { + cursor = base; + if let ProjectionElem::Deref = elem { + fake_borrows.insert(cursor.clone()); + break; + } + } + } + } + + let fully_matched_with_guard = matched_candidates + .iter() + .position(|c| c.otherwise_block.is_none()) + .unwrap_or(matched_candidates.len() - 1); + + let (reachable_candidates, unreachable_candidates) + = matched_candidates.split_at_mut(fully_matched_with_guard + 1); + + let first_candidate = &reachable_candidates[0]; + + let candidate_source_info = self.source_info(first_candidate.span); + + self.cfg.terminate( + block, + candidate_source_info, + TerminatorKind::Goto { + target: first_candidate.pre_binding_block, + }, + ); + + for window in reachable_candidates.windows(2) { + if let [first_candidate, second_candidate] = window { + let source_info = self.source_info(first_candidate.span); + if let Some(otherwise_block) = first_candidate.otherwise_block { + self.cfg.terminate( + otherwise_block, + source_info, + TerminatorKind::FalseEdges { + real_target: second_candidate.pre_binding_block, + imaginary_targets: vec![ + first_candidate.next_candidate_pre_binding_block + ], + } + ) + } else { + bug!("candidate other than the last has no guard"); + } + } else { + bug!("<[_]>::windows returned incorrectly sized window"); + } + } + + debug!("match_candidates: add false edges for unreachable {:?}", unreachable_candidates); + for candidate in unreachable_candidates { + if let Some(otherwise) = candidate.otherwise_block { + let source_info = self.source_info(candidate.span); + let unreachable = self.cfg.start_new_block(); + self.cfg.terminate( + otherwise, + source_info, + TerminatorKind::FalseEdges { + real_target: unreachable, + imaginary_targets: vec![candidate.next_candidate_pre_binding_block], + } + ); + self.cfg.terminate(unreachable, source_info, TerminatorKind::Unreachable); + } + } + + let last_candidate = reachable_candidates.last().unwrap(); + + if let Some(otherwise) = last_candidate.otherwise_block { + let source_info = self.source_info(last_candidate.span); + let block = self.cfg.start_new_block(); + self.cfg.terminate( + 
otherwise, + source_info, + TerminatorKind::FalseEdges { + real_target: block, + imaginary_targets: vec![last_candidate.next_candidate_pre_binding_block] + } + ); + Some(block) + } else { + None + } } fn join_otherwise_blocks(&mut self, span: Span, mut otherwise: Vec) -> BasicBlock { @@ -856,7 +1040,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - /// This is the most subtle part of the matching algorithm. At + /// This is the most subtle part of the matching algorithm. At /// this point, the input candidates have been fully simplified, /// and so we know that all remaining match-pairs require some /// sort of test. To decide what test to do, we take the highest @@ -876,10 +1060,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// 4. etc. /// /// Once we know what sort of test we are going to perform, this - /// test may also help us with other candidates. So we walk over + /// Tests may also help us with other candidates. So we walk over /// the candidates (from high to low priority) and check. This /// gives us, for each outcome of the test, a transformed list of - /// candidates. For example, if we are testing the current + /// candidates. For example, if we are testing the current /// variant of `x.0`, and we have a candidate `{x.0 @ Some(v), x.1 /// @ 22}`, then we would have a resulting candidate of `{(x.0 as /// Some).0 @ v, x.1 @ 22}`. Note that the first match-pair is now @@ -968,17 +1152,17 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// In addition to avoiding exponential-time blowups, this algorithm /// also has nice property that each guard and arm is only generated /// once. - fn test_candidates<'pat>( + fn test_candidates<'pat, 'b, 'c>( &mut self, span: Span, - arm_blocks: &mut ArmBlocks, - candidates: &[Candidate<'pat, 'tcx>], + mut candidates: &'b mut [&'c mut Candidate<'pat, 'tcx>], block: BasicBlock, - fake_borrows: &mut Option, BorrowKind>>, - ) -> (Vec, usize) { + fake_borrows: &mut Option>>, + ) -> (Vec, &'b mut [&'c mut Candidate<'pat, 'tcx>]) { // extract the match-pair from the highest priority candidate let match_pair = &candidates.first().unwrap().match_pairs[0]; let mut test = self.test(match_pair); + let match_place = match_pair.place.clone(); // most of the time, the test to perform is simply a function // of the main candidate; but for a test like SwitchInt, we @@ -992,7 +1176,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } => { for candidate in candidates.iter() { if !self.add_cases_to_switch( - &match_pair.place, + &match_place, candidate, switch_ty, options, @@ -1007,7 +1191,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ref mut variants, } => { for candidate in candidates.iter() { - if !self.add_variants_to_switch(&match_pair.place, candidate, variants) { + if !self.add_variants_to_switch(&match_place, candidate, variants) { break; } } @@ -1017,7 +1201,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // Insert a Shallow borrow of any places that is switched on. fake_borrows.as_mut().map(|fb| { - fb.entry(match_pair.place.clone()).or_insert(BorrowKind::Shallow) + fb.insert(match_place.clone()) }); // perform the test, branching to one of N blocks. 
For each of @@ -1028,25 +1212,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { "match_candidates: test={:?} match_pair={:?}", test, match_pair ); - let target_blocks = self.perform_test(block, &match_pair.place, &test); - let mut target_candidates = vec![vec![]; target_blocks.len()]; + let target_blocks = self.perform_test(block, &match_place, &test); + let mut target_candidates: Vec>> = vec![]; + target_candidates.resize_with(target_blocks.len(), Default::default); + + let total_candidate_count = candidates.len(); // Sort the candidates into the appropriate vector in // `target_candidates`. Note that at some point we may // encounter a candidate where the test is not relevant; at // that point, we stop sorting. - let tested_candidates = candidates - .iter() - .take_while(|c| { - self.sort_candidate(&match_pair.place, &test, c, &mut target_candidates) - }) - .count(); - assert!(tested_candidates > 0); // at least the last candidate ought to be tested - debug!("tested_candidates: {}", tested_candidates); - debug!( - "untested_candidates: {}", - candidates.len() - tested_candidates - ); + while let Some(candidate) = candidates.first_mut() { + if let Some(idx) = self.sort_candidate(&match_place, &test, candidate) { + let (candidate, rest) = candidates.split_first_mut().unwrap(); + target_candidates[idx].push(candidate); + candidates = rest; + } else { + break; + } + } + // at least the first candidate ought to be tested + assert!(total_candidate_count > candidates.len()); + debug!("tested_candidates: {}", total_candidate_count - candidates.len()); + debug!("untested_candidates: {}", candidates.len()); // For each outcome of test, process the candidates that still // apply. Collect a list of blocks where control flow will @@ -1055,59 +1243,98 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let otherwise: Vec<_> = target_blocks .into_iter() .zip(target_candidates) - .flat_map(|(target_block, target_candidates)| { + .flat_map(|(target_block, mut target_candidates)| { self.match_candidates( span, - arm_blocks, - target_candidates, + &mut *target_candidates, target_block, fake_borrows, ) }) .collect(); - (otherwise, tested_candidates) + (otherwise, candidates) + } + + // Determine the fake borrows that are needed to ensure that the place + // will evaluate to the same thing until an arm has been chosen. + fn calculate_fake_borrows<'b>( + &mut self, + fake_borrows: &'b FxHashSet>, + temp_span: Span, + ) -> Vec<(&'b Place<'tcx>, Local)> { + let tcx = self.hir.tcx(); + + debug!("add_fake_borrows fake_borrows = {:?}", fake_borrows); + + let mut all_fake_borrows = Vec::with_capacity(fake_borrows.len()); + + // Insert a Shallow borrow of the prefixes of any fake borrows. + for place in fake_borrows + { + let mut prefix_cursor = place; + while let Place::Projection(box Projection { base, elem }) = prefix_cursor { + if let ProjectionElem::Deref = elem { + // Insert a shallow borrow after a deref. For other + // projections the borrow of prefix_cursor will + // conflict with any mutation of base. + all_fake_borrows.push(base); + } + prefix_cursor = base; + } + + all_fake_borrows.push(place); + } + + // Deduplicate and ensure a deterministic order. 
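The prefix walk above handles the second example from the `select_matched_candidates` comment: a fake borrow of `*x` alone would not stop a guard from rebinding `x` itself, so a shallow borrow of the prefix is recorded as well. A compilable sketch with the offending guard left commented out; making that guard real (with `let mut x`) is exactly what the prefix borrow is meant to turn into a borrow-check error.

```rust
fn main() {
    let x: &i32 = &1;
    let r = match *x {
        // With only a borrow of `*x`, a guard could rebind `x` and the arm
        // would then read through the new reference. The shallow fake borrow
        // of the prefix `x` rejects the reassignment instead:
        // y if { x = &2; y == 1 } => y,
        y => y,
    };
    println!("{}", r);
}
```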
+ all_fake_borrows.sort(); + all_fake_borrows.dedup(); + + debug!("add_fake_borrows all_fake_borrows = {:?}", all_fake_borrows); + + all_fake_borrows.into_iter().map(|matched_place| { + let fake_borrow_deref_ty = matched_place.ty(&self.local_decls, tcx).ty; + let fake_borrow_ty = tcx.mk_imm_ref(tcx.types.re_erased, fake_borrow_deref_ty); + let fake_borrow_temp = self.local_decls.push( + LocalDecl::new_temp(fake_borrow_ty, temp_span) + ); + + (matched_place, fake_borrow_temp) + }).collect() } +} +/////////////////////////////////////////////////////////////////////////// +// Pattern binding - used for `let` and function parameters as well. + +impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Initializes each of the bindings from the candidate by - /// moving/copying/ref'ing the source as appropriate. Tests the - /// guard, if any, and then branches to the arm. Returns the block - /// for the case where the guard fails. + /// moving/copying/ref'ing the source as appropriate. Tests the guard, if + /// any, and then branches to the arm. Returns the block for the case where + /// the guard fails. /// - /// Note: we check earlier that if there is a guard, there cannot - /// be move bindings. This isn't really important for the - /// self-consistency of this fn, but the reason for it should be - /// clear: after we've done the assignments, if there were move - /// bindings, further tests would be a use-after-move (which would - /// in turn be detected by the borrowck code that runs on the - /// MIR). + /// Note: we check earlier that if there is a guard, there cannot be move + /// bindings (unless feature(bind_by_move_pattern_guards) is used). This + /// isn't really important for the self-consistency of this fn, but the + /// reason for it should be clear: after we've done the assignments, if + /// there were move bindings, further tests would be a use-after-move. + /// bind_by_move_pattern_guards avoids this by only moving the binding once + /// the guard has evaluated to true (see below). fn bind_and_guard_matched_candidate<'pat>( &mut self, - mut block: BasicBlock, - arm_blocks: &mut ArmBlocks, candidate: Candidate<'pat, 'tcx>, - ) -> Option { - debug!( - "bind_and_guard_matched_candidate(block={:?}, candidate={:?})", - block, candidate - ); + guard: Option>, + arm_block: BasicBlock, + fake_borrows: &Vec<(&Place<'tcx>, Local)>, + scrutinee_span: Span, + ) { + debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate); debug_assert!(candidate.match_pairs.is_empty()); - self.ascribe_types(block, &candidate.ascriptions); - - let arm_block = arm_blocks.blocks[candidate.arm_index]; let candidate_source_info = self.source_info(candidate.span); - self.cfg.terminate( - block, - candidate_source_info, - TerminatorKind::Goto { - target: candidate.pre_binding_block, - }, - ); - - block = self.cfg.start_new_block(); + let mut block = self.cfg.start_new_block(); self.cfg.terminate( candidate.pre_binding_block, candidate_source_info, @@ -1116,6 +1343,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { imaginary_targets: vec![candidate.next_candidate_pre_binding_block], }, ); + self.ascribe_types(block, &candidate.ascriptions); // rust-lang/rust#27282: The `autoref` business deserves some // explanation here. @@ -1194,19 +1422,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // // * Here, the guard expression wants a `&&` or `&&mut` // into the original input. 
This means we need to borrow - // a reference that we do not immediately have at hand - // (because all we have is the places associated with the - // match input itself; it is up to us to create a place - // holding a `&` or `&mut` that we can then borrow). - - let autoref = self.hir - .tcx() - .all_pat_vars_are_implicit_refs_within_guards(); - if let Some(guard) = candidate.guard { + // the reference that we create for the arm. + // * So we eagerly create the reference for the arm and then take a + // reference to that. + let tcx = self.hir.tcx(); + let autoref = tcx.all_pat_vars_are_implicit_refs_within_guards(); + if let Some(guard) = guard { if autoref { self.bind_matched_candidate_for_guard( block, - candidate.pat_index, &candidate.bindings, ); let guard_frame = GuardFrame { @@ -1222,12 +1446,29 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.bind_matched_candidate_for_arm_body(block, &candidate.bindings); } + let re_erased = tcx.types.re_erased; + let scrutinee_source_info = self.source_info(scrutinee_span); + for &(place, temp) in fake_borrows { + let borrow = Rvalue::Ref( + re_erased, + BorrowKind::Shallow, + place.clone(), + ); + self.cfg.push_assign( + block, + scrutinee_source_info, + &Place::Base(PlaceBase::Local(temp)), + borrow, + ); + } + // the block to branch to if the guard fails; if there is no // guard, this block is simply unreachable let guard = match guard { Guard::If(e) => self.hir.mirror(e), }; let source_info = self.source_info(guard.span); + let guard_end = self.source_info(tcx.sess.source_map().end_point(guard.span)); let cond = unpack!(block = self.as_local_operand(block, guard)); if autoref { let guard_frame = self.guard_context.pop().unwrap(); @@ -1237,7 +1478,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ); } - let false_edge_block = self.cfg.start_new_block(); + for &(_, temp) in fake_borrows { + self.cfg.push(block, Statement { + source_info: guard_end, + kind: StatementKind::FakeRead( + FakeReadCause::ForMatchGuard, + Place::Base(PlaceBase::Local(temp)), + ), + }); + } // We want to ensure that the matched candidates are bound // after we have confirmed this candidate *and* any @@ -1269,11 +1518,35 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.cfg.terminate( block, source_info, - TerminatorKind::if_(self.hir.tcx(), cond, post_guard_block, false_edge_block), + TerminatorKind::if_( + self.hir.tcx(), + cond, + post_guard_block, + candidate.otherwise_block.unwrap() + ), ); if autoref { - self.bind_matched_candidate_for_arm_body(post_guard_block, &candidate.bindings); + let by_value_bindings = candidate.bindings.iter().filter(|binding| { + if let BindingMode::ByValue = binding.binding_mode { true } else { false } + }); + // Read all of the by reference bindings to ensure that the + // place they refer to can't be modified by the guard. 
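For the `bind_by_move_pattern_guards` case mentioned above: the guard only ever sees the binding through a reference, and the move into the arm-body local happens only once the guard has returned true. A small example of the kind of code this enables; at the time of this patch it sat behind the `bind_by_move_pattern_guards` feature gate, which was later stabilized.

```rust
fn main() {
    let opt = Some(String::from("hello"));
    let msg = match opt {
        // `s` is a by-move binding, but while the guard runs it is only
        // borrowed; it is moved out for the arm body only after the guard
        // has evaluated to true.
        Some(s) if s.len() > 3 => s,
        Some(_) | None => String::from("too short or missing"),
    };
    println!("{}", msg);
}
```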
+ for binding in by_value_bindings.clone() { + let local_id = self.var_local_id(binding.var_id, RefWithinGuard); + let place = Place::Base(PlaceBase::Local(local_id)); + self.cfg.push( + block, + Statement { + source_info: guard_end, + kind: StatementKind::FakeRead(FakeReadCause::ForGuardBinding, place), + }, + ); + } + self.bind_matched_candidate_for_arm_body( + post_guard_block, + by_value_bindings, + ); } self.cfg.terminate( @@ -1281,19 +1554,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { source_info, TerminatorKind::Goto { target: arm_block }, ); - - let otherwise = self.cfg.start_new_block(); - - self.cfg.terminate( - false_edge_block, - source_info, - TerminatorKind::FalseEdges { - real_target: otherwise, - imaginary_targets: vec![candidate.next_candidate_pre_binding_block], - }, - ); - Some(otherwise) } else { + assert!(candidate.otherwise_block.is_none()); // (Here, it is not too early to bind the matched // candidate on `block`, because there is no guard result // that we have to inspect before we bind them.) @@ -1303,7 +1565,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { candidate_source_info, TerminatorKind::Goto { target: arm_block }, ); - None } } @@ -1324,14 +1585,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { ascription.user_ty, ); + let user_ty = box ascription.user_ty.clone().user_ty( + &mut self.canonical_user_type_annotations, + ascription.source.ty(&self.local_decls, self.hir.tcx()).ty, + source_info.span + ); self.cfg.push( block, Statement { source_info, kind: StatementKind::AscribeUserType( ascription.source.clone(), - ty::Variance::Covariant, - box ascription.user_ty.clone().user_ty(), + ascription.variance, + user_ty, ), }, ); @@ -1343,18 +1609,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn bind_matched_candidate_for_guard( &mut self, block: BasicBlock, - pat_index: usize, bindings: &[Binding<'tcx>], ) { - debug!( - "bind_matched_candidate_for_guard(block={:?}, pat_index={:?}, bindings={:?})", - block, pat_index, bindings - ); + debug!("bind_matched_candidate_for_guard(block={:?}, bindings={:?})", block, bindings); // Assign each of the bindings. Since we are binding for a // guard expression, this will never trigger moves out of the // candidate. - let re_empty = self.hir.tcx().types.re_empty; + let re_erased = self.hir.tcx().types.re_erased; for binding in bindings { let source_info = self.source_info(binding.span); @@ -1365,59 +1627,33 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let ref_for_guard = self.storage_live_binding(block, binding.var_id, binding.span, RefWithinGuard); // Question: Why schedule drops if bindings are all - // shared-&'s? Answer: Because schedule_drop_for_binding - // also emits StorageDead's for those locals. + // shared-&'s? + // Answer: Because schedule_drop_for_binding also emits + // StorageDead's for those locals. self.schedule_drop_for_binding(binding.var_id, binding.span, RefWithinGuard); match binding.binding_mode { BindingMode::ByValue => { - let rvalue = Rvalue::Ref(re_empty, BorrowKind::Shared, binding.source.clone()); + let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, binding.source.clone()); self.cfg .push_assign(block, source_info, &ref_for_guard, rvalue); } - BindingMode::ByRef(region, borrow_kind) => { - // Tricky business: For `ref id` and `ref mut id` - // patterns, we want `id` within the guard to - // correspond to a temp of type `& &T` or `& &mut - // T` (i.e., a "borrow of a borrow") that is - // implicitly dereferenced. 
- // - // To borrow a borrow, we need that inner borrow - // to point to. So, create a temp for the inner - // borrow, and then take a reference to it. - // - // Note: the temp created here is *not* the one - // used by the arm body itself. This eases - // observing two-phase borrow restrictions. - let val_for_guard = self.storage_live_binding( + BindingMode::ByRef(borrow_kind) => { + let value_for_arm = self.storage_live_binding( block, binding.var_id, binding.span, - ValWithinGuard(pat_index), + OutsideGuard, ); self.schedule_drop_for_binding( binding.var_id, binding.span, - ValWithinGuard(pat_index), + OutsideGuard, ); - // rust-lang/rust#27282: We reuse the two-phase - // borrow infrastructure so that the mutable - // borrow (whose mutabilty is *unusable* within - // the guard) does not conflict with the implicit - // borrow of the whole match input. See additional - // discussion on rust-lang/rust#49870. - let borrow_kind = match borrow_kind { - BorrowKind::Shared - | BorrowKind::Shallow - | BorrowKind::Unique => borrow_kind, - BorrowKind::Mut { .. } => BorrowKind::Mut { - allow_two_phase_borrow: true, - }, - }; - let rvalue = Rvalue::Ref(region, borrow_kind, binding.source.clone()); + let rvalue = Rvalue::Ref(re_erased, borrow_kind, binding.source.clone()); self.cfg - .push_assign(block, source_info, &val_for_guard, rvalue); - let rvalue = Rvalue::Ref(region, BorrowKind::Shared, val_for_guard); + .push_assign(block, source_info, &value_for_arm, rvalue); + let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, value_for_arm); self.cfg .push_assign(block, source_info, &ref_for_guard, rvalue); } @@ -1425,16 +1661,14 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - fn bind_matched_candidate_for_arm_body( + fn bind_matched_candidate_for_arm_body<'b>( &mut self, block: BasicBlock, - bindings: &[Binding<'tcx>], - ) { - debug!( - "bind_matched_candidate_for_arm_body(block={:?}, bindings={:?}", - block, bindings - ); + bindings: impl IntoIterator>, + ) where 'tcx: 'b { + debug!("bind_matched_candidate_for_arm_body(block={:?})", block); + let re_erased = self.hir.tcx().types.re_erased; // Assign each of the bindings. This may trigger moves out of the candidate. for binding in bindings { let source_info = self.source_info(binding.span); @@ -1445,29 +1679,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { BindingMode::ByValue => { Rvalue::Use(self.consume_by_copy_or_move(binding.source.clone())) } - BindingMode::ByRef(region, borrow_kind) => { - Rvalue::Ref(region, borrow_kind, binding.source.clone()) + BindingMode::ByRef(borrow_kind) => { + Rvalue::Ref(re_erased, borrow_kind, binding.source.clone()) } }; self.cfg.push_assign(block, source_info, &local, rvalue); } } - /// Each binding (`ref mut var`/`ref var`/`mut var`/`var`, where - /// the bound `var` has type `T` in the arm body) in a pattern - /// maps to `2+N` locals. The first local is a binding for - /// occurrences of `var` in the guard, which will all have type - /// `&T`. The N locals are bindings for the `T` that is referenced - /// by the first local; they are not used outside of the - /// guard. The last local is a binding for occurrences of `var` in - /// the arm body, which will have type `T`. 
- /// - /// The reason we have N locals rather than just 1 is to - /// accommodate rust-lang/rust#51348: If the arm has N candidate - /// patterns, then in general they can correspond to distinct - /// parts of the matched data, and we want them to be distinct - /// temps in order to simplify checks performed by our internal - /// leveraging of two-phase borrows). + /// Each binding (`ref mut var`/`ref var`/`mut var`/`var`, where the bound + /// `var` has type `T` in the arm body) in a pattern maps to 2 locals. The + /// first local is a binding for occurrences of `var` in the guard, which + /// will have type `&T`. The second local is a binding for occurrences of + /// `var` in the arm body, which will have type `T`. fn declare_binding( &mut self, source_info: SourceInfo, @@ -1475,10 +1699,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { mutability: Mutability, name: Name, mode: BindingMode, - num_patterns: usize, - var_id: NodeId, + var_id: HirId, var_ty: Ty<'tcx>, - user_var_ty: &PatternTypeProjections<'tcx>, + user_ty: UserTypeProjections, has_guard: ArmHasGuard, opt_match_place: Option<(Option>, Span)>, pat_span: Span, @@ -1492,12 +1715,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { let tcx = self.hir.tcx(); let binding_mode = match mode { BindingMode::ByValue => ty::BindingMode::BindByValue(mutability.into()), - BindingMode::ByRef { .. } => ty::BindingMode::BindByReference(mutability.into()), + BindingMode::ByRef(_) => ty::BindingMode::BindByReference(mutability.into()), }; + debug!("declare_binding: user_ty={:?}", user_ty); let local = LocalDecl::<'tcx> { mutability, ty: var_ty, - user_ty: user_var_ty.clone().user_ty(), + user_ty, name: Some(name), source_info, visibility_scope, @@ -1516,31 +1740,20 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { }; let for_arm_body = self.local_decls.push(local.clone()); let locals = if has_guard.0 && tcx.all_pat_vars_are_implicit_refs_within_guards() { - let mut vals_for_guard = Vec::with_capacity(num_patterns); - for _ in 0..num_patterns { - let val_for_guard_idx = self.local_decls.push(LocalDecl { - // This variable isn't mutated but has a name, so has to be - // immutable to avoid the unused mut lint. - mutability: Mutability::Not, - ..local.clone() - }); - vals_for_guard.push(val_for_guard_idx); - } let ref_for_guard = self.local_decls.push(LocalDecl::<'tcx> { - // See previous comment. + // This variable isn't mutated but has a name, so has to be + // immutable to avoid the unused mut lint. mutability: Mutability::Not, - ty: tcx.mk_imm_ref(tcx.types.re_empty, var_ty), + ty: tcx.mk_imm_ref(tcx.types.re_erased, var_ty), user_ty: UserTypeProjections::none(), name: Some(name), source_info, visibility_scope, - // FIXME: should these secretly injected ref_for_guard's be marked as `internal`? internal: false, is_block_tail: None, is_user_variable: Some(ClearCrossCrate::Set(BindingForm::RefForGuard)), }); LocalsForNode::ForGuard { - vals_for_guard, ref_for_guard, for_arm_body, } @@ -1550,86 +1763,4 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { debug!("declare_binding: vars={:?}", locals); self.var_indices.insert(var_id, locals); } - - // Determine the fake borrows that are needed to ensure that the place - // will evaluate to the same thing until an arm has been chosen. 
- fn add_fake_borrows<'pat>( - &mut self, - pre_binding_blocks: &[(BasicBlock, Span)], - fake_borrows: FxHashMap, BorrowKind>, - source_info: SourceInfo, - start_block: BasicBlock, - ) { - let tcx = self.hir.tcx(); - - debug!("add_fake_borrows pre_binding_blocks = {:?}, fake_borrows = {:?}", - pre_binding_blocks, fake_borrows); - - let mut all_fake_borrows = Vec::with_capacity(fake_borrows.len()); - - // Insert a Shallow borrow of the prefixes of any fake borrows. - for (place, borrow_kind) in fake_borrows - { - { - let mut prefix_cursor = &place; - while let Place::Projection(box Projection { base, elem }) = prefix_cursor { - if let ProjectionElem::Deref = elem { - // Insert a shallow borrow after a deref. For other - // projections the borrow of prefix_cursor will - // conflict with any mutation of base. - all_fake_borrows.push((base.clone(), BorrowKind::Shallow)); - } - prefix_cursor = base; - } - } - - all_fake_borrows.push((place, borrow_kind)); - } - - // Deduplicate and ensure a deterministic order. - all_fake_borrows.sort(); - all_fake_borrows.dedup(); - - debug!("add_fake_borrows all_fake_borrows = {:?}", all_fake_borrows); - - // Add fake borrows to the start of the match and reads of them before - // the start of each arm. - let mut borrowed_input_temps = Vec::with_capacity(all_fake_borrows.len()); - - for (matched_place, borrow_kind) in all_fake_borrows { - let borrowed_input = - Rvalue::Ref(tcx.types.re_empty, borrow_kind, matched_place.clone()); - let borrowed_input_ty = borrowed_input.ty(&self.local_decls, tcx); - let borrowed_input_temp = self.temp(borrowed_input_ty, source_info.span); - self.cfg.push_assign( - start_block, - source_info, - &borrowed_input_temp, - borrowed_input - ); - borrowed_input_temps.push(borrowed_input_temp); - } - - // FIXME: This could be a lot of reads (#fake borrows * #patterns). - // The false edges that we currently generate would allow us to only do - // this on the last Candidate, but it's possible that there might not be - // so many false edges in the future, so we read for all Candidates for - // now. - // Another option would be to make our own block and add our own false - // edges to it. - if tcx.emit_read_for_match() { - for &(pre_binding_block, span) in pre_binding_blocks { - let pattern_source_info = self.source_info(span); - for temp in &borrowed_input_temps { - self.cfg.push(pre_binding_block, Statement { - source_info: pattern_source_info, - kind: StatementKind::FakeRead( - FakeReadCause::ForMatchGuard, - temp.clone(), - ), - }); - } - } - } - } } diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs index 0ce642838707e..2e4f53023f409 100644 --- a/src/librustc_mir/build/matches/simplify.rs +++ b/src/librustc_mir/build/matches/simplify.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Simplifying Candidates //! //! *Simplifying* a match pair `place @ pattern` means breaking it down @@ -22,13 +12,14 @@ //! sort of test: for example, testing which variant an enum is, or //! testing a value against a constant. 
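A concrete reading of the module docs above, with an invented example: for a pair like `scrut @ (_flag, Some(v))`, the outer tuple is irrefutable, so simplification splits it into `scrut.0 @ _flag` (which needs no test) and `scrut.1 @ Some(v)` (which cannot be simplified further and is left for `test.rs` to turn into a variant test).

```rust
// After simplification, only the `Some`/`None` distinction on `scrut.1`
// still needs an actual test; everything else is bindings and wildcards.
fn unwrap_or_zero(scrut: (bool, Option<i32>)) -> i32 {
    match scrut {
        (_flag, Some(v)) => v,
        (_, None) => 0,
    }
}

fn main() {
    assert_eq!(unwrap_or_zero((true, Some(7))), 7);
    assert_eq!(unwrap_or_zero((false, None)), 0);
    println!("ok");
}
```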
-use build::Builder; -use build::matches::{Ascription, Binding, MatchPair, Candidate}; -use hair::*; +use crate::build::Builder; +use crate::build::matches::{Ascription, Binding, MatchPair, Candidate}; +use crate::hair::{self, *}; use rustc::ty; use rustc::ty::layout::{Integer, IntegerExt, Size}; use syntax::attr::{SignedInt, UnsignedInt}; use rustc::hir::RangeEnd; +use rustc::mir::interpret::truncate; use std::mem; @@ -55,10 +46,10 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - /// Tries to simplify `match_pair`, returning true if + /// Tries to simplify `match_pair`, returning `Ok(())` if /// successful. If successful, new match pairs and bindings will /// have been pushed into the candidate. If no simplification is - /// possible, Err is returned and no changes are made to + /// possible, `Err` is returned and no changes are made to /// candidate. fn simplify_match_pair<'pat>(&mut self, match_pair: MatchPair<'pat, 'tcx>, @@ -66,11 +57,21 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { -> Result<(), MatchPair<'pat, 'tcx>> { let tcx = self.hir.tcx(); match *match_pair.pattern.kind { - PatternKind::AscribeUserType { ref subpattern, ref user_ty, user_ty_span } => { + PatternKind::AscribeUserType { + ref subpattern, + ascription: hair::pattern::Ascription { + variance, + ref user_ty, + user_ty_span, + }, + } => { + // Apply the type ascription to the value at `match_pair.place`, which is the + // value being matched, taking the variance field into account. candidate.ascriptions.push(Ascription { span: user_ty_span, user_ty: user_ty.clone(), source: match_pair.place.clone(), + variance, }); candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern)); @@ -108,27 +109,32 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } PatternKind::Range(PatternRange { lo, hi, ty, end }) => { - let range = match ty.sty { + let (range, bias) = match ty.sty { ty::Char => { - Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))) + (Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0) } ty::Int(ity) => { - // FIXME(49937): refactor these bit manipulations into interpret. let size = Integer::from_attr(&tcx, SignedInt(ity)).size(); - let min = 1u128 << (size.bits() - 1); - let max = (1u128 << (size.bits() - 1)) - 1; - Some((min, max, size)) + let max = truncate(u128::max_value(), size); + let bias = 1u128 << (size.bits() - 1); + (Some((0, max, size)), bias) } ty::Uint(uty) => { - // FIXME(49937): refactor these bit manipulations into interpret. let size = Integer::from_attr(&tcx, UnsignedInt(uty)).size(); - let max = !0u128 >> (128 - size.bits()); - Some((0, max, size)) + let max = truncate(u128::max_value(), size); + (Some((0, max, size)), 0) } - _ => None, + _ => (None, 0), }; if let Some((min, max, sz)) = range { if let (Some(lo), Some(hi)) = (lo.val.try_to_bits(sz), hi.val.try_to_bits(sz)) { + // We want to compare ranges numerically, but the order of the bitwise + // representation of signed integers does not match their numeric order. + // Thus, to correct the ordering, we need to shift the range of signed + // integers to correct the comparison. This is achieved by XORing with a + // bias (see pattern/_match.rs for another pertinent example of this + // pattern). + let (lo, hi) = (lo ^ bias, hi ^ bias); if lo <= min && (hi > max || hi == max && end == RangeEnd::Included) { // Irrefutable pattern match. 
return Ok(()); @@ -157,7 +163,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { i == variant_index || { self.hir.tcx().features().never_type && self.hir.tcx().features().exhaustive_patterns && - self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs) + !v.uninhabited_from(self.hir.tcx(), substs, adt_def.adt_kind()).is_empty() } }); if irrefutable { @@ -167,7 +173,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } else { Err(match_pair) } - }, + } PatternKind::Array { ref prefix, ref slice, ref suffix } => { self.prefix_slice_suffix(&mut candidate.match_pairs, diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs index c8dec6d0b9764..b06022196106a 100644 --- a/src/librustc_mir/build/matches/test.rs +++ b/src/librustc_mir/build/matches/test.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Testing candidates // // After candidates have been simplified, the only match pairs that @@ -15,10 +5,10 @@ // identify what tests are needed, perform the tests, and then filter // the candidates based on the result. -use build::Builder; -use build::matches::{Candidate, MatchPair, Test, TestKind}; -use hair::*; -use hair::pattern::compare_const_vals; +use crate::build::Builder; +use crate::build::matches::{Candidate, MatchPair, Test, TestKind}; +use crate::hair::*; +use crate::hair::pattern::compare_const_vals; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::fx::FxHashMap; use rustc::ty::{self, Ty}; @@ -45,10 +35,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - PatternKind::Constant { .. } - if is_switch_ty(match_pair.pattern.ty) => { - // for integers, we use a SwitchInt match, which allows - // us to handle more cases + PatternKind::Constant { .. } if is_switch_ty(match_pair.pattern.ty) => { + // For integers, we use a `SwitchInt` match, which allows + // us to handle more cases. Test { span: match_pair.pattern.span, kind: TestKind::SwitchInt { @@ -80,8 +69,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - PatternKind::Slice { ref prefix, ref slice, ref suffix } - if !match_pair.slice_len_checked => { + PatternKind::Slice { ref prefix, ref slice, ref suffix } => { let len = prefix.len() + suffix.len(); let op = if slice.is_some() { BinOp::Ge @@ -96,7 +84,6 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { PatternKind::AscribeUserType { .. } | PatternKind::Array { .. } | - PatternKind::Slice { .. } | PatternKind::Wild | PatternKind::Binding { .. } | PatternKind::Leaf { .. 
} | @@ -111,7 +98,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { candidate: &Candidate<'pat, 'tcx>, switch_ty: Ty<'tcx>, options: &mut Vec, - indices: &mut FxHashMap<&'tcx ty::Const<'tcx>, usize>) + indices: &mut FxHashMap, usize>) -> bool { let match_pair = match candidate.match_pairs.iter().find(|mp| mp.place == *test_place) { @@ -263,12 +250,12 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { TestKind::Eq { value, mut ty } => { let val = Operand::Copy(place.clone()); let mut expect = self.literal_operand(test.span, ty, value); - // Use PartialEq::eq instead of BinOp::Eq + // Use `PartialEq::eq` instead of `BinOp::Eq` // (the binop can only handle primitives) let fail = self.cfg.start_new_block(); if !ty.is_scalar() { - // If we're using b"..." as a pattern, we need to insert an - // unsizing coercion, as the byte string has the type &[u8; N]. + // If we're using `b"..."` as a pattern, we need to insert an + // unsizing coercion, as the byte string has the type `&[u8; N]`. // // We want to do this even when the scrutinee is a reference to an // array, so we can call `<[u8]>::eq` rather than having to find an @@ -312,18 +299,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } let eq_def_id = self.hir.tcx().lang_items().eq_trait().unwrap(); let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, &[ty.into()]); + let method = self.hir.tcx().mk_const(method); + let re_erased = self.hir.tcx().types.re_erased; // take the argument by reference - let region_scope = self.topmost_scope(); - let region = self.hir.tcx().mk_region(ty::ReScope(region_scope)); let tam = ty::TypeAndMut { ty, mutbl: Mutability::MutImmutable, }; - let ref_ty = self.hir.tcx().mk_ref(region, tam); + let ref_ty = self.hir.tcx().mk_ref(re_erased, tam); // let lhs_ref_place = &lhs; - let ref_rvalue = Rvalue::Ref(region, BorrowKind::Shared, place); + let ref_rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, place); let lhs_ref_place = self.temp(ref_ty, test.span); self.cfg.push_assign(block, source_info, &lhs_ref_place, ref_rvalue); let val = Operand::Move(lhs_ref_place); @@ -333,7 +320,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { self.cfg.push_assign(block, source_info, &rhs_place, Rvalue::Use(expect)); // let rhs_ref_place = &rhs_place; - let ref_rvalue = Rvalue::Ref(region, BorrowKind::Shared, rhs_place); + let ref_rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, rhs_place); let rhs_ref_place = self.temp(ref_ty, test.span); self.cfg.push_assign(block, source_info, &rhs_ref_place, ref_rvalue); let expect = Operand::Move(rhs_ref_place); @@ -347,7 +334,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { span: test.span, ty: mty, - // FIXME(#47184): This constant comes from user + // FIXME(#54571): This constant comes from user // input (a constant in a pattern). Are // there forms where users can add type // annotations here? For example, an @@ -444,59 +431,49 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { target_block } - /// Given that we are performing `test` against `test_place`, - /// this job sorts out what the status of `candidate` will be - /// after the test. The `resulting_candidates` vector stores, for - /// each possible outcome of `test`, a vector of the candidates - /// that will result. This fn should add a (possibly modified) - /// clone of candidate into `resulting_candidates` wherever - /// appropriate. + /// Given that we are performing `test` against `test_place`, this job + /// sorts out what the status of `candidate` will be after the test. 
See + /// `test_candidates` for the usage of this function. The returned index is + /// the index that this candiate should be placed in the + /// `target_candidates` vec. The candidate may be modified to update its + /// `match_pairs`. /// - /// So, for example, if this candidate is `x @ Some(P0)` and the - /// test is a variant test, then we would add `(x as Option).0 @ - /// P0` to the `resulting_candidates` entry corresponding to the - /// variant `Some`. + /// So, for example, if this candidate is `x @ Some(P0)` and the `Test` is + /// a variant test, then we would modify the candidate to be `(x as + /// Option).0 @ P0` and return the index corresponding to the variant + /// `Some`. /// - /// However, in some cases, the test may just not be relevant to - /// candidate. For example, suppose we are testing whether `foo.x == 22`, - /// but in one match arm we have `Foo { x: _, ... }`... in that case, - /// the test for what value `x` has has no particular relevance - /// to this candidate. In such cases, this function just returns false - /// without doing anything. This is used by the overall `match_candidates` - /// algorithm to structure the match as a whole. See `match_candidates` for - /// more details. + /// However, in some cases, the test may just not be relevant to candidate. + /// For example, suppose we are testing whether `foo.x == 22`, but in one + /// match arm we have `Foo { x: _, ... }`... in that case, the test for + /// what value `x` has has no particular relevance to this candidate. In + /// such cases, this function just returns None without doing anything. + /// This is used by the overall `match_candidates` algorithm to structure + /// the match as a whole. See `match_candidates` for more details. /// - /// FIXME(#29623). In some cases, we have some tricky choices to - /// make. for example, if we are testing that `x == 22`, but the - /// candidate is `x @ 13..55`, what should we do? In the event - /// that the test is true, we know that the candidate applies, but - /// in the event of false, we don't know that it *doesn't* - /// apply. For now, we return false, indicate that the test does - /// not apply to this candidate, but it might be we can get + /// FIXME(#29623). In some cases, we have some tricky choices to make. for + /// example, if we are testing that `x == 22`, but the candidate is `x @ + /// 13..55`, what should we do? In the event that the test is true, we know + /// that the candidate applies, but in the event of false, we don't know + /// that it *doesn't* apply. For now, we return false, indicate that the + /// test does not apply to this candidate, but it might be we can get /// tighter match code if we do something a bit different. - pub fn sort_candidate<'pat>(&mut self, - test_place: &Place<'tcx>, - test: &Test<'tcx>, - candidate: &Candidate<'pat, 'tcx>, - resulting_candidates: &mut [Vec>]) - -> bool { + pub fn sort_candidate<'pat, 'cand>( + &mut self, + test_place: &Place<'tcx>, + test: &Test<'tcx>, + candidate: &mut Candidate<'pat, 'tcx>, + ) -> Option { // Find the match_pair for this place (if any). At present, // afaik, there can be at most one. (In the future, if we // adopted a more general `@` operator, there might be more // than one, but it'd be very unusual to have two sides that // both require tests; you'd expect one side to be simplified // away.) 
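The new `sort_candidate` contract documented above is easiest to read with a concrete stand-in: `Some(i)` means "this candidate belongs in outcome bucket `i` of the test", and `None` means "the test decides nothing about this candidate, so keep it for every outcome". Below is a minimal sketch of that contract for a variant switch, using invented types rather than the real `Test`/`Candidate` machinery:

```rust
// Invented stand-ins for illustration; for a variant switch, outcome `i` is
// simply the index of the tested variant.
#[derive(Clone, Copy)]
enum PatAtPlace {
    Variant(usize), // pattern names a specific variant, e.g. `Some(..)` = 0
    Wildcard,       // `_` or a plain binding: the switch decides nothing
}

/// Mirror of the new `sort_candidate` return value for a `Switch` test.
fn sort_candidate(pat_at_test_place: PatAtPlace) -> Option<usize> {
    match pat_at_test_place {
        PatAtPlace::Variant(idx) => Some(idx), // goes only into bucket `idx`
        PatAtPlace::Wildcard => None,          // applies to *all* outcomes
    }
}

fn main() {
    assert_eq!(sort_candidate(PatAtPlace::Variant(0)), Some(0));
    assert_eq!(sort_candidate(PatAtPlace::Wildcard), None);
}
```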
- let tested_match_pair = candidate.match_pairs.iter() - .enumerate() - .find(|&(_, mp)| mp.place == *test_place); - let (match_pair_index, match_pair) = match tested_match_pair { - Some(pair) => pair, - None => { - // We are not testing this place. Therefore, this - // candidate applies to ALL outcomes. - return false; - } - }; + let (match_pair_index, match_pair) = candidate.match_pairs + .iter() + .enumerate() + .find(|&(_, mp)| mp.place == *test_place)?; match (&test.kind, &*match_pair.pattern.kind) { // If we are performing a variant switch, then this @@ -504,16 +481,15 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { (&TestKind::Switch { adt_def: tested_adt_def, .. }, &PatternKind::Variant { adt_def, variant_index, ref subpatterns, .. }) => { assert_eq!(adt_def, tested_adt_def); - let new_candidate = - self.candidate_after_variant_switch(match_pair_index, - adt_def, - variant_index, - subpatterns, - candidate); - resulting_candidates[variant_index.as_usize()].push(new_candidate); - true + self.candidate_after_variant_switch(match_pair_index, + adt_def, + variant_index, + subpatterns, + candidate); + Some(variant_index.as_usize()) } - (&TestKind::Switch { .. }, _) => false, + + (&TestKind::Switch { .. }, _) => None, // If we are performing a switch over integers, then this informs integer // equality, but nothing else. @@ -524,10 +500,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { &PatternKind::Constant { ref value }) if is_switch_ty(match_pair.pattern.ty) => { let index = indices[value]; - let new_candidate = self.candidate_without_match_pair(match_pair_index, - candidate); - resulting_candidates[index].push(new_candidate); - true + self.candidate_without_match_pair(match_pair_index, candidate); + Some(index) } (&TestKind::SwitchInt { switch_ty: _, ref options, ref indices }, @@ -540,15 +514,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // No switch values are contained in the pattern range, // so the pattern can be matched only if this test fails. let otherwise = options.len(); - resulting_candidates[otherwise].push(candidate.clone()); - true + Some(otherwise) } else { - false + None } } - (&TestKind::SwitchInt { .. }, _) => false, - + (&TestKind::SwitchInt { .. }, _) => None, (&TestKind::Len { len: test_len, op: BinOp::Eq }, &PatternKind::Slice { ref prefix, ref slice, ref suffix }) => { @@ -557,32 +529,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { (Ordering::Equal, &None) => { // on true, min_len = len = $actual_length, // on false, len != $actual_length - resulting_candidates[0].push( - self.candidate_after_slice_test(match_pair_index, - candidate, - prefix, - slice.as_ref(), - suffix) - ); - true + self.candidate_after_slice_test(match_pair_index, + candidate, + prefix, + slice.as_ref(), + suffix); + Some(0) } (Ordering::Less, _) => { // test_len < pat_len. If $actual_len = test_len, // then $actual_len < pat_len and we don't have // enough elements. - resulting_candidates[1].push(candidate.clone()); - true + Some(1) } (Ordering::Equal, &Some(_)) | (Ordering::Greater, &Some(_)) => { // This can match both if $actual_len = test_len >= pat_len, // and if $actual_len > test_len. We can't advance. - false + None } (Ordering::Greater, &None) => { // test_len != pat_len, so if $actual_len = test_len, then // $actual_len != pat_len. - resulting_candidates[1].push(candidate.clone()); - true + Some(1) } } } @@ -595,32 +563,28 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { (Ordering::Equal, &Some(_)) => { // $actual_len >= test_len = pat_len, // so we can match. 
- resulting_candidates[0].push( - self.candidate_after_slice_test(match_pair_index, - candidate, - prefix, - slice.as_ref(), - suffix) - ); - true + self.candidate_after_slice_test(match_pair_index, + candidate, + prefix, + slice.as_ref(), + suffix); + Some(0) } (Ordering::Less, _) | (Ordering::Equal, &None) => { // test_len <= pat_len. If $actual_len < test_len, // then it is also < pat_len, so the test passing is // necessary (but insufficient). - resulting_candidates[0].push(candidate.clone()); - true + Some(0) } (Ordering::Greater, &None) => { // test_len > pat_len. If $actual_len >= test_len > pat_len, // then we know we won't have a match. - resulting_candidates[1].push(candidate.clone()); - true + Some(1) } (Ordering::Greater, &Some(_)) => { // test_len < pat_len, and is therefore less // strict. This can still go both ways. - false + None } } } @@ -628,12 +592,11 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { (&TestKind::Range(test), &PatternKind::Range(pat)) => { if test == pat { - resulting_candidates[0] - .push(self.candidate_without_match_pair( - match_pair_index, - candidate, - )); - return true; + self.candidate_without_match_pair( + match_pair_index, + candidate, + ); + return Some(0); } let no_overlap = (|| { @@ -660,26 +623,23 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { if no_overlap == Some(true) { // Testing range does not overlap with pattern range, // so the pattern can be matched only if this test fails. - resulting_candidates[1].push(candidate.clone()); - true + Some(1) } else { - false + None } } - (&TestKind::Range(range), &PatternKind::Constant { ref value }) => { + (&TestKind::Range(range), &PatternKind::Constant { value }) => { if self.const_range_contains(range, value) == Some(false) { // `value` is not contained in the testing range, // so `value` can be matched only if this test fails. - resulting_candidates[1].push(candidate.clone()); - true + Some(1) } else { - false + None } } - (&TestKind::Range { .. }, _) => false, - + (&TestKind::Range { .. }, _) => None, (&TestKind::Eq { .. }, _) | (&TestKind::Len { .. 
}, _) => { @@ -688,73 +648,54 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { // FIXME(#29623) we can be more clever here let pattern_test = self.test(&match_pair); if pattern_test.kind == test.kind { - let new_candidate = self.candidate_without_match_pair(match_pair_index, - candidate); - resulting_candidates[0].push(new_candidate); - true + self.candidate_without_match_pair(match_pair_index, candidate); + Some(0) } else { - false + None } } } } - fn candidate_without_match_pair<'pat>(&mut self, - match_pair_index: usize, - candidate: &Candidate<'pat, 'tcx>) - -> Candidate<'pat, 'tcx> { - let other_match_pairs = - candidate.match_pairs.iter() - .enumerate() - .filter(|&(index, _)| index != match_pair_index) - .map(|(_, mp)| mp.clone()) - .collect(); - Candidate { - span: candidate.span, - match_pairs: other_match_pairs, - bindings: candidate.bindings.clone(), - ascriptions: candidate.ascriptions.clone(), - guard: candidate.guard.clone(), - arm_index: candidate.arm_index, - pat_index: candidate.pat_index, - pre_binding_block: candidate.pre_binding_block, - next_candidate_pre_binding_block: candidate.next_candidate_pre_binding_block, - } + fn candidate_without_match_pair( + &mut self, + match_pair_index: usize, + candidate: &mut Candidate<'_, 'tcx>, + ) { + candidate.match_pairs.remove(match_pair_index); } fn candidate_after_slice_test<'pat>(&mut self, match_pair_index: usize, - candidate: &Candidate<'pat, 'tcx>, + candidate: &mut Candidate<'pat, 'tcx>, prefix: &'pat [Pattern<'tcx>], opt_slice: Option<&'pat Pattern<'tcx>>, - suffix: &'pat [Pattern<'tcx>]) - -> Candidate<'pat, 'tcx> { - let mut new_candidate = - self.candidate_without_match_pair(match_pair_index, candidate); + suffix: &'pat [Pattern<'tcx>]) { + let removed_place = candidate.match_pairs.remove(match_pair_index).place; self.prefix_slice_suffix( - &mut new_candidate.match_pairs, - &candidate.match_pairs[match_pair_index].place, + &mut candidate.match_pairs, + &removed_place, prefix, opt_slice, suffix); - - new_candidate } - fn candidate_after_variant_switch<'pat>(&mut self, - match_pair_index: usize, - adt_def: &'tcx ty::AdtDef, - variant_index: VariantIdx, - subpatterns: &'pat [FieldPattern<'tcx>], - candidate: &Candidate<'pat, 'tcx>) - -> Candidate<'pat, 'tcx> { - let match_pair = &candidate.match_pairs[match_pair_index]; + fn candidate_after_variant_switch<'pat>( + &mut self, + match_pair_index: usize, + adt_def: &'tcx ty::AdtDef, + variant_index: VariantIdx, + subpatterns: &'pat [FieldPattern<'tcx>], + candidate: &mut Candidate<'pat, 'tcx>, + ) { + let match_pair = candidate.match_pairs.remove(match_pair_index); // So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`, // we want to create a set of derived match-patterns like // `(x as Variant).0 @ P1` and `(x as Variant).1 @ P1`. - let elem = ProjectionElem::Downcast(adt_def, variant_index); - let downcast_place = match_pair.place.clone().elem(elem); // `(x as Variant)` + let elem = ProjectionElem::Downcast( + Some(adt_def.variants[variant_index].ident.name), variant_index); + let downcast_place = match_pair.place.elem(elem); // `(x as Variant)` let consequent_match_pairs = subpatterns.iter() .map(|subpattern| { @@ -765,26 +706,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { MatchPair::new(place, &subpattern.pattern) }); - // In addition, we need all the other match pairs from the old candidate. 
- let other_match_pairs = - candidate.match_pairs.iter() - .enumerate() - .filter(|&(index, _)| index != match_pair_index) - .map(|(_, mp)| mp.clone()); - - let all_match_pairs = consequent_match_pairs.chain(other_match_pairs).collect(); - - Candidate { - span: candidate.span, - match_pairs: all_match_pairs, - bindings: candidate.bindings.clone(), - ascriptions: candidate.ascriptions.clone(), - guard: candidate.guard.clone(), - arm_index: candidate.arm_index, - pat_index: candidate.pat_index, - pre_binding_block: candidate.pre_binding_block, - next_candidate_pre_binding_block: candidate.next_candidate_pre_binding_block, - } + candidate.match_pairs.extend(consequent_match_pairs); } fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! { @@ -796,7 +718,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn const_range_contains( &self, range: PatternRange<'tcx>, - value: &'tcx ty::Const<'tcx>, + value: ty::Const<'tcx>, ) -> Option { use std::cmp::Ordering::*; @@ -816,9 +738,9 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { fn values_not_contained_in_range( &self, range: PatternRange<'tcx>, - indices: &FxHashMap<&'tcx ty::Const<'tcx>, usize>, + indices: &FxHashMap, usize>, ) -> Option { - for val in indices.keys() { + for &val in indices.keys() { if self.const_range_contains(range, val)? { return Some(false); } diff --git a/src/librustc_mir/build/matches/util.rs b/src/librustc_mir/build/matches/util.rs index a87660db2d313..3b90ff7884f01 100644 --- a/src/librustc_mir/build/matches/util.rs +++ b/src/librustc_mir/build/matches/util.rs @@ -1,16 +1,6 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use build::Builder; -use build::matches::MatchPair; -use hair::*; +use crate::build::Builder; +use crate::build::matches::MatchPair; +use crate::hair::*; use rustc::mir::*; use std::u32; use std::convert::TryInto; @@ -23,7 +13,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { subpatterns.iter() .map(|fieldpat| { let place = place.clone().field(fieldpat.field, - fieldpat.pattern.ty); + fieldpat.pattern.ty); MatchPair::new(place, &fieldpat.pattern) }) .collect() @@ -82,7 +72,6 @@ impl<'pat, 'tcx> MatchPair<'pat, 'tcx> { MatchPair { place, pattern, - slice_len_checked: false, } } } diff --git a/src/librustc_mir/build/misc.rs b/src/librustc_mir/build/misc.rs index 3ac7bd3fc68dc..d71a13dec5a2f 100644 --- a/src/librustc_mir/build/misc.rs +++ b/src/librustc_mir/build/misc.rs @@ -1,17 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Miscellaneous builder routines that are not specific to building any particular //! kind of thing. 
-use build::Builder; +use crate::build::Builder; use rustc::ty::{self, Ty}; @@ -19,14 +9,14 @@ use rustc::mir::*; use syntax_pos::{Span, DUMMY_SP}; impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { - /// Add a new temporary value of type `ty` storing the result of + /// Adds a new temporary value of type `ty` storing the result of /// evaluating `expr`. /// /// N.B., **No cleanup is scheduled for this temporary.** You should /// call `schedule_drop` once the temporary is initialized. pub fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> { let temp = self.local_decls.push(LocalDecl::new_temp(ty, span)); - let place = Place::Local(temp); + let place = Place::Base(PlaceBase::Local(temp)); debug!("temp: created temp {:?} with type {:?}", place, self.local_decls[temp].ty); place @@ -37,13 +27,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn literal_operand(&mut self, span: Span, ty: Ty<'tcx>, - literal: &'tcx ty::Const<'tcx>) + literal: ty::Const<'tcx>) -> Operand<'tcx> { let constant = box Constant { span, ty, user_ty: None, - literal, + literal: self.hir.tcx().mk_const(literal), }; Operand::Constant(constant) } @@ -80,8 +70,8 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { pub fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> { let tcx = self.hir.tcx(); - let ty = place.ty(&self.local_decls, tcx).to_ty(tcx); - if self.hir.type_moves_by_default(ty, DUMMY_SP) { + let ty = place.ty(&self.local_decls, tcx).ty; + if !self.hir.type_is_copy_modulo_regions(ty, DUMMY_SP) { Operand::Move(place) } else { Operand::Copy(place) diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index 451034fd15334..7fe86d11c9ee4 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -1,52 +1,37 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - -use build; -use build::scope::{CachedBlock, DropKind}; -use hair::cx::Cx; -use hair::{LintLevel, BindingMode, PatternKind}; +use crate::build; +use crate::build::scope::{CachedBlock, DropKind}; +use crate::hair::cx::Cx; +use crate::hair::{LintLevel, BindingMode, PatternKind}; +use crate::shim; +use crate::transform::MirSource; +use crate::util as mir_util; use rustc::hir; use rustc::hir::Node; -use rustc::hir::def_id::{DefId, LocalDefId}; +use rustc::hir::def_id::DefId; use rustc::middle::region; use rustc::mir::*; use rustc::mir::visit::{MutVisitor, TyContext}; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::subst::Substs; -use rustc::util::nodemap::NodeMap; +use rustc::ty::subst::SubstsRef; +use rustc::util::nodemap::HirIdMap; use rustc_target::spec::PanicStrategy; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; -use shim; use std::mem; use std::u32; use rustc_target::spec::abi::Abi; -use syntax::ast; use syntax::attr::{self, UnwindAttr}; use syntax::symbol::keywords; use syntax_pos::Span; -use transform::MirSource; -use util as mir_util; use super::lints; -/// Construct the MIR for a given def-id. +/// Construct the MIR for a given `DefId`. 
pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'tcx> { - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); // Figure out what primary body this item has. - let (body_id, return_ty_span) = match tcx.hir().get(id) { - Node::Variant(variant) => - return create_constructor_shim(tcx, id, &variant.node.data), - Node::StructCtor(ctor) => - return create_constructor_shim(tcx, id, ctor), + let (body_id, return_ty_span) = match tcx.hir().get_by_hir_id(id) { + Node::Ctor(ctor) => return create_constructor_shim(tcx, id, ctor), Node::Expr(hir::Expr { node: hir::ExprKind::Closure(_, decl, body_id, _, _), .. }) | Node::Item(hir::Item { node: hir::ItemKind::Fn(decl, _, _, body_id), .. }) @@ -75,23 +60,22 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t ) => { (*body_id, ty.span) } - Node::AnonConst(hir::AnonConst { body, id, .. }) => { - (*body, tcx.hir().span(*id)) + Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => { + (*body, tcx.hir().span_by_hir_id(*hir_id)) } - _ => span_bug!(tcx.hir().span(id), "can't build MIR for {:?}", def_id), + _ => span_bug!(tcx.hir().span_by_hir_id(id), "can't build MIR for {:?}", def_id), }; tcx.infer_ctxt().enter(|infcx| { let cx = Cx::new(&infcx, id); let mut mir = if cx.tables().tainted_by_errors { build::construct_error(cx, body_id) - } else if let hir::BodyOwnerKind::Fn = cx.body_owner_kind { + } else if cx.body_owner_kind.is_fn_or_closure() { // fetch the fully liberated fn signature (that is, all bound // types/lifetimes replaced) - let fn_hir_id = tcx.hir().node_to_hir_id(id); - let fn_sig = cx.tables().liberated_fn_sigs()[fn_hir_id].clone(); - let fn_def_id = tcx.hir().local_def_id(id); + let fn_sig = cx.tables().liberated_fn_sigs()[id].clone(); + let fn_def_id = tcx.hir().local_def_id_from_hir_id(id); let ty = tcx.type_of(fn_def_id); let mut abi = fn_sig.abi; @@ -103,7 +87,7 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t Some(ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None)) } ty::Generator(..) => { - let gen_ty = tcx.body_tables(body_id).node_id_to_type(fn_hir_id); + let gen_ty = tcx.body_tables(body_id).node_type(id); Some(ArgInfo(gen_ty, None, None, None)) } _ => None, @@ -125,7 +109,7 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t let self_arg; if let Some(ref fn_decl) = tcx.hir().fn_decl(owner_id) { let ty_hir_id = fn_decl.inputs[index].hir_id; - let ty_span = tcx.hir().span(tcx.hir().hir_to_node_id(ty_hir_id)); + let ty_span = tcx.hir().span_by_hir_id(ty_hir_id); opt_ty_info = Some(ty_span); self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() { match fn_decl.implicit_self { @@ -152,7 +136,8 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t ty::Generator(gen_def_id, gen_substs, ..) 
=> gen_substs.sig(gen_def_id, tcx), _ => - span_bug!(tcx.hir().span(id), "generator w/o generator type: {:?}", ty), + span_bug!(tcx.hir().span_by_hir_id(id), + "generator w/o generator type: {:?}", ty), }; (Some(gen_sig.yield_ty), gen_sig.return_ty) } else { @@ -172,7 +157,7 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t }; globalizer.visit_mir(&mut mir); let mir = unsafe { - mem::transmute::>(mir) + mem::transmute::, Mir<'tcx>>(mir) }; mir_util::dump_mir(tcx, None, "mir_map", &0, @@ -184,9 +169,9 @@ pub fn mir_build<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Mir<'t }) } -/// A pass to lift all the types and substitutions in a Mir +/// A pass to lift all the types and substitutions in a MIR /// to the global tcx. Sadly, we don't have a "folder" that -/// can change 'tcx so we have to transmute afterwards. +/// can change `'tcx` so we have to transmute afterwards. struct GlobalizeMir<'a, 'gcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'gcx>, span: Span @@ -223,7 +208,7 @@ impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> { } } - fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { + fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, _: Location) { if let Some(lifted) = self.tcx.lift(substs) { *substs = lifted; } else { @@ -235,11 +220,11 @@ impl<'a, 'gcx: 'tcx, 'tcx> MutVisitor<'tcx> for GlobalizeMir<'a, 'gcx> { } fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ctor_id: ast::NodeId, + ctor_id: hir::HirId, v: &'tcx hir::VariantData) -> Mir<'tcx> { - let span = tcx.hir().span(ctor_id); + let span = tcx.hir().span_by_hir_id(ctor_id); if let hir::VariantData::Tuple(ref fields, ctor_id) = *v { tcx.infer_ctxt().enter(|infcx| { let mut mir = shim::build_adt_ctor(&infcx, ctor_id, fields, span); @@ -252,11 +237,11 @@ fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; globalizer.visit_mir(&mut mir); let mir = unsafe { - mem::transmute::>(mir) + mem::transmute::, Mir<'tcx>>(mir) }; mir_util::dump_mir(tcx, None, "mir_map", &0, - MirSource::item(tcx.hir().local_def_id(ctor_id)), + MirSource::item(tcx.hir().local_def_id_from_hir_id(ctor_id)), &mir, |_, _| Ok(()) ); mir @@ -270,11 +255,10 @@ fn create_constructor_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // BuildMir -- walks a crate, looking for fn items and methods to build MIR from fn liberated_closure_env_ty<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, - closure_expr_id: ast::NodeId, + closure_expr_id: hir::HirId, body_id: hir::BodyId) -> Ty<'tcx> { - let closure_expr_hir_id = tcx.hir().node_to_hir_id(closure_expr_id); - let closure_ty = tcx.body_tables(body_id).node_id_to_type(closure_expr_hir_id); + let closure_ty = tcx.body_tables(body_id).node_type(closure_expr_id); let (closure_def_id, closure_substs) = match closure_ty.sty { ty::Closure(closure_def_id, closure_substs) => (closure_def_id, closure_substs), @@ -346,68 +330,69 @@ struct Builder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { fn_span: Span, arg_count: usize, - /// the current set of scopes, updated as we traverse; - /// see the `scope` module for more details + /// The current set of scopes, updated as we traverse; + /// see the `scope` module for more details. scopes: Vec>, - /// the block-context: each time we build the code within an hair::Block, + /// The block-context: each time we build the code within an hair::Block, /// we push a frame here tracking whether we are building a statement or /// if we are pushing the tail expression of the block. 
This is used to /// embed information in generated temps about whether they were created /// for a block tail expression or not. /// /// It would be great if we could fold this into `self.scopes` - /// somehow; but right now I think that is very tightly tied to + /// somehow, but right now I think that is very tightly tied to /// the code generation in ways that we cannot (or should not) /// start just throwing new entries onto that vector in order to /// distinguish the context of EXPR1 from the context of EXPR2 in - /// `{ STMTS; EXPR1 } + EXPR2` + /// `{ STMTS; EXPR1 } + EXPR2`. block_context: BlockContext, /// The current unsafe block in scope, even if it is hidden by - /// a PushUnsafeBlock + /// a `PushUnsafeBlock`. unpushed_unsafe: Safety, - /// The number of `push_unsafe_block` levels in scope + /// The number of `push_unsafe_block` levels in scope. push_unsafe_count: usize, - /// the current set of breakables; see the `scope` module for more - /// details + /// The current set of breakables; see the `scope` module for more + /// details. breakable_scopes: Vec>, - /// the vector of all scopes that we have created thus far; - /// we track this for debuginfo later + /// The vector of all scopes that we have created thus far; + /// we track this for debuginfo later. source_scopes: IndexVec, source_scope_local_data: IndexVec, source_scope: SourceScope, - /// the guard-context: each time we build the guard expression for + /// The guard-context: each time we build the guard expression for /// a match arm, we push onto this stack, and then pop when we /// finish building it. guard_context: Vec, - /// Maps node ids of variable bindings to the `Local`s created for them. + /// Maps `HirId`s of variable bindings to the `Local`s created for them. /// (A match binding can have two locals; the 2nd is for the arm's guard.) - var_indices: NodeMap, + var_indices: HirIdMap, local_decls: IndexVec>, + canonical_user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>, upvar_decls: Vec, unit_temp: Option>, - /// cached block with the RESUME terminator; this is created + /// Cached block with the `RESUME` terminator; this is created /// when first set of cleanups are built. cached_resume_block: Option, - /// cached block with the RETURN terminator + /// Cached block with the `RETURN` terminator. cached_return_block: Option, - /// cached block with the UNREACHABLE terminator + /// Cached block with the `UNREACHABLE` terminator. cached_unreachable_block: Option, } impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { - fn is_bound_var_in_guard(&self, id: ast::NodeId) -> bool { + fn is_bound_var_in_guard(&self, id: hir::HirId) -> bool { self.guard_context.iter().any(|frame| frame.locals.iter().any(|local| local.id == id)) } - fn var_local_id(&self, id: ast::NodeId, for_guard: ForGuard) -> Local { + fn var_local_id(&self, id: hir::HirId, for_guard: ForGuard) -> Local { self.var_indices[&id].local_id(for_guard) } } @@ -417,7 +402,7 @@ impl BlockContext { fn push(&mut self, bf: BlockFrame) { self.0.push(bf); } fn pop(&mut self) -> Option { self.0.pop() } - /// Traverses the frames on the BlockContext, searching for either + /// Traverses the frames on the `BlockContext`, searching for either /// the first block-tail expression frame with no intervening /// statement frame. /// @@ -463,14 +448,13 @@ impl BlockContext { #[derive(Debug)] enum LocalsForNode { - /// In the usual case, a node-id for an identifier maps to at most - /// one Local declaration. 
+ /// In the usual case, a `HirId` for an identifier maps to at most + /// one `Local` declaration. One(Local), /// The exceptional case is identifiers in a match arm's pattern /// that are referenced in a guard of that match arm. For these, - /// we can have `2+k` Locals, where `k` is the number of candidate - /// patterns (separated by `|`) in the arm. + /// we have `2` Locals. /// /// * `for_arm_body` is the Local used in the arm body (which is /// just like the `One` case above), @@ -478,25 +462,16 @@ enum LocalsForNode { /// * `ref_for_guard` is the Local used in the arm's guard (which /// is a reference to a temp that is an alias of /// `for_arm_body`). - /// - /// * `vals_for_guard` is the `k` Locals; at most one of them will - /// get initialized by the arm's execution, and after it is - /// initialized, `ref_for_guard` will be assigned a reference to - /// it. - /// - /// There reason we have `k` Locals rather than just 1 is to - /// accommodate some restrictions imposed by two-phase borrows, - /// which apply when we have a `ref mut` pattern. - ForGuard { vals_for_guard: Vec, ref_for_guard: Local, for_arm_body: Local }, + ForGuard { ref_for_guard: Local, for_arm_body: Local }, } #[derive(Debug)] struct GuardFrameLocal { - id: ast::NodeId, + id: hir::HirId, } impl GuardFrameLocal { - fn new(id: ast::NodeId, _binding_mode: BindingMode) -> Self { + fn new(id: hir::HirId, _binding_mode: BindingMode) -> Self { GuardFrameLocal { id: id, } @@ -515,21 +490,16 @@ struct GuardFrame { /// P1(id1) if (... (match E2 { P2(id2) if ... => B2 })) => B1, /// } /// - /// here, when building for FIXME + /// here, when building for FIXME. locals: Vec, } -/// ForGuard indicates whether we are talking about: -/// 1. the temp for a local binding used solely within guard expressions, -/// 2. the temp that holds reference to (1.), which is actually what the -/// guard expressions see, or -/// 3. the temp for use outside of guard expressions. +/// `ForGuard` indicates whether we are talking about: +/// 1. The variable for use outside of guard expressions, or +/// 2. The temp that holds reference to (1.), which is actually what the +/// guard expressions see. #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum ForGuard { - /// The `usize` identifies for which candidate pattern we want the - /// local binding. We keep a temp per-candidate to accommodate - /// two-phase borrows (see `LocalsForNode` documentation). - ValWithinGuard(usize), RefWithinGuard, OutsideGuard, } @@ -542,11 +512,6 @@ impl LocalsForNode { (&LocalsForNode::ForGuard { for_arm_body: local_id, .. }, ForGuard::OutsideGuard) => local_id, - (&LocalsForNode::ForGuard { ref vals_for_guard, .. 
}, - ForGuard::ValWithinGuard(pat_idx)) => - vals_for_guard[pat_idx], - - (&LocalsForNode::One(_), ForGuard::ValWithinGuard(_)) | (&LocalsForNode::One(_), ForGuard::RefWithinGuard) => bug!("anything with one local should never be within a guard."), } @@ -611,6 +576,10 @@ fn should_abort_on_panic<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, // Not callable from C, so we can safely unwind through these if abi == Abi::Rust || abi == Abi::RustCall { return false; } + // Validate `#[unwind]` syntax regardless of platform-specific panic strategy + let attrs = &tcx.get_attrs(fn_def_id); + let unwind_attr = attr::find_unwind_attr(Some(tcx.sess.diagnostic()), attrs); + // We never unwind, so it's not relevant to stop an unwind if tcx.sess.panic_strategy() != PanicStrategy::Unwind { return false; } @@ -619,8 +588,7 @@ fn should_abort_on_panic<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, // This is a special case: some functions have a C abi but are meant to // unwind anyway. Don't stop them. - let attrs = &tcx.get_attrs(fn_def_id); - match attr::find_unwind_attr(Some(tcx.sess.diagnostic()), attrs) { + match unwind_attr { None => true, Some(UnwindAttr::Allowed) => false, Some(UnwindAttr::Aborts) => true, @@ -636,7 +604,7 @@ struct ArgInfo<'gcx>(Ty<'gcx>, Option); fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, - fn_id: ast::NodeId, + fn_id: hir::HirId, arguments: A, safety: Safety, abi: Abi, @@ -650,21 +618,29 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, let arguments: Vec<_> = arguments.collect(); let tcx = hir.tcx(); - let span = tcx.hir().span(fn_id); + let tcx_hir = tcx.hir(); + let span = tcx_hir.span_by_hir_id(fn_id); + + let hir_tables = hir.tables(); + let fn_def_id = tcx_hir.local_def_id_from_hir_id(fn_id); // Gather the upvars of a closure, if any. - let upvar_decls: Vec<_> = tcx.with_freevars(fn_id, |freevars| { - freevars.iter().map(|fv| { - let var_id = fv.var_id(); - let var_hir_id = tcx.hir().node_to_hir_id(var_id); - let closure_expr_id = tcx.hir().local_def_id(fn_id); - let capture = hir.tables().upvar_capture(ty::UpvarId { - var_path: ty::UpvarPath {hir_id: var_hir_id}, - closure_expr_id: LocalDefId::from_def_id(closure_expr_id), - }); + // In analyze_closure() in upvar.rs we gathered a list of upvars used by a + // closure and we stored in a map called upvar_list in TypeckTables indexed + // with the closure's DefId. Here, we run through that vec of UpvarIds for + // the given closure and use the necessary information to create UpvarDecl. + let upvar_decls: Vec<_> = hir_tables + .upvar_list + .get(&fn_def_id) + .into_iter() + .flatten() + .map(|upvar_id| { + let var_hir_id = upvar_id.var_path.hir_id; + let var_node_id = tcx_hir.hir_to_node_id(var_hir_id); + let capture = hir_tables.upvar_capture(*upvar_id); let by_ref = match capture { ty::UpvarCapture::ByValue => false, - ty::UpvarCapture::ByRef(..) => true + ty::UpvarCapture::ByRef(..) 
=> true, }; let mut decl = UpvarDecl { debug_name: keywords::Invalid.name(), @@ -672,10 +648,9 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, by_ref, mutability: Mutability::Not, }; - if let Some(Node::Binding(pat)) = tcx.hir().find(var_id) { + if let Some(Node::Binding(pat)) = tcx_hir.find(var_node_id) { if let hir::PatKind::Binding(_, _, ident, _) = pat.node { decl.debug_name = ident.name; - if let Some(&bm) = hir.tables.pat_binding_modes().get(pat.hir_id) { if bm == ty::BindByValue(hir::MutMutable) { decl.mutability = Mutability::Mut; @@ -688,8 +663,8 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, } } decl - }).collect() - }); + }) + .collect(); let mut builder = Builder::new(hir, span, @@ -699,7 +674,6 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, return_ty_span, upvar_decls); - let fn_def_id = tcx.hir().local_def_id(fn_id); let call_site_scope = region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite @@ -742,9 +716,8 @@ fn construct_fn<'a, 'gcx, 'tcx, A>(hir: Cx<'a, 'gcx, 'tcx>, // RustCall pseudo-ABI untuples the last argument. spread_arg = Some(Local::new(arguments.len())); } - let closure_expr_id = tcx.hir().local_def_id(fn_id); - info!("fn_id {:?} has attrs {:?}", closure_expr_id, - tcx.get_attrs(closure_expr_id)); + info!("fn_id {:?} has attrs {:?}", fn_def_id, + tcx.get_attrs(fn_def_id)); let mut mir = builder.finish(yield_ty); mir.spread_arg = spread_arg; @@ -765,7 +738,7 @@ fn construct_const<'a, 'gcx, 'tcx>( let mut block = START_BLOCK; let expr = builder.hir.mirror(ast_expr); - unpack!(block = builder.into_expr(&Place::Local(RETURN_PLACE), block, expr)); + unpack!(block = builder.into_expr(&Place::RETURN_PLACE, block, expr)); let source_info = builder.source_info(span); builder.cfg.terminate(block, source_info, TerminatorKind::Return); @@ -823,6 +796,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { LocalDecl::new_return_place(return_ty, return_span), 1, ), + canonical_user_type_annotations: IndexVec::new(), upvar_decls, var_indices: Default::default(), unit_temp: None, @@ -849,15 +823,18 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } } - Mir::new(self.cfg.basic_blocks, - self.source_scopes, - ClearCrossCrate::Set(self.source_scope_local_data), - IndexVec::new(), - yield_ty, - self.local_decls, - self.arg_count, - self.upvar_decls, - self.fn_span + Mir::new( + self.cfg.basic_blocks, + self.source_scopes, + ClearCrossCrate::Set(self.source_scope_local_data), + IndexVec::new(), + yield_ty, + self.local_decls, + self.canonical_user_type_annotations, + self.arg_count, + self.upvar_decls, + self.fn_span, + self.hir.control_flow_destroyed(), ) } @@ -907,9 +884,16 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { for (index, arg_info) in arguments.iter().enumerate() { // Function arguments always get the first Local indices after the return place let local = Local::new(index + 1); - let place = Place::Local(local); + let place = Place::Base(PlaceBase::Local(local)); let &ArgInfo(ty, opt_ty_info, pattern, ref self_binding) = arg_info; + // Make sure we drop (parts of) the argument even when not matched on. 
+ self.schedule_drop( + pattern.as_ref().map_or(ast_body.span, |pat| pat.span), + argument_scope, &place, ty, + DropKind::Value { cached_block: CachedBlock::default() }, + ); + if let Some(pattern) = pattern { let pattern = self.hir.pattern_from_hir(pattern); let span = pattern.span; @@ -934,20 +918,13 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } _ => { scope = self.declare_bindings(scope, ast_body.span, - LintLevel::Inherited, &[pattern.clone()], + LintLevel::Inherited, &pattern, matches::ArmHasGuard(false), Some((Some(&place), span))); unpack!(block = self.place_into_pattern(block, pattern, &place, false)); } } } - - // Make sure we drop (parts of) the argument even when not matched on. - self.schedule_drop( - pattern.as_ref().map_or(ast_body.span, |pat| pat.span), - argument_scope, &place, ty, - DropKind::Value { cached_block: CachedBlock::default() }, - ); } // Enter the argument pattern bindings source scope, if it exists. @@ -956,7 +933,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { } let body = self.hir.mirror(ast_body); - self.into(&Place::Local(RETURN_PLACE), block, body) + self.into(&Place::RETURN_PLACE, block, body) } fn get_unit_temp(&mut self) -> Place<'tcx> { diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 8c948766314e8..4aa463b37ab77 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! Managing the scope stack. The scopes are tied to lexical scopes, so as we descend the HAIR, we push a scope on the stack, build its @@ -87,12 +77,11 @@ should go to. */ -use build::{BlockAnd, BlockAndExtension, Builder, CFG}; -use hair::LintLevel; +use crate::build::{BlockAnd, BlockAndExtension, Builder, CFG}; +use crate::hair::LintLevel; use rustc::middle::region; use rustc::ty::Ty; use rustc::hir; -use rustc::hir::def_id::LOCAL_CRATE; use rustc::mir::*; use syntax_pos::{Span}; use rustc_data_structures::fx::FxHashMap; @@ -220,7 +209,7 @@ impl DropKind { } impl<'tcx> Scope<'tcx> { - /// Invalidate all the cached blocks in the scope. + /// Invalidates all the cached blocks in the scope. /// /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a /// larger extent of code. @@ -318,23 +307,26 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { debug!("in_scope(region_scope={:?}, block={:?})", region_scope, block); let source_scope = self.source_scope; let tcx = self.hir.tcx(); - if let LintLevel::Explicit(node_id) = lint_level { - let same_lint_scopes = tcx.dep_graph.with_ignore(|| { - let sets = tcx.lint_levels(LOCAL_CRATE); - let parent_hir_id = - tcx.hir().definitions().node_to_hir_id( - self.source_scope_local_data[source_scope].lint_root - ); - let current_hir_id = - tcx.hir().definitions().node_to_hir_id(node_id); - sets.lint_level_set(parent_hir_id) == - sets.lint_level_set(current_hir_id) - }); - - if !same_lint_scopes { - self.source_scope = - self.new_source_scope(region_scope.1.span, lint_level, - None); + if let LintLevel::Explicit(current_hir_id) = lint_level { + // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound + // to avoid adding Hir dependences on our parents. 
+ // We estimate the true lint roots here to avoid creating a lot of source scopes. + + let parent_root = tcx.maybe_lint_level_root_bounded( + self.source_scope_local_data[source_scope].lint_root, + self.hir.root_lint_level, + ); + let current_root = tcx.maybe_lint_level_root_bounded( + current_hir_id, + self.hir.root_lint_level + ); + + if parent_root != current_root { + self.source_scope = self.new_source_scope( + region_scope.1.span, + LintLevel::Explicit(current_root), + None + ); } } self.push_scope(region_scope); @@ -400,7 +392,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { /// Branch out of `block` to `target`, exiting all scopes up to - /// and including `region_scope`. This will insert whatever drops are + /// and including `region_scope`. This will insert whatever drops are /// needed. See module comment for details. pub fn exit_scope(&mut self, span: Span, @@ -623,6 +615,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { hir::BodyOwnerKind::Static(_) => // No need to free storage in this context. None, + hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => Some(self.topmost_scope()), } @@ -677,7 +670,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { DropKind::Value { .. } => if !needs_drop { return }, DropKind::Storage => { match *place { - Place::Local(index) => if index.index() <= self.arg_count { + Place::Base(PlaceBase::Local(index)) => if index.index() <= self.arg_count { span_bug!( span, "`schedule_drop` called with index {} and arg_count {}", index.index(), @@ -855,7 +848,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> { next_target.unit() } - /// Create an Assert terminator and return the success block. + /// Creates an Assert terminator and return the success block. /// If the boolean condition operand is not the expected value, /// a runtime panic will be caused with the given message. pub fn assert(&mut self, block: BasicBlock, @@ -951,7 +944,7 @@ fn build_scope_drops<'tcx>( // Drop the storage for both value and storage drops. // Only temps and vars need their storage dead. match drop_data.location { - Place::Local(index) if index.index() > arg_count => { + Place::Base(PlaceBase::Local(index)) if index.index() > arg_count => { cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(index) diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 248c5d2db4917..4c8ab361e04ea 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- // Not in interpret to make sure we do not use private implementation details use std::fmt; @@ -20,20 +10,20 @@ use rustc::hir::{self, def_id::DefId}; use rustc::hir::def::Def; use rustc::mir::interpret::{ConstEvalErr, ErrorHandled}; use rustc::mir; -use rustc::ty::{self, TyCtxt, Instance, query::TyCtxtAt}; -use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx}; +use rustc::ty::{self, TyCtxt, query::TyCtxtAt}; +use rustc::ty::layout::{self, LayoutOf, VariantIdx}; use rustc::ty::subst::Subst; use rustc::traits::Reveal; -use rustc_data_structures::indexed_vec::IndexVec; -use rustc_data_structures::fx::FxHashMap; use rustc::util::common::ErrorReported; +use rustc_data_structures::fx::FxHashMap; use syntax::ast::Mutability; use syntax::source_map::{Span, DUMMY_SP}; use crate::interpret::{self, - PlaceTy, MPlaceTy, MemPlace, OpTy, Operand, Immediate, Scalar, RawConst, ConstValue, Pointer, - EvalResult, EvalError, EvalErrorKind, GlobalId, EvalContext, StackPopCleanup, + PlaceTy, MPlaceTy, MemPlace, OpTy, ImmTy, Immediate, Scalar, Pointer, + RawConst, ConstValue, + EvalResult, EvalError, InterpError, GlobalId, InterpretCx, StackPopCleanup, Allocation, AllocId, MemoryKind, snapshot, RefTracking, }; @@ -45,49 +35,20 @@ const STEPS_UNTIL_DETECTOR_ENABLED: isize = 1_000_000; /// Should be a power of two for performance reasons. const DETECTOR_SNAPSHOT_PERIOD: isize = 256; -pub fn mk_borrowck_eval_cx<'a, 'mir, 'tcx>( +/// The `InterpretCx` is only meant to be used to do field and index projections into constants for +/// `simd_shuffle` and const patterns in match arms. +/// +/// The function containing the `match` that is currently being analyzed may have generic bounds +/// that inform us about the generic bounds of the constant. E.g., using an associated constant +/// of a function's generic parameter will require knowledge about the bounds on the generic +/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument. 
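As an assumed, surface-level illustration of the "const patterns in match arms" case this comment refers to (the names `Point` and `ORIGIN` below are made up, not taken from the patch): lowering such an arm requires projecting individual fields out of the evaluated constant.

```rust
// Matching against a constant of structural-match type: building the MIR for
// the `ORIGIN` arm compares `p` field by field, which means projecting
// `ORIGIN.x` and `ORIGIN.y` out of the evaluated constant.
#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

fn at_origin(p: Point) -> bool {
    match p {
        ORIGIN => true,
        _ => false,
    }
}

fn main() {
    assert!(at_origin(Point { x: 0, y: 0 }));
    assert!(!at_origin(Point { x: 1, y: 0 }));
}
```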
+pub(crate) fn mk_eval_cx<'a, 'mir, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, - instance: Instance<'tcx>, - mir: &'mir mir::Mir<'tcx>, span: Span, -) -> EvalResult<'tcx, CompileTimeEvalContext<'a, 'mir, 'tcx>> { - debug!("mk_borrowck_eval_cx: {:?}", instance); - let param_env = tcx.param_env(instance.def_id()); - let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeInterpreter::new()); - // insert a stack frame so any queries have the correct substs - // cannot use `push_stack_frame`; if we do `const_prop` explodes - ecx.stack.push(interpret::Frame { - block: mir::START_BLOCK, - locals: IndexVec::new(), - instance, - span, - mir, - return_place: None, - return_to_block: StackPopCleanup::Goto(None), // never pop - stmt: 0, - extra: (), - }); - Ok(ecx) -} - -pub fn mk_eval_cx<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - instance: Instance<'tcx>, param_env: ty::ParamEnv<'tcx>, -) -> EvalResult<'tcx, CompileTimeEvalContext<'a, 'tcx, 'tcx>> { - debug!("mk_eval_cx: {:?}, {:?}", instance, param_env); - let span = tcx.def_span(instance.def_id()); - let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeInterpreter::new()); - let mir = ecx.load_mir(instance.def)?; - // insert a stack frame so any queries have the correct substs - ecx.push_stack_frame( - instance, - mir.span, - mir, - None, - StackPopCleanup::Goto(None), // never pop - )?; - Ok(ecx) +) -> CompileTimeEvalContext<'a, 'mir, 'tcx> { + debug!("mk_eval_cx: {:?}", param_env); + InterpretCx::new(tcx.at(span), param_env, CompileTimeInterpreter::new()) } pub(crate) fn eval_promoted<'a, 'mir, 'tcx>( @@ -96,62 +57,54 @@ pub(crate) fn eval_promoted<'a, 'mir, 'tcx>( mir: &'mir mir::Mir<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> EvalResult<'tcx, MPlaceTy<'tcx>> { - let mut ecx = mk_borrowck_eval_cx(tcx, cid.instance, mir, DUMMY_SP).unwrap(); + let span = tcx.def_span(cid.instance.def_id()); + let mut ecx = mk_eval_cx(tcx, span, param_env); eval_body_using_ecx(&mut ecx, cid, Some(mir), param_env) } -// FIXME: These two conversion functions are bad hacks. We should just always use allocations. -pub fn op_to_const<'tcx>( +fn mplace_to_const<'tcx>( + ecx: &CompileTimeEvalContext<'_, '_, 'tcx>, + mplace: MPlaceTy<'tcx>, +) -> ty::Const<'tcx> { + let MemPlace { ptr, align, meta } = *mplace; + // extract alloc-offset pair + assert!(meta.is_none()); + let ptr = ptr.to_ptr().unwrap(); + let alloc = ecx.memory.get(ptr.alloc_id).unwrap(); + assert!(alloc.align >= align); + assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= mplace.layout.size.bytes()); + let mut alloc = alloc.clone(); + alloc.align = align; + // FIXME shouldn't it be the case that `mark_static_initialized` has already + // interned this? I thought that is the entire point of that `FinishStatic` stuff? + let alloc = ecx.tcx.intern_const_alloc(alloc); + let val = ConstValue::ByRef(ptr, alloc); + ty::Const { val, ty: mplace.layout.ty } +} + +fn op_to_const<'tcx>( ecx: &CompileTimeEvalContext<'_, '_, 'tcx>, op: OpTy<'tcx>, - may_normalize: bool, -) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> { - // We do not normalize just any data. Only scalar layout and fat pointers. - let normalize = may_normalize - && match op.layout.abi { - layout::Abi::Scalar(..) => true, - layout::Abi::ScalarPair(..) => { - // Must be a fat pointer - op.layout.ty.builtin_deref(true).is_some() - }, - _ => false, - }; +) -> ty::Const<'tcx> { + // We do not normalize just any data. Only non-union scalars and slices. + let normalize = match op.layout.abi { + layout::Abi::Scalar(..) 
=> op.layout.ty.ty_adt_def().map_or(true, |adt| !adt.is_union()), + layout::Abi::ScalarPair(..) => op.layout.ty.is_slice(), + _ => false, + }; let normalized_op = if normalize { - ecx.try_read_immediate(op)? + Err(*ecx.read_immediate(op).expect("normalization works on validated constants")) } else { - match op.op { - Operand::Indirect(mplace) => Err(mplace), - Operand::Immediate(val) => Ok(val) - } + op.try_as_mplace() }; let val = match normalized_op { - Err(MemPlace { ptr, align, meta }) => { - // extract alloc-offset pair - assert!(meta.is_none()); - let ptr = ptr.to_ptr()?; - let alloc = ecx.memory.get(ptr.alloc_id)?; - assert!(alloc.align >= align); - assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); - let mut alloc = alloc.clone(); - alloc.align = align; - // FIXME shouldn't it be the case that `mark_static_initialized` has already - // interned this? I thought that is the entire point of that `FinishStatic` stuff? - let alloc = ecx.tcx.intern_const_alloc(alloc); - ConstValue::ByRef(ptr.alloc_id, alloc, ptr.offset) - }, - Ok(Immediate::Scalar(x)) => - ConstValue::Scalar(x.not_undef()?), - Ok(Immediate::ScalarPair(a, b)) => - ConstValue::ScalarPair(a.not_undef()?, b.not_undef()?), + Ok(mplace) => return mplace_to_const(ecx, mplace), + Err(Immediate::Scalar(x)) => + ConstValue::Scalar(x.not_undef().unwrap()), + Err(Immediate::ScalarPair(a, b)) => + ConstValue::Slice(a.not_undef().unwrap(), b.to_usize(ecx).unwrap()), }; - Ok(ty::Const::from_const_value(ecx.tcx.tcx, val, op.layout.ty)) -} -pub fn const_to_op<'tcx>( - ecx: &CompileTimeEvalContext<'_, '_, 'tcx>, - cnst: &ty::Const<'tcx>, -) -> EvalResult<'tcx, OpTy<'tcx>> { - let op = ecx.const_value_to_op(cnst.val)?; - Ok(OpTy { op, layout: ecx.layout_of(cnst.ty)? }) + ty::Const { val, ty: op.layout.ty } } fn eval_body_and_ecx<'a, 'mir, 'tcx>( @@ -164,7 +117,7 @@ fn eval_body_and_ecx<'a, 'mir, 'tcx>( // and try improving it down the road when more information is available let span = tcx.def_span(cid.instance.def_id()); let span = mir.map(|mir| mir.span).unwrap_or(span); - let mut ecx = EvalContext::new(tcx.at(span), param_env, CompileTimeInterpreter::new()); + let mut ecx = InterpretCx::new(tcx.at(span), param_env, CompileTimeInterpreter::new()); let r = eval_body_using_ecx(&mut ecx, cid, mir, param_env); (r, ecx) } @@ -187,9 +140,9 @@ fn eval_body_using_ecx<'mir, 'tcx>( } let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?; assert!(!layout.is_unsized()); - let ret = ecx.allocate(layout, MemoryKind::Stack)?; + let ret = ecx.allocate(layout, MemoryKind::Stack); - let name = ty::tls::with(|tcx| tcx.item_path_str(cid.instance.def_id())); + let name = ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id())); let prom = cid.promoted.map_or(String::new(), |p| format!("::promoted[{:?}]", p)); trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom); assert!(mir.arg_count == 0); @@ -220,7 +173,7 @@ fn eval_body_using_ecx<'mir, 'tcx>( impl<'tcx> Into> for ConstEvalError { fn into(self) -> EvalError<'tcx> { - EvalErrorKind::MachineError(self.to_string()).into() + InterpError::MachineError(self.to_string()).into() } } @@ -230,7 +183,7 @@ enum ConstEvalError { } impl fmt::Display for ConstEvalError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { use self::ConstEvalError::*; match *self { NeedsRfc(ref msg) => { @@ -340,7 +293,7 @@ impl interpret::AllocMap for FxHashMap { } type 
CompileTimeEvalContext<'a, 'mir, 'tcx> = - EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>; + InterpretCx<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>; impl interpret::MayLeak for ! { #[inline(always)] @@ -365,37 +318,40 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> const STATIC_KIND: Option = None; // no copying of statics allowed #[inline(always)] - fn enforce_validity(_ecx: &EvalContext<'a, 'mir, 'tcx, Self>) -> bool { + fn enforce_validity(_ecx: &InterpretCx<'a, 'mir, 'tcx, Self>) -> bool { false // for now, we don't enforce validity } fn find_fn( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], dest: Option>, ret: Option, ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> { debug!("eval_fn_call: {:?}", instance); - // Execution might have wandered off into other crates, so we cannot to a stability- - // sensitive check here. But we can at least rule out functions that are not const - // at all. - if !ecx.tcx.is_const_fn_raw(instance.def_id()) { - // Some functions we support even if they are non-const -- but avoid testing - // that for const fn! We certainly do *not* want to actually call the fn - // though, so be sure we return here. - return if ecx.hook_fn(instance, args, dest)? { - ecx.goto_block(ret)?; // fully evaluated and done - Ok(None) - } else { - err!(MachineError(format!("calling non-const function `{}`", instance))) - }; + // Only check non-glue functions + if let ty::InstanceDef::Item(def_id) = instance.def { + // Execution might have wandered off into other crates, so we cannot to a stability- + // sensitive check here. But we can at least rule out functions that are not const + // at all. + if !ecx.tcx.is_const_fn_raw(def_id) { + // Some functions we support even if they are non-const -- but avoid testing + // that for const fn! We certainly do *not* want to actually call the fn + // though, so be sure we return here. + return if ecx.hook_fn(instance, args, dest)? { + ecx.goto_block(ret)?; // fully evaluated and done + Ok(None) + } else { + err!(MachineError(format!("calling non-const function `{}`", instance))) + }; + } } // This is a const fn. Call it. 
Ok(Some(match ecx.load_mir(instance.def) { Ok(mir) => mir, Err(err) => { - if let EvalErrorKind::NoMirFor(ref path) = err.kind { + if let InterpError::NoMirFor(ref path) = err.kind { return Err( ConstEvalError::NeedsRfc(format!("calling extern function `{}`", path)) .into(), @@ -407,7 +363,7 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> } fn call_intrinsic( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx>], dest: PlaceTy<'tcx>, @@ -423,12 +379,10 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> } fn ptr_op( - _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &InterpretCx<'a, 'mir, 'tcx, Self>, _bin_op: mir::BinOp, - _left: Scalar, - _left_layout: TyLayout<'tcx>, - _right: Scalar, - _right_layout: TyLayout<'tcx>, + _left: ImmTy<'tcx>, + _right: ImmTy<'tcx>, ) -> EvalResult<'tcx, (Scalar, bool)> { Err( ConstEvalError::NeedsRfc("pointer arithmetic or comparison".to_string()).into(), @@ -453,7 +407,7 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> } fn box_alloc( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, _dest: PlaceTy<'tcx>, ) -> EvalResult<'tcx> { Err( @@ -461,7 +415,7 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> ) } - fn before_terminator(ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx> { + fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx> { { let steps = &mut ecx.machine.steps_since_detector_enabled; @@ -487,76 +441,71 @@ impl<'a, 'mir, 'tcx> interpret::Machine<'a, 'mir, 'tcx> #[inline(always)] fn tag_new_allocation( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, ptr: Pointer, _kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { - Ok(ptr) + ) -> Pointer { + ptr } #[inline(always)] fn stack_push( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, ) -> EvalResult<'tcx> { Ok(()) } - /// Called immediately before a stack frame gets popped + /// Called immediately before a stack frame gets popped. #[inline(always)] fn stack_pop( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, _extra: (), ) -> EvalResult<'tcx> { Ok(()) } } -/// Project to a field of a (variant of a) const +/// Projects to a field of a (variant of a) const. +// this function uses `unwrap` copiously, because an already validated constant must have valid +// fields and can thus never fail outside of compiler bugs pub fn const_field<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - instance: ty::Instance<'tcx>, variant: Option, field: mir::Field, - value: &'tcx ty::Const<'tcx>, -) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { - trace!("const_field: {:?}, {:?}, {:?}", instance, field, value); - let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let result = (|| { - // get the operand again - let op = const_to_op(&ecx, value)?; - // downcast - let down = match variant { - None => op, - Some(variant) => ecx.operand_downcast(op, variant)? - }; - // then project - let field = ecx.operand_field(down, field.index() as u64)?; - // and finally move back to the const world, always normalizing because - // this is not called for statics. 
- op_to_const(&ecx, field, true) - })(); - result.map_err(|error| { - let err = error_to_const_error(&ecx, error); - err.report_as_error(ecx.tcx, "could not access field of constant"); - ErrorHandled::Reported - }) + value: ty::Const<'tcx>, +) -> ty::Const<'tcx> { + trace!("const_field: {:?}, {:?}", field, value); + let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env); + // get the operand again + let op = ecx.eval_const_to_op(value, None).unwrap(); + // downcast + let down = match variant { + None => op, + Some(variant) => ecx.operand_downcast(op, variant).unwrap(), + }; + // then project + let field = ecx.operand_field(down, field.index() as u64).unwrap(); + // and finally move back to the const world, always normalizing because + // this is not called for statics. + op_to_const(&ecx, field) } +// this function uses `unwrap` copiously, because an already validated constant must have valid +// fields and can thus never fail outside of compiler bugs pub fn const_variant_index<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - instance: ty::Instance<'tcx>, - val: &'tcx ty::Const<'tcx>, -) -> EvalResult<'tcx, VariantIdx> { - trace!("const_variant_index: {:?}, {:?}", instance, val); - let ecx = mk_eval_cx(tcx, instance, param_env).unwrap(); - let op = const_to_op(&ecx, val)?; - Ok(ecx.read_discriminant(op)?.1) + val: ty::Const<'tcx>, +) -> VariantIdx { + trace!("const_variant_index: {:?}", val); + let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env); + let op = ecx.eval_const_to_op(val, None).unwrap(); + ecx.read_discriminant(op).unwrap().1 } pub fn error_to_const_error<'a, 'mir, 'tcx>( - ecx: &EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>, + ecx: &InterpretCx<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>, mut error: EvalError<'tcx> ) -> ConstEvalErr<'tcx> { error.print_backtrace(); @@ -570,24 +519,25 @@ fn validate_and_turn_into_const<'a, 'tcx>( key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { let cid = key.value; - let ecx = mk_eval_cx(tcx, cid.instance, key.param_env).unwrap(); + let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env); let val = (|| { - let op = ecx.raw_const_to_mplace(constant)?.into(); - // FIXME: Once the visitor infrastructure landed, change validation to - // work directly on `MPlaceTy`. - let mut ref_tracking = RefTracking::new(op); - while let Some((op, path)) = ref_tracking.todo.pop() { + let mplace = ecx.raw_const_to_mplace(constant)?; + let mut ref_tracking = RefTracking::new(mplace); + while let Some((mplace, path)) = ref_tracking.todo.pop() { ecx.validate_operand( - op, + mplace.into(), path, Some(&mut ref_tracking), - /* const_mode */ true, + true, // const mode )?; } - // Now that we validated, turn this into a proper constant + // Now that we validated, turn this into a proper constant. let def_id = cid.instance.def.def_id(); - let normalize = tcx.is_static(def_id).is_none() && cid.promoted.is_none(); - op_to_const(&ecx, op, normalize) + if tcx.is_static(def_id).is_some() || cid.promoted.is_some() { + Ok(mplace_to_const(&ecx, mplace)) + } else { + Ok(op_to_const(&ecx, mplace.into())) + } })(); val.map_err(|error| { @@ -652,35 +602,22 @@ pub fn const_eval_raw_provider<'a, 'tcx>( other => return other, } } - // the first trace is for replicating an ice - // There's no tracking issue, but the next two lines concatenated link to the discussion on - // zulip. 
It's not really possible to test this, because it doesn't show up in diagnostics - // or MIR. - // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/ - // subject/anon_const_instance_printing/near/135980032 - trace!("const eval: {}", key.value.instance); - trace!("const eval: {:?}", key); + if cfg!(debug_assertions) { + // Make sure we format the instance even if we do not print it. + // This serves as a regression test against an ICE on printing. + // The next two lines concatenated contain some discussion: + // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/ + // subject/anon_const_instance_printing/near/135980032 + let instance = key.value.instance.to_string(); + trace!("const eval: {:?} ({})", key, instance); + } let cid = key.value; let def_id = cid.instance.def.def_id(); - if let Some(id) = tcx.hir().as_local_node_id(def_id) { - let tables = tcx.typeck_tables_of(def_id); - - // Do match-check before building MIR - if let Err(ErrorReported) = tcx.check_match(def_id) { - return Err(ErrorHandled::Reported) - } - - if let hir::BodyOwnerKind::Const = tcx.hir().body_owner_kind(id) { - tcx.mir_const_qualif(def_id); - } - - // Do not continue into miri if typeck errors occurred; it will fail horribly - if tables.tainted_by_errors { - return Err(ErrorHandled::Reported) - } - }; + if def_id.is_local() && tcx.typeck_tables_of(def_id).tainted_by_errors { + return Err(ErrorHandled::Reported); + } let (res, ecx) = eval_body_and_ecx(tcx, cid, None, key.param_env); res.and_then(|place| { @@ -692,12 +629,21 @@ pub fn const_eval_raw_provider<'a, 'tcx>( let err = error_to_const_error(&ecx, error); // errors in statics are always emitted as fatal errors if tcx.is_static(def_id).is_some() { - let err = err.report_as_error(ecx.tcx, "could not evaluate static initializer"); - // check that a static never produces `TooGeneric` - if tcx.sess.err_count() == 0 { - span_bug!(ecx.tcx.span, "static eval failure didn't emit an error: {:#?}", err); + // Ensure that if the above error was either `TooGeneric` or `Reported` + // an error must be reported. + let reported_err = tcx.sess.track_errors(|| { + err.report_as_error(ecx.tcx, + "could not evaluate static initializer") + }); + match reported_err { + Ok(v) => { + tcx.sess.delay_span_bug(err.span, + &format!("static eval failure did not emit an error: {:#?}", + v)); + v + }, + Err(ErrorReported) => ErrorHandled::Reported, } - err } else if def_id.is_local() { // constant defined in this crate, we can figure out a lint level! 
match tcx.describe_def(def_id) { @@ -708,18 +654,19 @@ pub fn const_eval_raw_provider<'a, 'tcx>( // because any code that existed before validation could not have failed validation // thus preventing such a hard error from being a backwards compatibility hazard Some(Def::Const(_)) | Some(Def::AssociatedConst(_)) => { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); err.report_as_lint( tcx.at(tcx.def_span(def_id)), "any use of this value will cause an error", - node_id, + hir_id, + Some(err.span), ) }, // promoting runtime code is only allowed to error if it references broken constants // any other kind of error will be reported to the user as a deny-by-default lint _ => if let Some(p) = cid.promoted { let span = tcx.optimized_mir(def_id).promoted[p].span; - if let EvalErrorKind::ReferencedConstant = err.error { + if let InterpError::ReferencedConstant = err.error { err.report_as_error( tcx.at(span), "evaluation of constant expression failed", @@ -728,7 +675,8 @@ pub fn const_eval_raw_provider<'a, 'tcx>( err.report_as_lint( tcx.at(span), "reaching this expression at runtime will panic or abort", - tcx.hir().as_local_node_id(def_id).unwrap(), + tcx.hir().as_local_hir_id(def_id).unwrap(), + Some(err.span), ) } // anything else (array lengths, enum initializers, constant patterns) are reported diff --git a/src/librustc_mir/dataflow/at_location.rs b/src/librustc_mir/dataflow/at_location.rs index 52eae58152874..d43fa4257e06c 100644 --- a/src/librustc_mir/dataflow/at_location.rs +++ b/src/librustc_mir/dataflow/at_location.rs @@ -1,21 +1,11 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A nice wrapper to consume dataflow results at several CFG //! locations. use rustc::mir::{BasicBlock, Location}; use rustc_data_structures::bit_set::{BitIter, BitSet, HybridBitSet}; -use dataflow::{BitDenotation, BlockSets, DataflowResults}; -use dataflow::move_paths::{HasMoveData, MovePathIndex}; +use crate::dataflow::{BitDenotation, BlockSets, DataflowResults}; +use crate::dataflow::move_paths::{HasMoveData, MovePathIndex}; use std::iter; @@ -36,14 +26,14 @@ pub trait FlowsAtLocation { /// effects don't apply to the unwind edge). fn reset_to_exit_of(&mut self, bb: BasicBlock); - /// Build gen + kill sets for statement at `loc`. + /// Builds gen and kill sets for statement at `loc`. /// /// Note that invoking this method alone does not change the /// `curr_state` -- you must invoke `apply_local_effect` /// afterwards. fn reconstruct_statement_effect(&mut self, loc: Location); - /// Build gen + kill sets for terminator for `loc`. + /// Builds gen and kill sets for terminator for `loc`. /// /// Note that invoking this method alone does not change the /// `curr_state` -- you must invoke `apply_local_effect` @@ -70,19 +60,19 @@ pub trait FlowsAtLocation { /// (e.g., via `reconstruct_statement_effect` and /// `reconstruct_terminator_effect`; don't forget to call /// `apply_local_effect`). 
-pub struct FlowAtLocation +pub struct FlowAtLocation<'tcx, BD> where - BD: BitDenotation, + BD: BitDenotation<'tcx>, { - base_results: DataflowResults, + base_results: DataflowResults<'tcx, BD>, curr_state: BitSet, stmt_gen: HybridBitSet, stmt_kill: HybridBitSet, } -impl FlowAtLocation +impl<'tcx, BD> FlowAtLocation<'tcx, BD> where - BD: BitDenotation, + BD: BitDenotation<'tcx>, { /// Iterate over each bit set in the current state. pub fn each_state_bit(&self, f: F) @@ -102,7 +92,7 @@ where self.stmt_gen.iter().for_each(f) } - pub fn new(results: DataflowResults) -> Self { + pub fn new(results: DataflowResults<'tcx, BD>) -> Self { let bits_per_block = results.sets().bits_per_block(); let curr_state = BitSet::new_empty(bits_per_block); let stmt_gen = HybridBitSet::new_empty(bits_per_block); @@ -125,7 +115,7 @@ where } /// Returns an iterator over the elements present in the current state. - pub fn iter_incoming(&self) -> iter::Peekable> { + pub fn iter_incoming(&self) -> iter::Peekable> { self.curr_state.iter().peekable() } @@ -134,7 +124,7 @@ where /// Invokes `f` with an iterator over the resulting state. pub fn with_iter_outgoing(&self, f: F) where - F: FnOnce(BitIter), + F: FnOnce(BitIter<'_, BD::Idx>), { let mut curr_state = self.curr_state.clone(); curr_state.union(&self.stmt_gen); @@ -143,8 +133,8 @@ where } } -impl FlowsAtLocation for FlowAtLocation - where BD: BitDenotation +impl<'tcx, BD> FlowsAtLocation for FlowAtLocation<'tcx, BD> + where BD: BitDenotation<'tcx> { fn reset_to_entry_of(&mut self, bb: BasicBlock) { self.curr_state.overwrite(self.base_results.sets().on_entry_set_for(bb.index())); @@ -213,9 +203,9 @@ impl FlowsAtLocation for FlowAtLocation } -impl<'tcx, T> FlowAtLocation +impl<'tcx, T> FlowAtLocation<'tcx, T> where - T: HasMoveData<'tcx> + BitDenotation, + T: HasMoveData<'tcx> + BitDenotation<'tcx, Idx = MovePathIndex>, { pub fn has_any_child_of(&self, mpi: T::Idx) -> Option { // We process `mpi` before the loop below, for two reasons: diff --git a/src/librustc_mir/dataflow/drop_flag_effects.rs b/src/librustc_mir/dataflow/drop_flag_effects.rs index 7af1daae4cad5..aae4590a387a7 100644 --- a/src/librustc_mir/dataflow/drop_flag_effects.rs +++ b/src/librustc_mir/dataflow/drop_flag_effects.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::{self, Mir, Location}; use rustc::ty::{self, TyCtxt}; -use util::elaborate_drops::DropFlagState; +use crate::util::elaborate_drops::DropFlagState; use super::{MoveDataParamEnv}; use super::indexes::MovePathIndex; @@ -54,12 +44,12 @@ pub fn move_path_children_matching<'tcx, F>(move_data: &MoveData<'tcx>, /// In both cases, the contents can only be accessed if and only if /// their parents are initialized. This implies for example that there /// is no need to maintain separate drop flags to track such state. -/// -/// FIXME: we have to do something for moving slice patterns. +// +// FIXME: we have to do something for moving slice patterns. fn place_contents_drop_state_cannot_differ<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, mir: &Mir<'tcx>, place: &mir::Place<'tcx>) -> bool { - let ty = place.ty(mir, tcx).to_ty(tcx); + let ty = place.ty(mir, tcx).ty; match ty.sty { ty::Array(..) 
=> { debug!("place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false", @@ -151,7 +141,7 @@ pub(crate) fn on_all_drop_children_bits<'a, 'gcx, 'tcx, F>( { on_all_children_bits(tcx, mir, &ctxt.move_data, path, |child| { let place = &ctxt.move_data.move_paths[path].place; - let ty = place.ty(mir, tcx).to_ty(tcx); + let ty = place.ty(mir, tcx).ty; debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty); let gcx = tcx.global_tcx(); @@ -173,7 +163,7 @@ pub(crate) fn drop_flag_effects_for_function_entry<'a, 'gcx, 'tcx, F>( { let move_data = &ctxt.move_data; for arg in mir.args_iter() { - let place = mir::Place::Local(arg); + let place = mir::Place::Base(mir::PlaceBase::Local(arg)); let lookup_result = move_data.rev_lookup.find(&place); on_lookup_result_bits(tcx, mir, move_data, lookup_result, diff --git a/src/librustc_mir/dataflow/graphviz.rs b/src/librustc_mir/dataflow/graphviz.rs index f6a9d46b5e2cb..d68377681f1ca 100644 --- a/src/librustc_mir/dataflow/graphviz.rs +++ b/src/librustc_mir/dataflow/graphviz.rs @@ -1,43 +1,33 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Hook into libgraphviz for rendering dataflow graphs for MIR. -use syntax::ast::NodeId; +use rustc::hir::def_id::DefId; use rustc::mir::{BasicBlock, Mir}; -use dot; - use std::fs; use std::io; use std::marker::PhantomData; use std::path::Path; +use crate::util::graphviz_safe_def_name; + use super::{BitDenotation, DataflowState}; use super::DataflowBuilder; use super::DebugFormatted; pub trait MirWithFlowState<'tcx> { - type BD: BitDenotation; - fn node_id(&self) -> NodeId; + type BD: BitDenotation<'tcx>; + fn def_id(&self) -> DefId; fn mir(&self) -> &Mir<'tcx>; - fn flow_state(&self) -> &DataflowState; + fn flow_state(&self) -> &DataflowState<'tcx, Self::BD>; } impl<'a, 'tcx, BD> MirWithFlowState<'tcx> for DataflowBuilder<'a, 'tcx, BD> - where BD: BitDenotation + where BD: BitDenotation<'tcx> { type BD = BD; - fn node_id(&self) -> NodeId { self.node_id } + fn def_id(&self) -> DefId { self.def_id } fn mir(&self) -> &Mir<'tcx> { self.flow_state.mir() } - fn flow_state(&self) -> &DataflowState { &self.flow_state.flow_state } + fn flow_state(&self) -> &DataflowState<'tcx, Self::BD> { &self.flow_state.flow_state } } struct Graph<'a, 'tcx, MWF:'a, P> where @@ -53,14 +43,14 @@ pub(crate) fn print_borrowck_graph_to<'a, 'tcx, BD, P>( path: &Path, render_idx: P) -> io::Result<()> - where BD: BitDenotation, - P: Fn(&BD, BD::Idx) -> DebugFormatted + where BD: BitDenotation<'tcx>, + P: Fn(&BD, BD::Idx) -> DebugFormatted, { let g = Graph { mbcx, phantom: PhantomData, render_idx }; let mut v = Vec::new(); dot::render(&g, &mut v)?; - debug!("print_borrowck_graph_to path: {} node_id: {}", - path.display(), mbcx.node_id); + debug!("print_borrowck_graph_to path: {} def_id: {:?}", + path.display(), mbcx.def_id); fs::write(path, v) } @@ -69,29 +59,28 @@ pub type Node = BasicBlock; #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub struct Edge { source: BasicBlock, index: usize } -fn outgoing(mir: &Mir, bb: BasicBlock) -> Vec { +fn outgoing(mir: &Mir<'_>, bb: BasicBlock) -> Vec { (0..mir[bb].terminator().successors().count()) .map(|index| Edge { source: bb, index: index}).collect() } impl<'a, 'tcx, MWF, P> 
dot::Labeller<'a> for Graph<'a, 'tcx, MWF, P> where MWF: MirWithFlowState<'tcx>, - P: Fn(&MWF::BD, ::Idx) -> DebugFormatted, + P: Fn(&MWF::BD, >::Idx) -> DebugFormatted, { type Node = Node; type Edge = Edge; - fn graph_id(&self) -> dot::Id { - dot::Id::new(format!("graph_for_node_{}", - self.mbcx.node_id())) - .unwrap() + fn graph_id(&self) -> dot::Id<'_> { + let name = graphviz_safe_def_name(self.mbcx.def_id()); + dot::Id::new(format!("graph_for_def_id_{}", name)).unwrap() } - fn node_id(&self, n: &Node) -> dot::Id { + fn node_id(&self, n: &Node) -> dot::Id<'_> { dot::Id::new(format!("bb_{}", n.index())) .unwrap() } - fn node_label(&self, n: &Node) -> dot::LabelText { + fn node_label(&self, n: &Node) -> dot::LabelText<'_> { // Node label is something like this: // +---------+----------------------------------+------------------+------------------+ // | ENTRY | MIR | GEN | KILL | @@ -115,7 +104,7 @@ impl<'a, 'tcx, MWF, P> dot::Labeller<'a> for Graph<'a, 'tcx, MWF, P> } - fn node_shape(&self, _n: &Node) -> Option { + fn node_shape(&self, _n: &Node) -> Option> { Some(dot::LabelText::label("none")) } @@ -128,14 +117,14 @@ impl<'a, 'tcx, MWF, P> dot::Labeller<'a> for Graph<'a, 'tcx, MWF, P> impl<'a, 'tcx, MWF, P> Graph<'a, 'tcx, MWF, P> where MWF: MirWithFlowState<'tcx>, - P: Fn(&MWF::BD, ::Idx) -> DebugFormatted, + P: Fn(&MWF::BD, >::Idx) -> DebugFormatted, { /// Generate the node label fn node_label_internal(&self, n: &Node, w: &mut W, block: BasicBlock, - mir: &Mir) -> io::Result<()> { + mir: &Mir<'_>) -> io::Result<()> { // Header rows const HDRS: [&str; 4] = ["ENTRY", "MIR", "BLOCK GENS", "BLOCK KILLS"]; const HDR_FMT: &str = "bgcolor=\"grey\""; @@ -155,12 +144,12 @@ where MWF: MirWithFlowState<'tcx>, Ok(()) } - /// Build the verbose row: full MIR data, and detailed gen/kill/entry sets + /// Builds the verbose row: full MIR data, and detailed gen/kill/entry sets. fn node_label_verbose_row(&self, n: &Node, w: &mut W, block: BasicBlock, - mir: &Mir) + mir: &Mir<'_>) -> io::Result<()> { let i = n.index(); @@ -205,12 +194,12 @@ where MWF: MirWithFlowState<'tcx>, Ok(()) } - /// Build the summary row: terminator, gen/kill/entry bit sets + /// Builds the summary row: terminator, gen/kill/entry bit sets. fn node_label_final_row(&self, n: &Node, w: &mut W, block: BasicBlock, - mir: &Mir) + mir: &Mir<'_>) -> io::Result<()> { let i = n.index(); @@ -251,7 +240,7 @@ impl<'a, 'tcx, MWF, P> dot::GraphWalk<'a> for Graph<'a, 'tcx, MWF, P> { type Node = Node; type Edge = Edge; - fn nodes(&self) -> dot::Nodes { + fn nodes(&self) -> dot::Nodes<'_, Node> { self.mbcx.mir() .basic_blocks() .indices() @@ -259,7 +248,7 @@ impl<'a, 'tcx, MWF, P> dot::GraphWalk<'a> for Graph<'a, 'tcx, MWF, P> .into() } - fn edges(&self) -> dot::Edges { + fn edges(&self) -> dot::Edges<'_, Edge> { let mir = self.mbcx.mir(); mir.basic_blocks() diff --git a/src/librustc_mir/dataflow/impls/borrowed_locals.rs b/src/librustc_mir/dataflow/impls/borrowed_locals.rs index 1e279d8dd9708..9d4600d13ac13 100644 --- a/src/librustc_mir/dataflow/impls/borrowed_locals.rs +++ b/src/librustc_mir/dataflow/impls/borrowed_locals.rs @@ -1,18 +1,8 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- pub use super::*; use rustc::mir::*; use rustc::mir::visit::Visitor; -use dataflow::BitDenotation; +use crate::dataflow::BitDenotation; /// This calculates if any part of a MIR local could have previously been borrowed. /// This means that once a local has been borrowed, its bit will be set @@ -36,7 +26,7 @@ impl<'a, 'tcx: 'a> HaveBeenBorrowedLocals<'a, 'tcx> { } } -impl<'a, 'tcx> BitDenotation for HaveBeenBorrowedLocals<'a, 'tcx> { +impl<'a, 'tcx> BitDenotation<'tcx> for HaveBeenBorrowedLocals<'a, 'tcx> { type Idx = Local; fn name() -> &'static str { "has_been_borrowed_locals" } fn bits_per_block(&self) -> usize { @@ -48,7 +38,7 @@ impl<'a, 'tcx> BitDenotation for HaveBeenBorrowedLocals<'a, 'tcx> { } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, Local>, loc: Location) { let stmt = &self.mir[loc.block].statements[loc.statement_index]; @@ -64,18 +54,20 @@ impl<'a, 'tcx> BitDenotation for HaveBeenBorrowedLocals<'a, 'tcx> { } fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, Local>, loc: Location) { BorrowedLocalsVisitor { sets, }.visit_terminator(loc.block, self.mir[loc.block].terminator(), loc); } - fn propagate_call_return(&self, - _in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - _dest_place: &mir::Place) { + fn propagate_call_return( + &self, + _in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + _dest_place: &mir::Place<'tcx>, + ) { // Nothing to do when a call returns successfully } } @@ -100,9 +92,8 @@ struct BorrowedLocalsVisitor<'b, 'c: 'b> { fn find_local<'tcx>(place: &Place<'tcx>) -> Option { match *place { - Place::Local(l) => Some(l), - Place::Promoted(_) | - Place::Static(..) => None, + Place::Base(PlaceBase::Local(l)) => Some(l), + Place::Base(PlaceBase::Static(..)) => None, Place::Projection(ref proj) => { match proj.elem { ProjectionElem::Deref => None, diff --git a/src/librustc_mir/dataflow/impls/borrows.rs b/src/librustc_mir/dataflow/impls/borrows.rs index 5e78ef03c2c6b..5a3105ed16049 100644 --- a/src/librustc_mir/dataflow/impls/borrows.rs +++ b/src/librustc_mir/dataflow/impls/borrows.rs @@ -1,18 +1,7 @@ -// Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::borrow_set::{BorrowSet, BorrowData}; -use borrow_check::place_ext::PlaceExt; - -use rustc; -use rustc::mir::{self, Location, Place, Mir}; +use crate::borrow_check::borrow_set::{BorrowSet, BorrowData}; +use crate::borrow_check::place_ext::PlaceExt; + +use rustc::mir::{self, Location, Place, PlaceBase, Mir}; use rustc::ty::TyCtxt; use rustc::ty::RegionVid; @@ -20,10 +9,11 @@ use rustc_data_structures::bit_set::{BitSet, BitSetOperator}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use dataflow::{BitDenotation, BlockSets, InitialFlow}; -pub use dataflow::indexes::BorrowIndex; -use borrow_check::nll::region_infer::RegionInferenceContext; -use borrow_check::nll::ToRegionVid; +use crate::dataflow::{BitDenotation, BlockSets, InitialFlow}; +pub use crate::dataflow::indexes::BorrowIndex; +use crate::borrow_check::nll::region_infer::RegionInferenceContext; +use crate::borrow_check::nll::ToRegionVid; +use crate::borrow_check::places_conflict; use std::rc::Rc; @@ -173,7 +163,7 @@ impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> { /// Add all borrows to the kill set, if those borrows are out of scope at `location`. /// That means they went out of a nonlexical scope fn kill_loans_out_of_scope_at_location(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, BorrowIndex>, location: Location) { // NOTE: The state associated with a given `location` // reflects the dataflow on entry to the statement. @@ -191,17 +181,55 @@ impl<'a, 'gcx, 'tcx> Borrows<'a, 'gcx, 'tcx> { } } - fn kill_borrows_on_local(&self, - sets: &mut BlockSets, - local: &rustc::mir::Local) - { - if let Some(borrow_indexes) = self.borrow_set.local_map.get(local) { - sets.kill_all(borrow_indexes); + /// Kill any borrows that conflict with `place`. + fn kill_borrows_on_place( + &self, + sets: &mut BlockSets<'_, BorrowIndex>, + place: &Place<'tcx> + ) { + debug!("kill_borrows_on_place: place={:?}", place); + // Handle the `Place::Local(..)` case first and exit early. + if let Place::Base(PlaceBase::Local(local)) = place { + if let Some(borrow_indices) = self.borrow_set.local_map.get(&local) { + debug!("kill_borrows_on_place: borrow_indices={:?}", borrow_indices); + sets.kill_all(borrow_indices); + return; + } + } + + // Otherwise, look at all borrows that are live and if they conflict with the assignment + // into our place then we can kill them. + let mut borrows = sets.on_entry.clone(); + let _ = borrows.union(sets.gen_set); + for borrow_index in borrows.iter() { + let borrow_data = &self.borrows()[borrow_index]; + debug!( + "kill_borrows_on_place: borrow_index={:?} borrow_data={:?}", + borrow_index, borrow_data, + ); + + // By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given + // pair of array indices are unequal, so that when `places_conflict` returns true, we + // will be assured that two places being compared definitely denotes the same sets of + // locations. 
+ if places_conflict::places_conflict( + self.tcx, + self.mir, + &borrow_data.borrowed_place, + place, + places_conflict::PlaceConflictBias::NoOverlap, + ) { + debug!( + "kill_borrows_on_place: (kill) borrow_index={:?} borrow_data={:?}", + borrow_index, borrow_data, + ); + sets.kill(borrow_index); + } } } } -impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx> BitDenotation<'tcx> for Borrows<'a, 'gcx, 'tcx> { type Idx = BorrowIndex; fn name() -> &'static str { "borrows" } fn bits_per_block(&self) -> usize { @@ -215,14 +243,14 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { } fn before_statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, BorrowIndex>, location: Location) { debug!("Borrows::before_statement_effect sets: {:?} location: {:?}", sets, location); self.kill_loans_out_of_scope_at_location(sets, location); } - fn statement_effect(&self, sets: &mut BlockSets, location: Location) { - debug!("Borrows::statement_effect sets: {:?} location: {:?}", sets, location); + fn statement_effect(&self, sets: &mut BlockSets<'_, BorrowIndex>, location: Location) { + debug!("Borrows::statement_effect: sets={:?} location={:?}", sets, location); let block = &self.mir.basic_blocks().get(location.block).unwrap_or_else(|| { panic!("could not find block at location {:?}", location); @@ -231,20 +259,12 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { panic!("could not find statement at location {:?}"); }); + debug!("Borrows::statement_effect: stmt={:?}", stmt); match stmt.kind { mir::StatementKind::Assign(ref lhs, ref rhs) => { // Make sure there are no remaining borrows for variables // that are assigned over. - if let Place::Local(ref local) = *lhs { - // FIXME: Handle the case in which we're assigning over - // a projection (`foo.bar`). - self.kill_borrows_on_local(sets, local); - } - - // NOTE: if/when the Assign case is revised to inspect - // the assigned_place here, make sure to also - // re-consider the current implementations of the - // propagate_call_return method. + self.kill_borrows_on_place(sets, lhs); if let mir::Rvalue::Ref(_, _, ref place) = **rhs { if place.ignore_borrow( @@ -259,39 +279,19 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { }); sets.gen(*index); - - // Issue #46746: Two-phase borrows handles - // stmts of form `Tmp = &mut Borrow` ... - match lhs { - Place::Promoted(_) | - Place::Local(..) | Place::Static(..) => {} // okay - Place::Projection(..) => { - // ... can assign into projections, - // e.g., `box (&mut _)`. Current - // conservative solution: force - // immediate activation here. - sets.gen(*index); - } - } } } mir::StatementKind::StorageDead(local) => { // Make sure there are no remaining borrows for locals that // are gone out of scope. - self.kill_borrows_on_local(sets, &local) + self.kill_borrows_on_place(sets, &Place::Base(PlaceBase::Local(local))); } - mir::StatementKind::InlineAsm { ref outputs, ref asm, .. } => { - for (output, kind) in outputs.iter().zip(&asm.outputs) { + mir::StatementKind::InlineAsm(ref asm) => { + for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) { if !kind.is_indirect && !kind.is_rw { - // Make sure there are no remaining borrows for direct - // output variables. - if let Place::Local(ref local) = *output { - // FIXME: Handle the case in which we're assigning over - // a projection (`foo.bar`). 
- self.kill_borrows_on_local(sets, local); - } + self.kill_borrows_on_place(sets, output); } } } @@ -300,7 +300,6 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { mir::StatementKind::SetDiscriminant { .. } | mir::StatementKind::StorageLive(..) | mir::StatementKind::Retag { .. } | - mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => {} @@ -308,24 +307,21 @@ impl<'a, 'gcx, 'tcx> BitDenotation for Borrows<'a, 'gcx, 'tcx> { } fn before_terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, BorrowIndex>, location: Location) { debug!("Borrows::before_terminator_effect sets: {:?} location: {:?}", sets, location); self.kill_loans_out_of_scope_at_location(sets, location); } - fn terminator_effect(&self, _: &mut BlockSets, _: Location) {} + fn terminator_effect(&self, _: &mut BlockSets<'_, BorrowIndex>, _: Location) {} - fn propagate_call_return(&self, - _in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - _dest_place: &mir::Place) { - // there are no effects on borrows from method call return... - // - // ... but if overwriting a place can affect flow state, then - // latter is not true; see NOTE on Assign case in - // statement_effect_on_borrows. + fn propagate_call_return( + &self, + _in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + _dest_place: &mir::Place<'tcx>, + ) { } } @@ -342,4 +338,3 @@ impl<'a, 'gcx, 'tcx> InitialFlow for Borrows<'a, 'gcx, 'tcx> { false // bottom = nothing is reserved or activated yet } } - diff --git a/src/librustc_mir/dataflow/impls/mod.rs b/src/librustc_mir/dataflow/impls/mod.rs index c29a855b1d2eb..4dcfb3f1a7fc3 100644 --- a/src/librustc_mir/dataflow/impls/mod.rs +++ b/src/librustc_mir/dataflow/impls/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Dataflow analyses are built upon some interpretation of the //! bitvectors attached to each basic block, represented via a //! zero-sized structure. @@ -19,7 +9,7 @@ use rustc_data_structures::indexed_vec::Idx; use super::MoveDataParamEnv; -use util::elaborate_drops::DropFlagState; +use crate::util::elaborate_drops::DropFlagState; use super::move_paths::{HasMoveData, MoveData, MovePathIndex, InitIndex}; use super::move_paths::{LookupResult, InitKind}; @@ -153,13 +143,6 @@ impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'gcx, 't /// initialized upon reaching a particular point in the control flow /// for a function. /// -/// FIXME: Note that once flow-analysis is complete, this should be -/// the set-complement of MaybeUninitializedPlaces; thus we can get rid -/// of one or the other of these two. I'm inclined to get rid of -/// MaybeUninitializedPlaces, simply because the sets will tend to be -/// smaller in this analysis and thus easier for humans to process -/// when debugging. -/// /// For example, in code like the following, we have corresponding /// dataflow information shown in the right-hand comments. 
/// @@ -261,7 +244,7 @@ impl<'a, 'gcx, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'gcx, 'tcx> impl<'a, 'gcx, 'tcx> MaybeInitializedPlaces<'a, 'gcx, 'tcx> { - fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex, state: DropFlagState) { match state { @@ -272,7 +255,7 @@ impl<'a, 'gcx, 'tcx> MaybeInitializedPlaces<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> MaybeUninitializedPlaces<'a, 'gcx, 'tcx> { - fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex, state: DropFlagState) { match state { @@ -283,7 +266,7 @@ impl<'a, 'gcx, 'tcx> MaybeUninitializedPlaces<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> { - fn update_bits(sets: &mut BlockSets, path: MovePathIndex, + fn update_bits(sets: &mut BlockSets<'_, MovePathIndex>, path: MovePathIndex, state: DropFlagState) { match state { @@ -293,7 +276,7 @@ impl<'a, 'gcx, 'tcx> DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> { } } -impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx> BitDenotation<'tcx> for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { type Idx = MovePathIndex; fn name() -> &'static str { "maybe_init" } fn bits_per_block(&self) -> usize { @@ -310,7 +293,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -321,7 +304,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { } fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -331,11 +314,13 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { ) } - fn propagate_call_return(&self, - in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_place: &mir::Place) { + fn propagate_call_return( + &self, + in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_place: &mir::Place<'tcx>, + ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 1 (initialized). 
on_lookup_result_bits(self.tcx, self.mir, self.move_data(), @@ -344,7 +329,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeInitializedPlaces<'a, 'gcx, 'tcx> { } } -impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx> BitDenotation<'tcx> for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> { type Idx = MovePathIndex; fn name() -> &'static str { "maybe_uninit" } fn bits_per_block(&self) -> usize { @@ -366,7 +351,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -377,7 +362,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> } fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -387,11 +372,13 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> ) } - fn propagate_call_return(&self, - in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_place: &mir::Place) { + fn propagate_call_return( + &self, + in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_place: &mir::Place<'tcx>, + ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 0 (initialized). on_lookup_result_bits(self.tcx, self.mir, self.move_data(), @@ -400,7 +387,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for MaybeUninitializedPlaces<'a, 'gcx, 'tcx> } } -impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx> BitDenotation<'tcx> for DefinitelyInitializedPlaces<'a, 'gcx, 'tcx> { type Idx = MovePathIndex; fn name() -> &'static str { "definite_init" } fn bits_per_block(&self) -> usize { @@ -420,7 +407,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tc } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -431,7 +418,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tc } fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, MovePathIndex>, location: Location) { drop_flag_effects_for_location( @@ -441,11 +428,13 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tc ) } - fn propagate_call_return(&self, - in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - dest_place: &mir::Place) { + fn propagate_call_return( + &self, + in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + dest_place: &mir::Place<'tcx>, + ) { // when a call returns successfully, that means we need to set // the bits for that dest_place to 1 (initialized). 
on_lookup_result_bits(self.tcx, self.mir, self.move_data(), @@ -454,7 +443,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for DefinitelyInitializedPlaces<'a, 'gcx, 'tc } } -impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> { +impl<'a, 'gcx, 'tcx> BitDenotation<'tcx> for EverInitializedPlaces<'a, 'gcx, 'tcx> { type Idx = InitIndex; fn name() -> &'static str { "ever_init" } fn bits_per_block(&self) -> usize { @@ -468,7 +457,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> { } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, InitIndex>, location: Location) { let (_, mir, move_data) = (self.tcx, self.mir, self.move_data()); let stmt = &mir[location.block].statements[location.statement_index]; @@ -504,7 +493,8 @@ impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> { // storagedeads after everything ends, so if we don't regard the // storagelive as killing storage, we would have a multiple assignment // to immutable data error. - if let LookupResult::Exact(mpi) = rev_lookup.find(&mir::Place::Local(local)) { + if let LookupResult::Exact(mpi) = + rev_lookup.find(&mir::Place::Base(mir::PlaceBase::Local(local))) { debug!("stmt {:?} at loc {:?} clears the ever initialized status of {:?}", stmt, location, &init_path_map[mpi]); sets.kill_all(&init_path_map[mpi]); @@ -515,7 +505,7 @@ impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> { } fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, InitIndex>, location: Location) { let (mir, move_data) = (self.mir, self.move_data()); @@ -530,11 +520,13 @@ impl<'a, 'gcx, 'tcx> BitDenotation for EverInitializedPlaces<'a, 'gcx, 'tcx> { ); } - fn propagate_call_return(&self, - in_out: &mut BitSet, - call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - _dest_place: &mir::Place) { + fn propagate_call_return( + &self, + in_out: &mut BitSet, + call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + _dest_place: &mir::Place<'tcx>, + ) { let move_data = self.move_data(); let bits_per_block = self.bits_per_block(); let init_loc_map = &move_data.init_loc_map; diff --git a/src/librustc_mir/dataflow/impls/storage_liveness.rs b/src/librustc_mir/dataflow/impls/storage_liveness.rs index c8faa34df8a2b..6b8eb6f17f6c1 100644 --- a/src/librustc_mir/dataflow/impls/storage_liveness.rs +++ b/src/librustc_mir/dataflow/impls/storage_liveness.rs @@ -1,17 +1,7 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- pub use super::*; use rustc::mir::*; -use dataflow::BitDenotation; +use crate::dataflow::BitDenotation; #[derive(Copy, Clone)] pub struct MaybeStorageLive<'a, 'tcx: 'a> { @@ -29,7 +19,7 @@ impl<'a, 'tcx: 'a> MaybeStorageLive<'a, 'tcx> { } } -impl<'a, 'tcx> BitDenotation for MaybeStorageLive<'a, 'tcx> { +impl<'a, 'tcx> BitDenotation<'tcx> for MaybeStorageLive<'a, 'tcx> { type Idx = Local; fn name() -> &'static str { "maybe_storage_live" } fn bits_per_block(&self) -> usize { @@ -41,7 +31,7 @@ impl<'a, 'tcx> BitDenotation for MaybeStorageLive<'a, 'tcx> { } fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, Local>, loc: Location) { let stmt = &self.mir[loc.block].statements[loc.statement_index]; @@ -53,16 +43,18 @@ impl<'a, 'tcx> BitDenotation for MaybeStorageLive<'a, 'tcx> { } fn terminator_effect(&self, - _sets: &mut BlockSets, + _sets: &mut BlockSets<'_, Local>, _loc: Location) { // Terminators have no effect } - fn propagate_call_return(&self, - _in_out: &mut BitSet, - _call_bb: mir::BasicBlock, - _dest_bb: mir::BasicBlock, - _dest_place: &mir::Place) { + fn propagate_call_return( + &self, + _in_out: &mut BitSet, + _call_bb: mir::BasicBlock, + _dest_bb: mir::BasicBlock, + _dest_place: &mir::Place<'tcx>, + ) { // Nothing to do when a call returns successfully } } diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs index bd842669a1f9b..af0e3f5a27091 100644 --- a/src/librustc_mir/dataflow/mod.rs +++ b/src/librustc_mir/dataflow/mod.rs @@ -1,19 +1,10 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use syntax::ast::{self, MetaItem}; use rustc_data_structures::bit_set::{BitSet, BitSetOperator, HybridBitSet}; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::work_queue::WorkQueue; +use rustc::hir::def_id::DefId; use rustc::ty::{self, TyCtxt}; use rustc::mir::{self, Mir, BasicBlock, BasicBlockData, Location, Statement, Terminator}; use rustc::mir::traversal; @@ -44,9 +35,11 @@ pub mod move_paths; pub(crate) use self::move_paths::indexes; -pub(crate) struct DataflowBuilder<'a, 'tcx: 'a, BD> where BD: BitDenotation +pub(crate) struct DataflowBuilder<'a, 'tcx: 'a, BD> +where + BD: BitDenotation<'tcx> { - node_id: ast::NodeId, + def_id: DefId, flow_state: DataflowAnalysis<'a, 'tcx, BD>, print_preflow_to: Option, print_postflow_to: Option, @@ -66,12 +59,12 @@ impl DebugFormatted { } impl fmt::Debug for DebugFormatted { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { write!(w, "{}", self.0) } } -pub(crate) trait Dataflow { +pub(crate) trait Dataflow<'tcx, BD: BitDenotation<'tcx>> { /// Sets up and runs the dataflow problem, using `p` to render results if /// implementation so chooses. fn dataflow

(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> DebugFormatted { @@ -87,7 +80,9 @@ pub(crate) trait Dataflow { fn propagate(&mut self); } -impl<'a, 'tcx: 'a, BD> Dataflow for DataflowBuilder<'a, 'tcx, BD> where BD: BitDenotation +impl<'a, 'tcx: 'a, BD> Dataflow<'tcx, BD> for DataflowBuilder<'a, 'tcx, BD> +where + BD: BitDenotation<'tcx> { fn dataflow

(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> DebugFormatted { self.flow_state.build_sets(); @@ -122,26 +117,26 @@ pub struct MoveDataParamEnv<'gcx, 'tcx> { pub(crate) fn do_dataflow<'a, 'gcx, 'tcx, BD, P>(tcx: TyCtxt<'a, 'gcx, 'tcx>, mir: &'a Mir<'tcx>, - node_id: ast::NodeId, + def_id: DefId, attributes: &[ast::Attribute], dead_unwinds: &BitSet, bd: BD, p: P) - -> DataflowResults - where BD: BitDenotation + InitialFlow, + -> DataflowResults<'tcx, BD> + where BD: BitDenotation<'tcx> + InitialFlow, P: Fn(&BD, BD::Idx) -> DebugFormatted { let flow_state = DataflowAnalysis::new(mir, dead_unwinds, bd); - flow_state.run(tcx, node_id, attributes, p) + flow_state.run(tcx, def_id, attributes, p) } -impl<'a, 'gcx: 'tcx, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation +impl<'a, 'gcx: 'tcx, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation<'tcx> { pub(crate) fn run

(self, tcx: TyCtxt<'a, 'gcx, 'tcx>, - node_id: ast::NodeId, + def_id: DefId, attributes: &[ast::Attribute], - p: P) -> DataflowResults + p: P) -> DataflowResults<'tcx, BD> where P: Fn(&BD, BD::Idx) -> DebugFormatted { let name_found = |sess: &Session, attrs: &[ast::Attribute], name| -> Option { @@ -151,7 +146,7 @@ impl<'a, 'gcx: 'tcx, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitD } else { sess.span_err( item.span, - &format!("{} attribute requires a path", item.ident)); + &format!("{} attribute requires a path", item.path)); return None; } } @@ -164,7 +159,7 @@ impl<'a, 'gcx: 'tcx, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitD name_found(tcx.sess, attributes, "borrowck_graphviz_postflow"); let mut mbcx = DataflowBuilder { - node_id, + def_id, print_preflow_to, print_postflow_to, flow_state: self, }; @@ -173,12 +168,12 @@ impl<'a, 'gcx: 'tcx, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitD } } -struct PropagationContext<'b, 'a: 'b, 'tcx: 'a, O> where O: 'b + BitDenotation +struct PropagationContext<'b, 'a: 'b, 'tcx: 'a, O> where O: 'b + BitDenotation<'tcx> { builder: &'b mut DataflowAnalysis<'a, 'tcx, O>, } -impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation +impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation<'tcx> { fn propagate(&mut self) { let mut temp = BitSet::new_empty(self.flow_state.sets.bits_per_block); @@ -228,7 +223,7 @@ impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation } } -impl<'b, 'a: 'b, 'tcx: 'a, BD> PropagationContext<'b, 'a, 'tcx, BD> where BD: BitDenotation +impl<'b, 'a: 'b, 'tcx: 'a, BD> PropagationContext<'b, 'a, 'tcx, BD> where BD: BitDenotation<'tcx> { fn walk_cfg(&mut self, in_out: &mut BitSet) { let mut dirty_queue: WorkQueue = @@ -259,7 +254,7 @@ fn dataflow_path(context: &str, path: &str) -> PathBuf { path } -impl<'a, 'tcx: 'a, BD> DataflowBuilder<'a, 'tcx, BD> where BD: BitDenotation +impl<'a, 'tcx: 'a, BD> DataflowBuilder<'a, 'tcx, BD> where BD: BitDenotation<'tcx> { fn pre_dataflow_instrumentation

{}
", class, tooltip).unwrap(); } - write_header(class, &mut out).unwrap(); - - let lexer = match lexer::StringReader::new_without_err(&sess, fm, None, "Output from rustc:") { - Ok(l) => l, - Err(_) => { - let first_line = src.lines().next().unwrap_or_else(|| ""); - let mut err = sess.span_diagnostic - .struct_warn(&format!("Invalid doc comment starting with: `{}`\n\ - (Ignoring this codeblock)", - first_line)); - err.emit(); - return String::new(); + + let sess = parse::ParseSess::new(FilePathMapping::empty()); + let fm = sess.source_map().new_source_file( + FileName::Custom(String::from("rustdoc-highlighting")), + src.to_owned(), + ); + let highlight_result = + lexer::StringReader::new_or_buffered_errs(&sess, fm, None).and_then(|lexer| { + let mut classifier = Classifier::new(lexer, sess.source_map()); + + let mut highlighted_source = vec![]; + if classifier.write_source(&mut highlighted_source).is_err() { + Err(classifier.lexer.buffer_fatal_errors()) + } else { + Ok(String::from_utf8_lossy(&highlighted_source).into_owned()) + } + }); + + match highlight_result { + Ok(highlighted_source) => { + write_header(class, &mut out).unwrap(); + write!(out, "{}", highlighted_source).unwrap(); + if let Some(extension) = extension { + write!(out, "{}", extension).unwrap(); + } + write_footer(&mut out).unwrap(); } - }; - let mut classifier = Classifier::new(lexer, sess.source_map()); - if classifier.write_source(&mut out).is_err() { - classifier.lexer.emit_fatal_errors(); - return format!("
{}
", src); - } + Err(errors) => { + // If errors are encountered while trying to highlight, cancel the errors and just emit + // the unhighlighted source. The errors will have already been reported in the + // `check-code-block-syntax` pass. + for mut error in errors { + error.cancel(); + } - if let Some(extension) = extension { - write!(out, "{}", extension).unwrap(); + write!(out, "
{}
", src).unwrap(); + } } - write_footer(&mut out).unwrap(); + String::from_utf8_lossy(&out[..]).into_owned() } @@ -123,7 +124,7 @@ trait Writer { /// Called at the end of a span of highlighted text. fn exit_span(&mut self) -> io::Result<()>; - /// Called for a span of text. If the text should be highlighted differently from the + /// Called for a span of text. If the text should be highlighted differently from the /// surrounding text, then the `Class` argument will be a value other than `None`. /// /// The following sequences of callbacks are equivalent: @@ -161,6 +162,17 @@ impl Writer for U { } } +enum HighlightError { + LexError, + IoError(io::Error), +} + +impl From for HighlightError { + fn from(err: io::Error) -> Self { + HighlightError::IoError(err) + } +} + impl<'a> Classifier<'a> { fn new(lexer: lexer::StringReader<'a>, source_map: &'a SourceMap) -> Classifier<'a> { Classifier { @@ -172,17 +184,11 @@ impl<'a> Classifier<'a> { } } - /// Gets the next token out of the lexer, emitting fatal errors if lexing fails. - fn try_next_token(&mut self) -> io::Result { + /// Gets the next token out of the lexer. + fn try_next_token(&mut self) -> Result { match self.lexer.try_next_token() { Ok(tas) => Ok(tas), - Err(_) => { - let mut err = self.lexer.sess.span_diagnostic - .struct_warn("Backing out of syntax highlighting"); - err.note("You probably did not intend to render this as a rust code-block"); - err.emit(); - Err(io::Error::new(io::ErrorKind::Other, "")) - } + Err(_) => Err(HighlightError::LexError), } } @@ -195,7 +201,7 @@ impl<'a> Classifier<'a> { /// source. fn write_source(&mut self, out: &mut W) - -> io::Result<()> { + -> Result<(), HighlightError> { loop { let next = self.try_next_token()?; if next.tok == token::Eof { @@ -212,7 +218,7 @@ impl<'a> Classifier<'a> { fn write_token(&mut self, out: &mut W, tas: TokenAndSpan) - -> io::Result<()> { + -> Result<(), HighlightError> { let klass = match tas.tok { token::Shebang(s) => { out.string(Escape(&s.as_str()), Class::None)?; @@ -306,7 +312,7 @@ impl<'a> Classifier<'a> { token::Literal(lit, _suf) => { match lit { // Text literals. - token::Byte(..) | token::Char(..) | + token::Byte(..) | token::Char(..) | token::Err(..) | token::ByteStr(..) | token::ByteStrRaw(..) | token::Str_(..) | token::StrRaw(..) => Class::String, @@ -351,7 +357,9 @@ impl<'a> Classifier<'a> { // Anything that didn't return above is the simple case where we the // class just spans a single token, so we can use the `string` method. - out.string(Escape(&self.snip(tas.sp)), klass) + out.string(Escape(&self.snip(tas.sp)), klass)?; + + Ok(()) } // Helper function to get a snippet from the source_map. diff --git a/src/librustdoc/html/item_type.rs b/src/librustdoc/html/item_type.rs index acb8f6a66dfcb..366e60b3ad920 100644 --- a/src/librustdoc/html/item_type.rs +++ b/src/librustdoc/html/item_type.rs @@ -1,18 +1,8 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Item types. use std::fmt; use syntax::ext::base::MacroKind; -use clean; +use crate::clean; /// Item type. Corresponds to `clean::ItemEnum` variants. /// @@ -25,7 +15,7 @@ use clean; /// module headings. 
If you are adding to this enum and want to ensure that the sidebar also prints /// a heading, edit the listing in `html/render.rs`, function `sidebar_module`. This uses an /// ordering based on a helper function inside `item_module`, in the same file. -#[derive(Copy, PartialEq, Clone, Debug)] +#[derive(Copy, PartialEq, Eq, Clone, Debug, PartialOrd, Ord)] pub enum ItemType { Module = 0, ExternCrate = 1, @@ -52,6 +42,7 @@ pub enum ItemType { Existential = 22, ProcAttribute = 23, ProcDerive = 24, + TraitAlias = 25, } @@ -96,6 +87,7 @@ impl<'a> From<&'a clean::Item> for ItemType { clean::AssociatedTypeItem(..) => ItemType::AssociatedType, clean::ForeignTypeItem => ItemType::ForeignType, clean::KeywordItem(..) => ItemType::Keyword, + clean::TraitAliasItem(..) => ItemType::TraitAlias, clean::ProcMacroItem(ref mac) => match mac.kind { MacroKind::Bang => ItemType::Macro, MacroKind::Attr => ItemType::ProcAttribute, @@ -110,20 +102,21 @@ impl<'a> From<&'a clean::Item> for ItemType { impl From for ItemType { fn from(kind: clean::TypeKind) -> ItemType { match kind { - clean::TypeKind::Struct => ItemType::Struct, - clean::TypeKind::Union => ItemType::Union, - clean::TypeKind::Enum => ItemType::Enum, - clean::TypeKind::Function => ItemType::Function, - clean::TypeKind::Trait => ItemType::Trait, - clean::TypeKind::Module => ItemType::Module, - clean::TypeKind::Static => ItemType::Static, - clean::TypeKind::Const => ItemType::Constant, - clean::TypeKind::Variant => ItemType::Variant, - clean::TypeKind::Typedef => ItemType::Typedef, - clean::TypeKind::Foreign => ItemType::ForeignType, - clean::TypeKind::Macro => ItemType::Macro, - clean::TypeKind::Attr => ItemType::ProcAttribute, - clean::TypeKind::Derive => ItemType::ProcDerive, + clean::TypeKind::Struct => ItemType::Struct, + clean::TypeKind::Union => ItemType::Union, + clean::TypeKind::Enum => ItemType::Enum, + clean::TypeKind::Function => ItemType::Function, + clean::TypeKind::Trait => ItemType::Trait, + clean::TypeKind::Module => ItemType::Module, + clean::TypeKind::Static => ItemType::Static, + clean::TypeKind::Const => ItemType::Constant, + clean::TypeKind::Variant => ItemType::Variant, + clean::TypeKind::Typedef => ItemType::Typedef, + clean::TypeKind::Foreign => ItemType::ForeignType, + clean::TypeKind::Macro => ItemType::Macro, + clean::TypeKind::Attr => ItemType::ProcAttribute, + clean::TypeKind::Derive => ItemType::ProcDerive, + clean::TypeKind::TraitAlias => ItemType::TraitAlias, } } } @@ -156,6 +149,7 @@ impl ItemType { ItemType::Existential => "existential", ItemType::ProcAttribute => "attr", ItemType::ProcDerive => "derive", + ItemType::TraitAlias => "traitalias", } } @@ -170,6 +164,7 @@ impl ItemType { ItemType::Primitive | ItemType::AssociatedType | ItemType::Existential | + ItemType::TraitAlias | ItemType::ForeignType => NameSpace::Type, ItemType::ExternCrate | @@ -194,7 +189,7 @@ impl ItemType { } impl fmt::Display for ItemType { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.css_class().fmt(f) } } @@ -216,7 +211,7 @@ impl NameSpace { } impl fmt::Display for NameSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.to_static_str().fmt(f) } } diff --git a/src/librustdoc/html/layout.rs b/src/librustdoc/html/layout.rs index 37ff693bdf167..6ff3917a265ed 100644 --- a/src/librustdoc/html/layout.rs +++ b/src/librustdoc/html/layout.rs @@ -1,18 +1,9 @@ -// Copyright 2013 The Rust Project 
Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fmt; use std::io; use std::path::PathBuf; -use externalfiles::ExternalHtml; +use crate::externalfiles::ExternalHtml; +use crate::html::render::SlashChecker; #[derive(Clone)] pub struct Layout { @@ -26,16 +17,25 @@ pub struct Page<'a> { pub title: &'a str, pub css_class: &'a str, pub root_path: &'a str, + pub static_root_path: Option<&'a str>, pub description: &'a str, pub keywords: &'a str, pub resource_suffix: &'a str, + pub extra_scripts: &'a [&'a str], + pub static_extra_scripts: &'a [&'a str], } pub fn render( - dst: &mut dyn io::Write, layout: &Layout, page: &Page, sidebar: &S, t: &T, - css_file_extension: bool, themes: &[PathBuf], extra_scripts: &[&str]) - -> io::Result<()> -{ + dst: &mut dyn io::Write, + layout: &Layout, + page: &Page<'_>, + sidebar: &S, + t: &T, + css_file_extension: bool, + themes: &[PathBuf], + generate_search_filter: bool, +) -> io::Result<()> { + let static_root_path = page.static_root_path.unwrap_or(page.root_path); write!(dst, "\ \ @@ -46,20 +46,20 @@ pub fn render( \ \ {title}\ - \ - \ + \ {themes}\ - \ - \ + \ - \ - \ + \ + \ {css_extension}\ {favicon}\ {in_header}\ \ \ \ @@ -77,18 +77,17 @@ pub fn render( \
\ \
\
\ - \ + \

(&self, p: P) -> io::Result<()> where P: Fn(&BD, BD::Idx) -> DebugFormatted @@ -347,10 +342,10 @@ pub(crate) trait DataflowResultsConsumer<'a, 'tcx: 'a> { fn mir(&self) -> &'a Mir<'tcx>; } -pub fn state_for_location<'tcx, T: BitDenotation>(loc: Location, - analysis: &T, - result: &DataflowResults, - mir: &Mir<'tcx>) +pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location, + analysis: &T, + result: &DataflowResults<'tcx, T>, + mir: &Mir<'tcx>) -> BitSet { let mut on_entry = result.sets().on_entry_set_for(loc.block.index()).to_owned(); let mut kill_set = on_entry.to_hybrid(); @@ -381,25 +376,25 @@ pub fn state_for_location<'tcx, T: BitDenotation>(loc: Location, gen_set.to_dense() } -pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation +pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation<'tcx> { - flow_state: DataflowState, + flow_state: DataflowState<'tcx, O>, dead_unwinds: &'a BitSet, mir: &'a Mir<'tcx>, } -impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O> where O: BitDenotation +impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O> where O: BitDenotation<'tcx> { - pub fn results(self) -> DataflowResults { + pub fn results(self) -> DataflowResults<'tcx, O> { DataflowResults(self.flow_state) } pub fn mir(&self) -> &'a Mir<'tcx> { self.mir } } -pub struct DataflowResults(pub(crate) DataflowState) where O: BitDenotation; +pub struct DataflowResults<'tcx, O>(pub(crate) DataflowState<'tcx, O>) where O: BitDenotation<'tcx>; -impl DataflowResults { +impl<'tcx, O: BitDenotation<'tcx>> DataflowResults<'tcx, O> { pub fn sets(&self) -> &AllSets { &self.0.sets } @@ -411,7 +406,7 @@ impl DataflowResults { /// State of a dataflow analysis; couples a collection of bit sets /// with operator used to initialize and merge bits during analysis. -pub struct DataflowState +pub struct DataflowState<'tcx, O: BitDenotation<'tcx>> { /// All the sets for the analysis. (Factored into its /// own structure so that we can borrow it mutably @@ -422,7 +417,7 @@ pub struct DataflowState pub(crate) operator: O, } -impl DataflowState { +impl<'tcx, O: BitDenotation<'tcx>> DataflowState<'tcx, O> { pub(crate) fn interpret_set<'c, P>(&self, o: &'c O, set: &BitSet, @@ -531,7 +526,7 @@ impl<'a, E:Idx> BlockSets<'a, E> { impl AllSets { pub fn bits_per_block(&self) -> usize { self.bits_per_block } - pub fn for_block(&mut self, block_idx: usize) -> BlockSets { + pub fn for_block(&mut self, block_idx: usize) -> BlockSets<'_, E> { BlockSets { on_entry: &mut self.on_entry_sets[block_idx], gen_set: &mut self.gen_sets[block_idx], @@ -561,7 +556,7 @@ pub trait InitialFlow { fn bottom_value() -> bool; } -pub trait BitDenotation: BitSetOperator { +pub trait BitDenotation<'tcx>: BitSetOperator { /// Specifies what index type is used to access the bitvector. type Idx: Idx; @@ -580,21 +575,21 @@ pub trait BitDenotation: BitSetOperator { /// the block's start, not necessarily the state immediately prior /// to the statement/terminator under analysis. /// - /// In either case, the passed reference is mutable; but this is a + /// In either case, the passed reference is mutable, but this is a /// wart from using the `BlockSets` type in the API; the intention /// is that the `statement_effect` and `terminator_effect` methods /// mutate only the gen/kill sets. - /// - /// FIXME: We should consider enforcing the intention described in - /// the previous paragraph by passing the three sets in separate - /// parameters to encode their distinct mutabilities. 
+ // + // FIXME: we should consider enforcing the intention described in + // the previous paragraph by passing the three sets in separate + // parameters to encode their distinct mutabilities. fn accumulates_intrablock_state() -> bool { false } /// A name describing the dataflow analysis that this - /// BitDenotation is supporting. The name should be something - /// suitable for plugging in as part of a filename e.g., avoid + /// `BitDenotation` is supporting. The name should be something + /// suitable for plugging in as part of a filename (i.e., avoid /// space-characters or other things that tend to look bad on a - /// file system, like slashes or periods. It is also better for + /// file system, like slashes or periods). It is also better for /// the name to be reasonably short, again because it will be /// plugged into a filename. fn name() -> &'static str; @@ -622,7 +617,7 @@ pub trait BitDenotation: BitSetOperator { /// applied, in that order, before moving for the next /// statement. fn before_statement_effect(&self, - _sets: &mut BlockSets, + _sets: &mut BlockSets<'_, Self::Idx>, _location: Location) {} /// Mutates the block-sets (the flow sets for the given @@ -636,7 +631,7 @@ pub trait BitDenotation: BitSetOperator { /// `bb_data` is the sequence of statements identified by `bb` in /// the MIR. fn statement_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, Self::Idx>, location: Location); /// Similar to `terminator_effect`, except it applies @@ -651,7 +646,7 @@ pub trait BitDenotation: BitSetOperator { /// applied, in that order, before moving for the next /// terminator. fn before_terminator_effect(&self, - _sets: &mut BlockSets, + _sets: &mut BlockSets<'_, Self::Idx>, _location: Location) {} /// Mutates the block-sets (the flow sets for the given @@ -665,7 +660,7 @@ pub trait BitDenotation: BitSetOperator { /// The effects applied here cannot depend on which branch the /// terminator took. fn terminator_effect(&self, - sets: &mut BlockSets, + sets: &mut BlockSets<'_, Self::Idx>, location: Location); /// Mutates the block-sets according to the (flow-dependent) @@ -682,19 +677,21 @@ pub trait BitDenotation: BitSetOperator { /// flow-dependent, the current MIR cannot encode them via just /// GEN and KILL sets attached to the block, and so instead we add /// this extra machinery to represent the flow-dependent effect. - /// - /// FIXME: Right now this is a bit of a wart in the API. It might - /// be better to represent this as an additional gen- and - /// kill-sets associated with each edge coming out of the basic - /// block. - fn propagate_call_return(&self, - in_out: &mut BitSet, - call_bb: mir::BasicBlock, - dest_bb: mir::BasicBlock, - dest_place: &mir::Place); + // + // FIXME: right now this is a bit of a wart in the API. It might + // be better to represent this as an additional gen- and + // kill-sets associated with each edge coming out of the basic + // block. 
+ fn propagate_call_return( + &self, + in_out: &mut BitSet, + call_bb: mir::BasicBlock, + dest_bb: mir::BasicBlock, + dest_place: &mir::Place<'tcx>, + ); } -impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation +impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation<'tcx> { pub fn new(mir: &'a Mir<'tcx>, dead_unwinds: &'a BitSet, @@ -726,8 +723,7 @@ impl<'a, 'tcx, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation } } -impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation -{ +impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation<'tcx> { /// Propagates the bits of `in_out` into all the successors of `bb`, /// using bitwise operator denoted by `self.operator`. /// @@ -744,7 +740,7 @@ impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation fn propagate_bits_into_graph_successors_of( &mut self, in_out: &mut BitSet, - (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData), + (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData<'tcx>), dirty_list: &mut WorkQueue) { match bb_data.terminator().kind { diff --git a/src/librustc_mir/dataflow/move_paths/abs_domain.rs b/src/librustc_mir/dataflow/move_paths/abs_domain.rs index 186e5f5f5f0ad..b26547c4ff77e 100644 --- a/src/librustc_mir/dataflow/move_paths/abs_domain.rs +++ b/src/librustc_mir/dataflow/move_paths/abs_domain.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The move-analysis portion of borrowck needs to work in an abstract -//! domain of lifted Places. Most of the Place variants fall into a +//! domain of lifted `Place`s. Most of the `Place` variants fall into a //! one-to-one mapping between the concrete and abstract (e.g., a -//! field-deref on a local-variable, `x.field`, has the same meaning -//! in both domains). Indexed-Projections are the exception: `a[x]` +//! field-deref on a local variable, `x.field`, has the same meaning +//! in both domains). Indexed projections are the exception: `a[x]` //! needs to be treated as mapping to the same move path as `a[y]` as -//! well as `a[13]`, et cetera. +//! well as `a[13]`, etc. //! -//! (In theory the analysis could be extended to work with sets of +//! (In theory, the analysis could be extended to work with sets of //! paths, so that `a[0]` and `a[13]` could be kept distinct, while //! `a[x]` would still overlap them both. But that is not this //! representation does today.) 
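The `BitDenotation` documentation above describes the usual gen/kill formulation: each block's transfer function first removes its kill set from the state on entry and then adds its gen set, i.e. `out = (entry - kill) | gen`. A minimal illustrative sketch of that formula follows; it uses a plain `u64` bitmask rather than rustc's `BitSet`/`BlockSets` types, and the names are hypothetical, not part of the compiler's API.

```rust
/// Toy gen/kill transfer function: `out = (entry - kill) | gen`.
fn apply_transfer(entry: u64, gen_bits: u64, kill_bits: u64) -> u64 {
    (entry & !kill_bits) | gen_bits
}

fn main() {
    let entry = 0b0111;     // facts 0, 1 and 2 hold on block entry
    let gen_bits = 0b1000;  // the block generates fact 3
    let kill_bits = 0b0010; // ...and kills fact 1
    assert_eq!(apply_transfer(entry, gen_bits, kill_bits), 0b1101);
}
```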
@@ -28,8 +18,7 @@ use rustc::ty::Ty; pub struct AbstractOperand; #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] pub struct AbstractType; -pub type AbstractElem<'tcx> = - ProjectionElem<'tcx, AbstractOperand, AbstractType>; +pub type AbstractElem = ProjectionElem; pub trait Lift { type Abstract; @@ -48,7 +37,7 @@ impl<'tcx> Lift for Ty<'tcx> { fn lift(&self) -> Self::Abstract { AbstractType } } impl<'tcx> Lift for PlaceElem<'tcx> { - type Abstract = AbstractElem<'tcx>; + type Abstract = AbstractElem; fn lift(&self) -> Self::Abstract { match *self { ProjectionElem::Deref => @@ -66,7 +55,7 @@ impl<'tcx> Lift for PlaceElem<'tcx> { from_end, }, ProjectionElem::Downcast(a, u) => - ProjectionElem::Downcast(a.clone(), u.clone()), + ProjectionElem::Downcast(a, u.clone()), } } } diff --git a/src/librustc_mir/dataflow/move_paths/builder.rs b/src/librustc_mir/dataflow/move_paths/builder.rs index 7fe27e97d3d3b..2471c01e3f3d0 100644 --- a/src/librustc_mir/dataflow/move_paths/builder.rs +++ b/src/librustc_mir/dataflow/move_paths/builder.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::{self, TyCtxt}; use rustc::mir::*; use rustc::mir::tcx::RvalueInitializationState; @@ -43,13 +33,13 @@ impl<'a, 'gcx, 'tcx> MoveDataBuilder<'a, 'gcx, 'tcx> { moves: IndexVec::new(), loc_map: LocationMap::new(mir), rev_lookup: MovePathLookup { - locals: mir.local_decls.indices().map(Place::Local).map(|v| { + locals: mir.local_decls.indices().map(PlaceBase::Local).map(|v| { Self::new_move_path( &mut move_paths, &mut path_map, &mut init_path_map, None, - v, + Place::Base(v), ) }).collect(), projections: Default::default(), @@ -106,9 +96,8 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { { debug!("lookup({:?})", place); match *place { - Place::Local(local) => Ok(self.builder.data.rev_lookup.locals[local]), - Place::Promoted(..) | - Place::Static(..) => { + Place::Base(PlaceBase::Local(local)) => Ok(self.builder.data.rev_lookup.locals[local]), + Place::Base(PlaceBase::Static(..)) => { Err(MoveError::cannot_move_out_of(self.loc, Static)) } Place::Projection(ref proj) => { @@ -131,7 +120,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { let base = self.move_path_for(&proj.base)?; let mir = self.builder.mir; let tcx = self.builder.tcx; - let place_ty = proj.base.ty(mir, tcx).to_ty(tcx); + let place_ty = proj.base.ty(mir, tcx).ty; match place_ty.sty { ty::Ref(..) | ty::RawPtr(..) 
=> return Err(MoveError::cannot_move_out_of( @@ -283,26 +272,25 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { StatementKind::FakeRead(_, ref place) => { self.create_move_path(place); } - StatementKind::InlineAsm { ref outputs, ref inputs, ref asm } => { - for (output, kind) in outputs.iter().zip(&asm.outputs) { + StatementKind::InlineAsm(ref asm) => { + for (output, kind) in asm.outputs.iter().zip(&asm.asm.outputs) { if !kind.is_indirect { self.gather_init(output, InitKind::Deep); } } - for (_, input) in inputs.iter() { + for (_, input) in asm.inputs.iter() { self.gather_operand(input); } } StatementKind::StorageLive(_) => {} StatementKind::StorageDead(local) => { - self.gather_move(&Place::Local(local)); + self.gather_move(&Place::Base(PlaceBase::Local(local))); } StatementKind::SetDiscriminant{ .. } => { span_bug!(stmt.source_info.span, "SetDiscriminant should not exist during borrowck"); } StatementKind::Retag { .. } | - StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => {} } @@ -356,7 +344,7 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { TerminatorKind::Unreachable => { } TerminatorKind::Return => { - self.gather_move(&Place::Local(RETURN_PLACE)); + self.gather_move(&Place::RETURN_PLACE); } TerminatorKind::Assert { ref cond, .. } => { @@ -436,8 +424,8 @@ impl<'b, 'a, 'gcx, 'tcx> Gatherer<'b, 'a, 'gcx, 'tcx> { Place::Projection(box Projection { base, elem: ProjectionElem::Field(_, _), - }) if match base.ty(self.builder.mir, self.builder.tcx).to_ty(self.builder.tcx).sty { - ty::TyKind::Adt(def, _) if def.is_union() => true, + }) if match base.ty(self.builder.mir, self.builder.tcx).ty.sty { + ty::Adt(def, _) if def.is_union() => true, _ => false, } => base, // Otherwise, lookup the place. diff --git a/src/librustc_mir/dataflow/move_paths/mod.rs b/src/librustc_mir/dataflow/move_paths/mod.rs index 2a026b8f52c2a..5806a01c687cb 100644 --- a/src/librustc_mir/dataflow/move_paths/mod.rs +++ b/src/librustc_mir/dataflow/move_paths/mod.rs @@ -1,14 +1,3 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - use rustc::ty::{self, TyCtxt}; use rustc::mir::*; use rustc::util::nodemap::FxHashMap; @@ -34,7 +23,7 @@ pub(crate) mod indexes { use rustc_data_structures::indexed_vec::Idx; macro_rules! new_index { - ($Index:ident, $debug_name:expr) => { + ($(#[$attrs:meta])* $Index:ident, $debug_name:expr) => { #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub struct $Index(NonZeroUsize); @@ -48,24 +37,36 @@ pub(crate) mod indexes { } impl fmt::Debug for $Index { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{}{}", $debug_name, self.index()) } } } } - /// Index into MovePathData.move_paths - new_index!(MovePathIndex, "mp"); - - /// Index into MoveData.moves. - new_index!(MoveOutIndex, "mo"); - - /// Index into MoveData.inits. - new_index!(InitIndex, "in"); - - /// Index into Borrows.locations - new_index!(BorrowIndex, "bw"); + new_index!( + /// Index into MovePathData.move_paths + MovePathIndex, + "mp" + ); + + new_index!( + /// Index into MoveData.moves. + MoveOutIndex, + "mo" + ); + + new_index!( + /// Index into MoveData.inits. 
+ InitIndex, + "in" + ); + + new_index!( + /// Index into Borrows.locations + BorrowIndex, + "bw" + ); } pub use self::indexes::MovePathIndex; @@ -73,7 +74,7 @@ pub use self::indexes::MoveOutIndex; pub use self::indexes::InitIndex; impl MoveOutIndex { - pub fn move_path_index(&self, move_data: &MoveData) -> MovePathIndex { + pub fn move_path_index(&self, move_data: &MoveData<'_>) -> MovePathIndex { move_data.moves[*self].path } } @@ -99,7 +100,10 @@ pub struct MovePath<'tcx> { } impl<'tcx> MovePath<'tcx> { - pub fn parents(&self, move_paths: &IndexVec) -> Vec { + pub fn parents( + &self, + move_paths: &IndexVec>, + ) -> Vec { let mut parents = Vec::new(); let mut curr_parent = self.parent; @@ -113,7 +117,7 @@ impl<'tcx> MovePath<'tcx> { } impl<'tcx> fmt::Debug for MovePath<'tcx> { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { write!(w, "MovePath {{")?; if let Some(parent) = self.parent { write!(w, " parent: {:?},", parent)?; @@ -129,7 +133,7 @@ impl<'tcx> fmt::Debug for MovePath<'tcx> { } impl<'tcx> fmt::Display for MovePath<'tcx> { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { write!(w, "{:?}", self.place) } } @@ -144,7 +148,7 @@ pub struct MoveData<'tcx> { /// particular path being moved.) pub loc_map: LocationMap>, pub path_map: IndexVec>, - pub rev_lookup: MovePathLookup<'tcx>, + pub rev_lookup: MovePathLookup, pub inits: IndexVec, /// Each Location `l` is mapped to the Inits that are effects /// of executing the code at `l`. @@ -177,7 +181,7 @@ impl IndexMut for LocationMap { } impl LocationMap where T: Default + Clone { - fn new(mir: &Mir) -> Self { + fn new(mir: &Mir<'_>) -> Self { LocationMap { map: mir.basic_blocks().iter().map(|block| { vec![T::default(); block.statements.len()+1] @@ -201,7 +205,7 @@ pub struct MoveOut { } impl fmt::Debug for MoveOut { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{:?}@{:?}", self.path, self.source) } } @@ -238,7 +242,7 @@ pub enum InitKind { } impl fmt::Debug for Init { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { write!(fmt, "{:?}@{:?} ({:?})", self.path, self.location, self.kind) } } @@ -254,7 +258,7 @@ impl Init { /// Tables mapping from a place to its MovePathIndex. #[derive(Debug)] -pub struct MovePathLookup<'tcx> { +pub struct MovePathLookup { locals: IndexVec, /// projections are made from a base-place and a projection @@ -263,7 +267,7 @@ pub struct MovePathLookup<'tcx> { /// subsequent search so that it is solely relative to that /// base-place). For the remaining lookup, we map the projection /// elem to the associated MovePathIndex. - projections: FxHashMap<(MovePathIndex, AbstractElem<'tcx>), MovePathIndex> + projections: FxHashMap<(MovePathIndex, AbstractElem), MovePathIndex> } mod builder; @@ -274,16 +278,15 @@ pub enum LookupResult { Parent(Option) } -impl<'tcx> MovePathLookup<'tcx> { +impl MovePathLookup { // Unlike the builder `fn move_path_for` below, this lookup // alternative will *not* create a MovePath on the fly for an // unknown place, but will rather return the nearest available // parent. pub fn find(&self, place: &Place<'tcx>) -> LookupResult { match *place { - Place::Local(local) => LookupResult::Exact(self.locals[local]), - Place::Promoted(_) | - Place::Static(..) 
=> LookupResult::Parent(None), + Place::Base(PlaceBase::Local(local)) => LookupResult::Exact(self.locals[local]), + Place::Base(PlaceBase::Static(..)) => LookupResult::Parent(None), Place::Projection(ref proj) => { match self.find(&proj.base) { LookupResult::Exact(base_path) => { @@ -355,7 +358,7 @@ impl<'a, 'gcx, 'tcx> MoveData<'tcx> { pub fn base_local(&self, mut mpi: MovePathIndex) -> Option { loop { let path = &self.move_paths[mpi]; - if let Place::Local(l) = path.place { return Some(l); } + if let Place::Base(PlaceBase::Local(l)) = path.place { return Some(l); } if let Some(parent) = path.parent { mpi = parent; continue } else { return None } } } diff --git a/src/librustc_mir/diagnostics.rs b/src/librustc_mir/diagnostics.rs index ec5617d705248..c8836fe51931e 100644 --- a/src/librustc_mir/diagnostics.rs +++ b/src/librustc_mir/diagnostics.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] register_long_diagnostics! { @@ -335,11 +325,13 @@ match Some(42) { "##, E0162: r##" +#### Note: this error code is no longer emitted by the compiler. + An if-let pattern attempts to match the pattern, and enters the body if the match was successful. If the match is irrefutable (when it cannot fail to match), use a regular `let`-binding instead. For instance: -```compile_fail,E0162 +```compile_pass struct Irrefutable(i32); let irr = Irrefutable(0); @@ -362,11 +354,13 @@ println!("{}", x); "##, E0165: r##" +#### Note: this error code is no longer emitted by the compiler. + A while-let pattern attempts to match the pattern, and enters the body if the match was successful. If the match is irrefutable (when it cannot fail to match), use a regular `let`-binding inside a `loop` instead. For instance: -```compile_fail,E0165 +```compile_pass,no_run struct Irrefutable(i32); let irr = Irrefutable(0); @@ -579,7 +573,7 @@ const Y: i32 = A; ``` "##, -// FIXME(#24111) Change the language here when const fn stabilizes +// FIXME(#57563) Change the language here when const fn stabilizes E0015: r##" The only functions that can be called in static or constant expressions are `const` functions, and struct/enum constructors. `const` functions are only @@ -696,7 +690,7 @@ fn main() { } ``` -See also https://doc.rust-lang.org/book/first-edition/unsafe.html +See also https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html "##, E0373: r##" @@ -879,7 +873,7 @@ that at most one writer or multiple readers can access the data at any one time. If you wish to learn more about ownership in Rust, start with the chapter in the Book: -https://doc.rust-lang.org/book/first-edition/ownership.html +https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html "##, E0383: r##" @@ -1133,9 +1127,9 @@ A borrow of a constant containing interior mutability was attempted. 
Erroneous code example: ```compile_fail,E0492 -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; +use std::sync::atomic::AtomicUsize; -const A: AtomicUsize = ATOMIC_USIZE_INIT; +const A: AtomicUsize = AtomicUsize::new(0); static B: &'static AtomicUsize = &A; // error: cannot borrow a constant which may contain interior mutability, // create a static instead @@ -1151,9 +1145,9 @@ explicitly a single memory location, which can be mutated at will. So, in order to solve this error, either use statics which are `Sync`: ``` -use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT}; +use std::sync::atomic::AtomicUsize; -static A: AtomicUsize = ATOMIC_USIZE_INIT; +static A: AtomicUsize = AtomicUsize::new(0); static B: &'static AtomicUsize = &A; // ok! ``` @@ -1213,7 +1207,7 @@ let mut a = &mut i; Please note that in rust, you can either have many immutable references, or one mutable reference. Take a look at -https://doc.rust-lang.org/stable/book/references-and-borrowing.html for more +https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html for more information. Example: @@ -1380,7 +1374,7 @@ fn foo(a: &mut i32) { ``` For more information on the rust ownership system, take a look at -https://doc.rust-lang.org/stable/book/references-and-borrowing.html. +https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html. "##, E0503: r##" @@ -1436,7 +1430,7 @@ fn main() { ``` You can find more information about borrowing in the rust-book: -http://doc.rust-lang.org/stable/book/references-and-borrowing.html +http://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html "##, E0504: r##" @@ -1551,20 +1545,22 @@ Erroneous code example: ```compile_fail,E0505 struct Value {} +fn borrow(val: &Value) {} + fn eat(val: Value) {} fn main() { let x = Value{}; - { - let _ref_to_val: &Value = &x; - eat(x); - } + let _ref_to_val: &Value = &x; + eat(x); + borrow(_ref_to_val); } ``` -Here, the function `eat` takes the ownership of `x`. However, -`x` cannot be moved because it was borrowed to `_ref_to_val`. -To fix that you can do few different things: +Here, the function `eat` takes ownership of `x`. However, +`x` cannot be moved because the borrow to `_ref_to_val` +needs to last till the function `borrow`. +To fix that you can do a few different things: * Try to avoid moving the variable. * Release borrow before move. @@ -1575,14 +1571,15 @@ Examples: ``` struct Value {} +fn borrow(val: &Value) {} + fn eat(val: &Value) {} fn main() { let x = Value{}; - { - let _ref_to_val: &Value = &x; - eat(&x); // pass by reference, if it's possible - } + let _ref_to_val: &Value = &x; + eat(&x); // pass by reference, if it's possible + borrow(_ref_to_val); } ``` @@ -1591,12 +1588,15 @@ Or: ``` struct Value {} +fn borrow(val: &Value) {} + fn eat(val: Value) {} fn main() { let x = Value{}; { let _ref_to_val: &Value = &x; + borrow(_ref_to_val); } eat(x); // release borrow and then move it. } @@ -1608,19 +1608,20 @@ Or: #[derive(Clone, Copy)] // implement Copy trait struct Value {} +fn borrow(val: &Value) {} + fn eat(val: Value) {} fn main() { let x = Value{}; - { - let _ref_to_val: &Value = &x; - eat(x); // it will be copied here. - } + let _ref_to_val: &Value = &x; + eat(x); // it will be copied here. 
+ borrow(_ref_to_val); } ``` You can find more information about borrowing in the rust-book: -http://doc.rust-lang.org/stable/book/references-and-borrowing.html +http://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html "##, E0506: r##" @@ -1831,7 +1832,7 @@ mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok! ``` You can find more information about borrowing in the rust-book: -http://doc.rust-lang.org/book/first-edition/references-and-borrowing.html +http://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html "##, E0508: r##" @@ -1984,8 +1985,8 @@ could cause the match to be non-exhaustive: let mut x = Some(0); match x { None => (), - Some(v) if { x = None; false } => (), - Some(_) => (), // No longer matches + Some(_) if { x = None; false } => (), + Some(v) => (), // No longer matches } ``` @@ -2125,14 +2126,15 @@ This error occurs because a borrow in a generator persists across a yield point. ```compile_fail,E0626 -# #![feature(generators, generator_trait)] +# #![feature(generators, generator_trait, pin)] # use std::ops::Generator; +# use std::pin::Pin; let mut b = || { let a = &String::new(); // <-- This borrow... yield (); // ...is still in scope here, when the yield occurs. println!("{}", a); }; -unsafe { b.resume() }; +Pin::new(&mut b).resume(); ``` At present, it is not permitted to have a yield that occurs while a @@ -2143,14 +2145,15 @@ resolve the previous example by removing the borrow and just storing the integer by value: ``` -# #![feature(generators, generator_trait)] +# #![feature(generators, generator_trait, pin)] # use std::ops::Generator; +# use std::pin::Pin; let mut b = || { let a = 3; yield (); println!("{}", a); }; -unsafe { b.resume() }; +Pin::new(&mut b).resume(); ``` This is a very simple case, of course. In more complex cases, we may @@ -2160,37 +2163,40 @@ in those cases, something like the `Rc` or `Arc` types may be useful. This error also frequently arises with iteration: ```compile_fail,E0626 -# #![feature(generators, generator_trait)] +# #![feature(generators, generator_trait, pin)] # use std::ops::Generator; +# use std::pin::Pin; let mut b = || { let v = vec![1,2,3]; for &x in &v { // <-- borrow of `v` is still in scope... yield x; // ...when this yield occurs. } }; -unsafe { b.resume() }; +Pin::new(&mut b).resume(); ``` Such cases can sometimes be resolved by iterating "by value" (or using `into_iter()`) to avoid borrowing: ``` -# #![feature(generators, generator_trait)] +# #![feature(generators, generator_trait, pin)] # use std::ops::Generator; +# use std::pin::Pin; let mut b = || { let v = vec![1,2,3]; for x in v { // <-- Take ownership of the values instead! yield x; // <-- Now yield is OK. } }; -unsafe { b.resume() }; +Pin::new(&mut b).resume(); ``` If taking ownership is not an option, using indices can work too: ``` -# #![feature(generators, generator_trait)] +# #![feature(generators, generator_trait, pin)] # use std::ops::Generator; +# use std::pin::Pin; let mut b = || { let v = vec![1,2,3]; let len = v.len(); // (*) @@ -2199,7 +2205,7 @@ let mut b = || { yield x; // <-- Now yield is OK. } }; -unsafe { b.resume() }; +Pin::new(&mut b).resume(); // (*) -- Unfortunately, these temporaries are currently required. // See . @@ -2301,10 +2307,10 @@ let q = *p; ``` Here, the expression `&foo()` is borrowing the expression -`foo()`. As `foo()` is call to a function, and not the name of +`foo()`. 
As `foo()` is a call to a function, and not the name of a variable, this creates a **temporary** -- that temporary stores the return value from `foo()` so that it can be borrowed. -So you might imagine that `let p = bar(&foo())` is equivalent +You could imagine that `let p = bar(&foo());` is equivalent to this: ```compile_fail,E0597 @@ -2344,7 +2350,7 @@ local variable that already exists, and hence no temporary is created. Temporaries are not always dropped at the end of the enclosing statement. In simple cases where the `&` expression is immediately stored into a variable, the compiler will automatically extend -the lifetime of the temporary until the end of the enclosinb +the lifetime of the temporary until the end of the enclosing block. Therefore, an alternative way to fix the original program is to write `let tmp = &foo()` and not `let tmp = foo()`: @@ -2371,6 +2377,37 @@ let value = (&foo(), &foo()); ``` "##, +E0723: r##" +An feature unstable in `const` contexts was used. + +Erroneous code example: + +```compile_fail,E0723 +trait T {} + +impl T for () {} + +const fn foo() -> impl T { // error: `impl Trait` in const fn is unstable + () +} +``` + +To enable this feature on a nightly version of rustc, add the `const_fn` +feature flag: + +``` +#![feature(const_fn)] + +trait T {} + +impl T for () {} + +const fn foo() -> impl T { + () +} +``` +"##, + } register_diagnostics! { diff --git a/src/librustc_mir/hair/constant.rs b/src/librustc_mir/hair/constant.rs index c98ef31c2bae2..caadc6055b5c6 100644 --- a/src/librustc_mir/hair/constant.rs +++ b/src/librustc_mir/hair/constant.rs @@ -14,15 +14,14 @@ crate fn lit_to_const<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>, neg: bool, -) -> Result<&'tcx ty::Const<'tcx>, LitToConstError> { +) -> Result, LitToConstError> { use syntax::ast::*; let trunc = |n| { let param_ty = ParamEnv::reveal_all().and(tcx.lift_to_global(&ty).unwrap()); let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size; trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits()); - let shift = 128 - width.bits(); - let result = (n << shift) >> shift; + let result = truncate(n, width); trace!("trunc result: {}", result); Ok(ConstValue::Scalar(Scalar::Bits { bits: result, @@ -35,7 +34,15 @@ crate fn lit_to_const<'a, 'gcx, 'tcx>( LitKind::Str(ref s, _) => { let s = s.as_str(); let id = tcx.allocate_bytes(s.as_bytes()); - ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64, &tcx) + ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64) + }, + LitKind::Err(ref s) => { + let s = s.as_str(); + let id = tcx.allocate_bytes(s.as_bytes()); + return Ok(ty::Const { + val: ConstValue::new_slice(Scalar::Ptr(id.into()), s.len() as u64), + ty: tcx.types.err, + }); }, LitKind::ByteStr(ref data) => { let id = tcx.allocate_bytes(data); @@ -64,7 +71,7 @@ crate fn lit_to_const<'a, 'gcx, 'tcx>( LitKind::Bool(b) => ConstValue::Scalar(Scalar::from_bool(b)), LitKind::Char(c) => ConstValue::Scalar(Scalar::from_char(c)), }; - Ok(ty::Const::from_const_value(tcx, lit, ty)) + Ok(ty::Const { val: lit, ty }) } fn parse_float<'tcx>( diff --git a/src/librustc_mir/hair/cx/block.rs b/src/librustc_mir/hair/cx/block.rs index beb035d82b1c8..17fab6c5ddcff 100644 --- a/src/librustc_mir/hair/cx/block.rs +++ b/src/librustc_mir/hair/cx/block.rs @@ -1,18 +1,9 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hair::*; -use hair::cx::Cx; -use hair::cx::to_ref::ToRef; +use crate::hair::{self, *}; +use crate::hair::cx::Cx; +use crate::hair::cx::to_ref::ToRef; use rustc::middle::region; use rustc::hir; +use rustc::ty; use rustc_data_structures::indexed_vec::Idx; @@ -39,7 +30,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Block { hir::BlockCheckMode::DefaultBlock => BlockSafety::Safe, hir::BlockCheckMode::UnsafeBlock(..) => - BlockSafety::ExplicitUnsafe(self.id), + BlockSafety::ExplicitUnsafe(self.hir_id), hir::BlockCheckMode::PushUnsafeBlock(..) => BlockSafety::PushUnsafe, hir::BlockCheckMode::PopUnsafeBlock(..) => @@ -55,12 +46,12 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, -> Vec> { let mut result = vec![]; for (index, stmt) in stmts.iter().enumerate() { - let hir_id = cx.tcx.hir().node_to_hir_id(stmt.node.id()); + let hir_id = stmt.hir_id; let opt_dxn_ext = cx.region_scope_tree.opt_destruction_scope(hir_id.local_id); - let stmt_span = StatementSpan(cx.tcx.hir().span(stmt.node.id())); + let stmt_span = StatementSpan(cx.tcx.hir().span_by_hir_id(hir_id)); match stmt.node { - hir::StmtKind::Expr(ref expr, _) | - hir::StmtKind::Semi(ref expr, _) => { + hir::StmtKind::Expr(ref expr) | + hir::StmtKind::Semi(ref expr) => { result.push(StmtRef::Mirror(Box::new(Stmt { kind: StmtKind::Expr { scope: region::Scope { @@ -73,50 +64,50 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, span: stmt_span, }))) } - hir::StmtKind::Decl(ref decl, _) => { - match decl.node { - hir::DeclKind::Item(..) => { - // ignore for purposes of the MIR - } - hir::DeclKind::Local(ref local) => { - let remainder_scope = region::Scope { - id: block_id, - data: region::ScopeData::Remainder( - region::FirstStatementIndex::new(index)), - }; - - let mut pattern = cx.pattern_from_hir(&local.pat); + hir::StmtKind::Item(..) 
=> { + // ignore for purposes of the MIR + } + hir::StmtKind::Local(ref local) => { + let remainder_scope = region::Scope { + id: block_id, + data: region::ScopeData::Remainder( + region::FirstStatementIndex::new(index)), + }; - if let Some(ty) = &local.ty { - if let Some(&user_ty) = cx.tables.user_provided_tys().get(ty.hir_id) { - pattern = Pattern { - ty: pattern.ty, - span: pattern.span, - kind: Box::new(PatternKind::AscribeUserType { - user_ty: PatternTypeProjection::from_canonical_ty(user_ty), - user_ty_span: ty.span, - subpattern: pattern - }) - }; - } - } + let mut pattern = cx.pattern_from_hir(&local.pat); - result.push(StmtRef::Mirror(Box::new(Stmt { - kind: StmtKind::Let { - remainder_scope: remainder_scope, - init_scope: region::Scope { - id: hir_id.local_id, - data: region::ScopeData::Node + if let Some(ty) = &local.ty { + if let Some(&user_ty) = cx.tables.user_provided_types().get(ty.hir_id) { + debug!("mirror_stmts: user_ty={:?}", user_ty); + pattern = Pattern { + ty: pattern.ty, + span: pattern.span, + kind: Box::new(PatternKind::AscribeUserType { + ascription: hair::pattern::Ascription { + user_ty: PatternTypeProjection::from_user_type(user_ty), + user_ty_span: ty.span, + variance: ty::Variance::Covariant, }, - pattern, - initializer: local.init.to_ref(), - lint_level: cx.lint_level_of(local.id), - }, - opt_destruction_scope: opt_dxn_ext, - span: stmt_span, - }))); + subpattern: pattern, + }) + }; } } + + result.push(StmtRef::Mirror(Box::new(Stmt { + kind: StmtKind::Let { + remainder_scope: remainder_scope, + init_scope: region::Scope { + id: hir_id.local_id, + data: region::ScopeData::Node + }, + pattern, + initializer: local.init.to_ref(), + lint_level: LintLevel::Explicit(local.hir_id), + }, + opt_destruction_scope: opt_dxn_ext, + span: stmt_span, + }))); } } } @@ -126,7 +117,7 @@ fn mirror_stmts<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, pub fn to_expr_ref<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, block: &'tcx hir::Block) -> ExprRef<'tcx> { - let block_ty = cx.tables().node_id_to_type(block.hir_id); + let block_ty = cx.tables().node_type(block.hir_id); let temp_lifetime = cx.region_scope_tree.temporary_scope(block.hir_id.local_id); let expr = Expr { ty: block_ty, diff --git a/src/librustc_mir/hair/cx/expr.rs b/src/librustc_mir/hair/cx/expr.rs index a1471adac6050..91113dc2271be 100644 --- a/src/librustc_mir/hair/cx/expr.rs +++ b/src/librustc_mir/hair/cx/expr.rs @@ -1,27 +1,17 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use hair::*; +use crate::hair::*; +use crate::hair::cx::Cx; +use crate::hair::cx::block; +use crate::hair::cx::to_ref::ToRef; +use crate::hair::util::UserAnnotatedTyHelpers; use rustc_data_structures::indexed_vec::Idx; -use hair::cx::Cx; -use hair::cx::block; -use hair::cx::to_ref::ToRef; -use hair::util::UserAnnotatedTyHelpers; -use rustc::hir::def::{Def, CtorKind}; -use rustc::mir::interpret::{GlobalId, ErrorHandled}; +use rustc::hir::def::{CtorOf, Def, CtorKind}; +use rustc::mir::interpret::{GlobalId, ErrorHandled, ConstValue}; use rustc::ty::{self, AdtKind, Ty}; use rustc::ty::adjustment::{Adjustment, Adjust, AutoBorrow, AutoBorrowMutability}; -use rustc::ty::cast::CastKind as TyCastKind; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; use rustc::hir; use rustc::hir::def_id::LocalDefId; -use rustc::mir::{BorrowKind}; +use rustc::mir::BorrowKind; use syntax_pos::Span; impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { @@ -34,7 +24,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { data: region::ScopeData::Node }; - debug!("Expr::make_mirror(): id={}, span={:?}", self.id, self.span); + debug!("Expr::make_mirror(): id={}, span={:?}", self.hir_id, self.span); let mut expr = make_mirror_unadjusted(cx, self); @@ -54,7 +44,7 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr { kind: ExprKind::Scope { region_scope: expr_scope, value: expr.to_ref(), - lint_level: cx.lint_level_of(self.id), + lint_level: LintLevel::Explicit(self.hir_id), }, }; @@ -91,14 +81,14 @@ fn apply_adjustment<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, Adjust::UnsafeFnPointer => { ExprKind::UnsafeFnPointer { source: expr.to_ref() } } - Adjust::ClosureFnPointer => { - ExprKind::ClosureFnPointer { source: expr.to_ref() } + Adjust::ClosureFnPointer(unsafety) => { + ExprKind::ClosureFnPointer { source: expr.to_ref(), unsafety } } Adjust::NeverToAny => { ExprKind::NeverToAny { source: expr.to_ref() } } Adjust::MutToConstPointer => { - ExprKind::Cast { source: expr.to_ref() } + ExprKind::MutToConstPointer { source: expr.to_ref() } } Adjust::Deref(None) => { // Adjust the span from the block, to the last expression of the @@ -134,7 +124,6 @@ fn apply_adjustment<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, }), span, kind: ExprKind::Borrow { - region: deref.region, borrow_kind: deref.mutbl.to_borrow_kind(), arg: expr.to_ref(), }, @@ -142,32 +131,24 @@ fn apply_adjustment<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, overloaded_place(cx, hir_expr, adjustment.target, Some(call), vec![expr.to_ref()]) } - Adjust::Borrow(AutoBorrow::Ref(r, m)) => { + Adjust::Borrow(AutoBorrow::Ref(_, m)) => { ExprKind::Borrow { - region: r, borrow_kind: m.to_borrow_kind(), arg: expr.to_ref(), } } Adjust::Borrow(AutoBorrow::RawPtr(m)) => { // Convert this to a suitable `&foo` and - // then an unsafe coercion. Limit the region to be just this - // expression. - let region = ty::ReScope(region::Scope { - id: hir_expr.hir_id.local_id, - data: region::ScopeData::Node - }); - let region = cx.tcx.mk_region(region); + // then an unsafe coercion. expr = Expr { temp_lifetime, - ty: cx.tcx.mk_ref(region, + ty: cx.tcx.mk_ref(cx.tcx.types.re_erased, ty::TypeAndMut { ty: expr.ty, mutbl: m, }), span, kind: ExprKind::Borrow { - region, borrow_kind: m.to_borrow_kind(), arg: expr.to_ref(), }, @@ -280,10 +261,8 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // Tuple-like ADTs are represented as ExprKind::Call. We convert them here. 
expr_ty.ty_adt_def().and_then(|adt_def| { match path.def { - Def::VariantCtor(variant_id, CtorKind::Fn) => { - Some((adt_def, adt_def.variant_index_with_id(variant_id))) - } - Def::StructCtor(_, CtorKind::Fn) | + Def::Ctor(ctor_id, _, CtorKind::Fn) => + Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id))), Def::SelfCtor(..) => Some((adt_def, VariantIdx::new(0))), _ => None, } @@ -293,9 +272,16 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, }; if let Some((adt_def, index)) = adt_data { let substs = cx.tables().node_substs(fun.hir_id); - - let user_ty = cx.tables().user_substs(fun.hir_id) - .map(|user_substs| UserTypeAnnotation::TypeOf(adt_def.did, user_substs)); + let user_provided_types = cx.tables().user_provided_types(); + let user_ty = user_provided_types.get(fun.hir_id) + .map(|u_ty| *u_ty) + .map(|mut u_ty| { + if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value { + *did = adt_def.did; + } + u_ty + }); + debug!("make_mirror_unadjusted: (call) user_ty={:?}", user_ty); let field_refs = args.iter() .enumerate() @@ -316,7 +302,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } } else { ExprKind::Call { - ty: cx.tables().node_id_to_type(fun.hir_id), + ty: cx.tables().node_type(fun.hir_id), fun: fun.to_ref(), args: args.to_ref(), from_hir_call: true, @@ -326,12 +312,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } hir::ExprKind::AddrOf(mutbl, ref expr) => { - let region = match expr_ty.sty { - ty::Ref(r, _, _) => r, - _ => span_bug!(expr.span, "type of & not region"), - }; ExprKind::Borrow { - region, borrow_kind: mutbl.to_borrow_kind(), arg: expr.to_ref(), } @@ -359,7 +340,9 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } hir::ExprKind::Lit(ref lit) => ExprKind::Literal { - literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, false), + literal: cx.tcx.mk_const( + cx.const_eval_literal(&lit.node, expr_ty, lit.span, false) + ), user_ty: None, }, @@ -372,6 +355,10 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // FIXME(eddyb) use logical ops in constants when // they can handle that kind of control-flow. 
(hir::BinOpKind::And, hir::Constness::Const) => { + cx.control_flow_destroyed.push(( + op.span, + "`&&` operator".into(), + )); ExprKind::Binary { op: BinOp::BitAnd, lhs: lhs.to_ref(), @@ -379,6 +366,10 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } } (hir::BinOpKind::Or, hir::Constness::Const) => { + cx.control_flow_destroyed.push(( + op.span, + "`||` operator".into(), + )); ExprKind::Binary { op: BinOp::BitOr, lhs: lhs.to_ref(), @@ -449,7 +440,9 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } else { if let hir::ExprKind::Lit(ref lit) = arg.node { ExprKind::Literal { - literal: cx.const_eval_literal(&lit.node, expr_ty, lit.span, true), + literal: cx.tcx.mk_const( + cx.const_eval_literal(&lit.node, expr_ty, lit.span, true) + ), user_ty: None, } } else { @@ -466,11 +459,14 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty::Adt(adt, substs) => { match adt.adt_kind() { AdtKind::Struct | AdtKind::Union => { + let user_provided_types = cx.tables().user_provided_types(); + let user_ty = user_provided_types.get(expr.hir_id).map(|u_ty| *u_ty); + debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty); ExprKind::Adt { adt_def: adt, variant_index: VariantIdx::new(0), substs, - user_ty: cx.user_substs_applied_to_adt(expr.hir_id, adt), + user_ty, fields: field_refs(cx, fields), base: base.as_ref().map(|base| { FruInfo { @@ -483,20 +479,24 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } } AdtKind::Enum => { - let def = match *qpath { - hir::QPath::Resolved(_, ref path) => path.def, - hir::QPath::TypeRelative(..) => Def::Err, - }; + let def = cx.tables().qpath_def(qpath, expr.hir_id); match def { Def::Variant(variant_id) => { assert!(base.is_none()); let index = adt.variant_index_with_id(variant_id); + let user_provided_types = cx.tables().user_provided_types(); + let user_ty = user_provided_types.get(expr.hir_id) + .map(|u_ty| *u_ty); + debug!( + "make_mirror_unadjusted: (variant) user_ty={:?}", + user_ty + ); ExprKind::Adt { adt_def: adt, variant_index: index, substs, - user_ty: cx.user_substs_applied_to_adt(expr.hir_id, adt), + user_ty, fields: field_refs(cx, fields), base: None, } @@ -527,7 +527,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty); } }; - let upvars = cx.tcx.with_freevars(expr.id, |freevars| { + let upvars = cx.tcx.with_freevars(expr.hir_id, |freevars| { freevars.iter() .zip(substs.upvar_tys(def_id, cx.tcx)) .map(|(fv, ty)| capture_freevar(cx, expr, fv, ty)) @@ -556,8 +556,8 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // Now comes the rote stuff: hir::ExprKind::Repeat(ref v, ref count) => { - let def_id = cx.tcx.hir().local_def_id(count.id); - let substs = Substs::identity_for_item(cx.tcx.global_tcx(), def_id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(count.hir_id); + let substs = InternalSubsts::identity_for_item(cx.tcx.global_tcx(), def_id); let instance = ty::Instance::resolve( cx.tcx.global_tcx(), cx.param_env, @@ -588,7 +588,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, match dest.target_id { Ok(target_id) => ExprKind::Break { label: region::Scope { - id: cx.tcx.hir().node_to_hir_id(target_id).local_id, + id: target_id.local_id, data: region::ScopeData::Node }, value: value.to_ref(), @@ -600,7 +600,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, match 
dest.target_id { Ok(loop_id) => ExprKind::Continue { label: region::Scope { - id: cx.tcx.hir().node_to_hir_id(loop_id).local_id, + id: loop_id.local_id, data: region::ScopeData::Node }, }, @@ -609,7 +609,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, } hir::ExprKind::Match(ref discr, ref arms, _) => { ExprKind::Match { - discriminant: discr.to_ref(), + scrutinee: discr.to_ref(), arms: arms.iter().map(|a| convert_arm(cx, a)).collect(), } } @@ -635,13 +635,13 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprKind::Field(ref source, ..) => { ExprKind::Field { lhs: source.to_ref(), - name: Field::new(cx.tcx.field_index(expr.id, cx.tables)), + name: Field::new(cx.tcx.field_index(expr.hir_id, cx.tables)), } } hir::ExprKind::Cast(ref source, ref cast_ty) => { // Check for a user-given type annotation on this `cast` - let user_ty = cx.tables.user_provided_tys().get(cast_ty.hir_id) - .map(|&t| UserTypeAnnotation::Ty(t)); + let user_provided_types = cx.tables.user_provided_types(); + let user_ty = user_provided_types.get(cast_ty.hir_id); debug!( "cast({:?}) has ty w/ hir_id {:?} and user provided ty {:?}", @@ -652,11 +652,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // Check to see if this cast is a "coercion cast", where the cast is actually done // using a coercion (or is a no-op). - let cast = if let Some(&TyCastKind::CoercionCast) = - cx.tables() - .cast_kinds() - .get(source.hir_id) - { + let cast = if cx.tables().is_coercion_cast(source.hir_id) { // Convert the lexpr to a vexpr. ExprKind::Use { source: source.to_ref() } } else { @@ -675,12 +671,12 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let def = cx.tables().qpath_def(qpath, source.hir_id); cx .tables() - .node_id_to_type(source.hir_id) + .node_type(source.hir_id) .ty_adt_def() .and_then(|adt_def| { match def { - Def::VariantCtor(variant_id, CtorKind::Const) => { - let idx = adt_def.variant_index_with_id(variant_id); + Def::Ctor(variant_ctor_id, CtorOf::Variant, CtorKind::Const) => { + let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id); let (d, o) = adt_def.discriminant_def_for_variant(idx); use rustc::ty::util::IntTypeExt; let ty = adt_def.repr.discr_type(); @@ -699,7 +695,10 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, temp_lifetime, ty: var_ty, span: expr.span, - kind: ExprKind::Literal { literal, user_ty: None }, + kind: ExprKind::Literal { + literal: cx.tcx.mk_const(literal), + user_ty: None + }, }.to_ref(); let offset = mk_const(ty::Const::from_bits( cx.tcx, @@ -710,13 +709,11 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, Some(did) => { // in case we are offsetting from a computed discriminant // and not the beginning of discriminants (which is always `0`) - let substs = Substs::identity_for_item(cx.tcx(), did); - let lhs = mk_const(ty::Const::unevaluated( - cx.tcx(), - did, - substs, - var_ty, - )); + let substs = InternalSubsts::identity_for_item(cx.tcx(), did); + let lhs = mk_const(ty::Const { + val: ConstValue::Unevaluated(did, substs), + ty: var_ty, + }); let bin = ExprKind::Binary { op: BinOp::Add, lhs, @@ -747,20 +744,20 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, span: expr.span, kind: cast, }; + debug!("make_mirror_unadjusted: (cast) user_ty={:?}", user_ty); ExprKind::ValueTypeAscription { source: cast_expr.to_ref(), - user_ty: Some(user_ty), + user_ty: Some(*user_ty), } } else { cast } } 
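For the "coercion cast" branch above, `is_coercion_cast` covers `as` expressions whose conversion is already an ordinary coercion, and the patch lowers those to `ExprKind::Use` rather than a real cast. As a rough surface-level illustration (not taken from this patch), one such cast is an unsizing coercion written with `as`:

```rust
fn main() {
    let array: &[i32; 3] = &[1, 2, 3];
    // `&[i32; 3]` to `&[i32]` is an unsizing coercion; spelling it with `as`
    // makes it a coercion cast rather than a numeric or pointer cast.
    let slice = array as &[i32];
    assert_eq!(slice.len(), 3);
}
```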
hir::ExprKind::Type(ref source, ref ty) => { - let user_provided_tys = cx.tables.user_provided_tys(); - let user_ty = user_provided_tys - .get(ty.hir_id) - .map(|&c_ty| UserTypeAnnotation::Ty(c_ty)); + let user_provided_types = cx.tables.user_provided_types(); + let user_ty = user_provided_types.get(ty.hir_id).map(|u_ty| *u_ty); + debug!("make_mirror_unadjusted: (type) user_ty={:?}", user_ty); if source.is_place_expr() { ExprKind::PlaceTypeAscription { source: source.to_ref(), @@ -782,6 +779,7 @@ fn make_mirror_unadjusted<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, hir::ExprKind::Tup(ref fields) => ExprKind::Tuple { fields: fields.to_ref() }, hir::ExprKind::Yield(ref v) => ExprKind::Yield { value: v.to_ref() }, + hir::ExprKind::Err => unreachable!(), }; Expr { @@ -796,25 +794,23 @@ fn user_substs_applied_to_def( cx: &mut Cx<'a, 'gcx, 'tcx>, hir_id: hir::HirId, def: &Def, -) -> Option> { - match def { +) -> Option> { + debug!("user_substs_applied_to_def: def={:?}", def); + let user_provided_type = match def { // A reference to something callable -- e.g., a fn, method, or // a tuple-struct or tuple-variant. This has the type of a // `Fn` but with the user-given substitutions. Def::Fn(_) | Def::Method(_) | - Def::StructCtor(_, CtorKind::Fn) | - Def::VariantCtor(_, CtorKind::Fn) | + Def::Ctor(_, _, CtorKind::Fn) | Def::Const(_) | - Def::AssociatedConst(_) => - Some(UserTypeAnnotation::TypeOf(def.def_id(), cx.tables().user_substs(hir_id)?)), + Def::AssociatedConst(_) => cx.tables().user_provided_types().get(hir_id).map(|u_ty| *u_ty), // A unit struct/variant which is used as a value (e.g., // `None`). This has the type of the enum/struct that defines // this variant -- but with the substitutions given by the // user. - Def::StructCtor(_def_id, CtorKind::Const) | - Def::VariantCtor(_def_id, CtorKind::Const) => + Def::Ctor(_, _, CtorKind::Const) => cx.user_substs_applied_to_ty_of_hir_id(hir_id), // `Self` is used in expression as a tuple struct constructor or an unit struct constructor @@ -823,26 +819,27 @@ fn user_substs_applied_to_def( _ => bug!("user_substs_applied_to_def: unexpected def {:?} at {:?}", def, hir_id) - } + }; + debug!("user_substs_applied_to_def: user_provided_type={:?}", user_provided_type); + user_provided_type } fn method_callee<'a, 'gcx, 'tcx>( cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &hir::Expr, span: Span, - overloaded_callee: Option<(DefId, &'tcx Substs<'tcx>)>, + overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>, ) -> Expr<'tcx> { let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); let (def_id, substs, user_ty) = match overloaded_callee { Some((def_id, substs)) => (def_id, substs, None), None => { - let type_dependent_defs = cx.tables().type_dependent_defs(); - let def = type_dependent_defs - .get(expr.hir_id) + let def = cx.tables().type_dependent_def(expr.hir_id) .unwrap_or_else(|| { span_bug!(expr.span, "no type-dependent def for method callee") }); - let user_ty = user_substs_applied_to_def(cx, expr.hir_id, def); + let user_ty = user_substs_applied_to_def(cx, expr.hir_id, &def); + debug!("method_callee: user_ty={:?}", user_ty); (def.def_id(), cx.tables().node_substs(expr.hir_id), user_ty) } }; @@ -852,7 +849,9 @@ fn method_callee<'a, 'gcx, 'tcx>( ty, span, kind: ExprKind::Literal { - literal: ty::Const::zero_sized(cx.tcx(), ty), + literal: cx.tcx().mk_const( + ty::Const::zero_sized(ty) + ), user_ty, }, } @@ -906,49 +905,69 @@ fn convert_path_expr<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, // A regular function, constructor function or a 
constant. Def::Fn(_) | Def::Method(_) | - Def::StructCtor(_, CtorKind::Fn) | - Def::VariantCtor(_, CtorKind::Fn) | + Def::Ctor(_, _, CtorKind::Fn) | Def::SelfCtor(..) => { let user_ty = user_substs_applied_to_def(cx, expr.hir_id, &def); + debug!("convert_path_expr: user_ty={:?}", user_ty); ExprKind::Literal { - literal: ty::Const::zero_sized( - cx.tcx, - cx.tables().node_id_to_type(expr.hir_id), - ), + literal: cx.tcx.mk_const(ty::Const::zero_sized( + cx.tables().node_type(expr.hir_id), + )), user_ty, } - }, + } + + Def::ConstParam(def_id) => { + let node_id = cx.tcx.hir().as_local_node_id(def_id).unwrap(); + let item_id = cx.tcx.hir().get_parent_node(node_id); + let item_def_id = cx.tcx.hir().local_def_id(item_id); + let generics = cx.tcx.generics_of(item_def_id); + let index = generics.param_def_id_to_index[&cx.tcx.hir().local_def_id(node_id)]; + let name = cx.tcx.hir().name(node_id).as_interned_str(); + let val = ConstValue::Param(ty::ParamConst::new(index, name)); + ExprKind::Literal { + literal: cx.tcx.mk_const( + ty::Const { + val, + ty: cx.tables().node_type(expr.hir_id), + } + ), + user_ty: None, + } + } Def::Const(def_id) | Def::AssociatedConst(def_id) => { let user_ty = user_substs_applied_to_def(cx, expr.hir_id, &def); + debug!("convert_path_expr: (const) user_ty={:?}", user_ty); ExprKind::Literal { - literal: ty::Const::unevaluated( - cx.tcx, - def_id, - substs, - cx.tables().node_id_to_type(expr.hir_id), - ), + literal: cx.tcx.mk_const(ty::Const { + val: ConstValue::Unevaluated(def_id, substs), + ty: cx.tcx.type_of(def_id), + }), user_ty, } }, - Def::StructCtor(def_id, CtorKind::Const) | - Def::VariantCtor(def_id, CtorKind::Const) => { - match cx.tables().node_id_to_type(expr.hir_id).sty { + Def::Ctor(def_id, _, CtorKind::Const) => { + let user_provided_types = cx.tables.user_provided_types(); + let user_provided_type = user_provided_types.get(expr.hir_id).map(|u_ty| *u_ty); + debug!("convert_path_expr: user_provided_type={:?}", user_provided_type); + let ty = cx.tables().node_type(expr.hir_id); + match ty.sty { // A unit struct/variant which is used as a value. // We return a completely different ExprKind here to account for this special case. 
ty::Adt(adt_def, substs) => { ExprKind::Adt { adt_def, - variant_index: adt_def.variant_index_with_id(def_id), + variant_index: adt_def.variant_index_with_ctor_id(def_id), substs, - user_ty: cx.user_substs_applied_to_adt(expr.hir_id, adt_def), + user_ty: user_provided_type, fields: vec![], base: None, } } - ref sty => bug!("unexpected sty: {:?}", sty), + _ => bug!("unexpected ty: {:?}", ty), } } @@ -967,7 +986,7 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let temp_lifetime = cx.region_scope_tree.temporary_scope(expr.hir_id.local_id); match def { - Def::Local(id) => ExprKind::VarRef { id }, + Def::Local(id) => ExprKind::VarRef { id: cx.tcx.hir().node_to_hir_id(id) }, Def::Upvar(var_id, index, closure_expr_id) => { debug!("convert_var(upvar({:?}, {:?}, {:?}))", @@ -975,11 +994,11 @@ fn convert_var<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, index, closure_expr_id); let var_hir_id = cx.tcx.hir().node_to_hir_id(var_id); - let var_ty = cx.tables().node_id_to_type(var_hir_id); + let var_ty = cx.tables().node_type(var_hir_id); // FIXME free regions in closures are not right let closure_ty = cx.tables() - .node_id_to_type(cx.tcx.hir().node_to_hir_id(closure_expr_id)); + .node_type(cx.tcx.hir().node_to_hir_id(closure_expr_id)); // FIXME we're just hard-coding the idea that the // signature will be &self or &mut self and hence will @@ -1127,7 +1146,7 @@ fn overloaded_place<'a, 'gcx, 'tcx>( cx: &mut Cx<'a, 'gcx, 'tcx>, expr: &'tcx hir::Expr, place_ty: Ty<'tcx>, - overloaded_callee: Option<(DefId, &'tcx Substs<'tcx>)>, + overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>, args: Vec>, ) -> ExprKind<'tcx> { // For an overloaded *x or x[y] expression of type T, the method @@ -1179,11 +1198,11 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, let var_hir_id = cx.tcx.hir().node_to_hir_id(freevar.var_id()); let upvar_id = ty::UpvarId { var_path: ty::UpvarPath { hir_id: var_hir_id }, - closure_expr_id: cx.tcx.hir().local_def_id(closure_expr.id).to_local(), + closure_expr_id: cx.tcx.hir().local_def_id_from_hir_id(closure_expr.hir_id).to_local(), }; let upvar_capture = cx.tables().upvar_capture(upvar_id); let temp_lifetime = cx.region_scope_tree.temporary_scope(closure_expr.hir_id.local_id); - let var_ty = cx.tables().node_id_to_type(var_hir_id); + let var_ty = cx.tables().node_type(var_hir_id); let captured_var = Expr { temp_lifetime, ty: var_ty, @@ -1203,7 +1222,6 @@ fn capture_freevar<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, ty: freevar_ty, span: closure_expr.span, kind: ExprKind::Borrow { - region: upvar_borrow.region, borrow_kind, arg: captured_var.to_ref(), }, @@ -1219,7 +1237,7 @@ fn field_refs<'a, 'gcx, 'tcx>(cx: &mut Cx<'a, 'gcx, 'tcx>, fields.iter() .map(|field| { FieldExprRef { - name: Field::new(cx.tcx.field_index(field.id, cx.tables)), + name: Field::new(cx.tcx.field_index(field.hir_id, cx.tables)), expr: field.expr.to_ref(), } }) diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs index a0b2d99dfd305..71c6489d63f0d 100644 --- a/src/librustc_mir/hair/cx/mod.rs +++ b/src/librustc_mir/hair/cx/mod.rs @@ -1,48 +1,36 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
This module contains the code to convert from the wacky tcx data -//! structures into the hair. The `builder` is generally ignorant of -//! the tcx etc, and instead goes through the `Cx` for most of its -//! work. -//! - -use hair::*; -use hair::util::UserAnnotatedTyHelpers; +//! This module contains the fcuntaiontliy to convert from the wacky tcx data +//! structures into the HAIR. The `builder` is generally ignorant of the tcx, +//! etc., and instead goes through the `Cx` for most of its work. + +use crate::hair::*; +use crate::hair::util::UserAnnotatedTyHelpers; use rustc_data_structures::indexed_vec::Idx; -use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::hir::def_id::DefId; use rustc::hir::Node; use rustc::middle::region; use rustc::infer::InferCtxt; use rustc::ty::subst::Subst; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::subst::{Kind, Substs}; +use rustc::ty::subst::{Kind, InternalSubsts}; use rustc::ty::layout::VariantIdx; use syntax::ast; use syntax::attr; use syntax::symbol::Symbol; use rustc::hir; use rustc_data_structures::sync::Lrc; -use hair::constant::{lit_to_const, LitToConstError}; +use crate::hair::constant::{lit_to_const, LitToConstError}; #[derive(Clone)] pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { tcx: TyCtxt<'a, 'gcx, 'tcx>, infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - pub root_lint_level: ast::NodeId, + pub root_lint_level: hir::HirId, pub param_env: ty::ParamEnv<'gcx>, - /// Identity `Substs` for use with const-evaluation. - pub identity_substs: &'gcx Substs<'gcx>, + /// Identity `InternalSubsts` for use with const-evaluation. + pub identity_substs: &'gcx InternalSubsts<'gcx>, pub region_scope_tree: Lrc, pub tables: &'a ty::TypeckTables<'gcx>, @@ -54,24 +42,28 @@ pub struct Cx<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { /// What kind of body is being compiled. pub body_owner_kind: hir::BodyOwnerKind, - /// True if this constant/function needs overflow checks. + /// Whether this constant/function needs overflow checks. check_overflow: bool, + + /// See field with the same name on `Mir`. + control_flow_destroyed: Vec<(Span, String)>, } impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, - src_id: ast::NodeId) -> Cx<'a, 'gcx, 'tcx> { + src_id: hir::HirId) -> Cx<'a, 'gcx, 'tcx> { let tcx = infcx.tcx; - let src_def_id = tcx.hir().local_def_id(src_id); - let body_owner_kind = tcx.hir().body_owner_kind(src_id); + let src_def_id = tcx.hir().local_def_id_from_hir_id(src_id); + let body_owner_kind = tcx.hir().body_owner_kind_by_hir_id(src_id); let constness = match body_owner_kind { hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => hir::Constness::Const, + hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => hir::Constness::NotConst, }; - let attrs = tcx.hir().attrs(src_id); + let attrs = tcx.hir().attrs_by_hir_id(src_id); // Some functions always have overflow checks enabled, // however, they may not get codegen'd, depending on @@ -84,25 +76,28 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { // Constants always need overflow checks. 
check_overflow |= constness == hir::Constness::Const; - let lint_level = lint_level_for_hir_id(tcx, src_id); Cx { tcx, infcx, - root_lint_level: lint_level, + root_lint_level: src_id, param_env: tcx.param_env(src_def_id), - identity_substs: Substs::identity_for_item(tcx.global_tcx(), src_def_id), + identity_substs: InternalSubsts::identity_for_item(tcx.global_tcx(), src_def_id), region_scope_tree: tcx.region_scope_tree(src_def_id), tables: tcx.typeck_tables_of(src_def_id), constness, body_owner_kind, check_overflow, + control_flow_destroyed: Vec::new(), } } + pub fn control_flow_destroyed(self) -> Vec<(Span, String)> { + self.control_flow_destroyed + } } impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { - /// Normalizes `ast` into the appropriate `mirror` type. + /// Normalizes `ast` into the appropriate "mirror" type. pub fn mirror>(&mut self, ast: M) -> M::Output { ast.make_mirror(self) } @@ -112,7 +107,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } pub fn usize_literal(&mut self, value: u64) -> &'tcx ty::Const<'tcx> { - ty::Const::from_usize(self.tcx, value) + self.tcx.mk_const(ty::Const::from_usize(self.tcx, value)) } pub fn bool_ty(&mut self) -> Ty<'tcx> { @@ -124,11 +119,11 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { } pub fn true_literal(&mut self) -> &'tcx ty::Const<'tcx> { - ty::Const::from_bool(self.tcx, true) + self.tcx.mk_const(ty::Const::from_bool(self.tcx, true)) } pub fn false_literal(&mut self) -> &'tcx ty::Const<'tcx> { - ty::Const::from_bool(self.tcx, false) + self.tcx.mk_const(ty::Const::from_bool(self.tcx, false)) } pub fn const_eval_literal( @@ -137,7 +132,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { ty: Ty<'tcx>, sp: Span, neg: bool, - ) -> &'tcx ty::Const<'tcx> { + ) -> ty::Const<'tcx> { trace!("const_eval_literal: {:#?}, {:?}, {:?}, {:?}", lit, ty, sp, neg); match lit_to_const(lit, self.tcx, ty, neg) { @@ -157,7 +152,7 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { pub fn pattern_from_hir(&mut self, p: &hir::Pat) -> Pattern<'tcx> { let tcx = self.tcx.global_tcx(); - let p = match tcx.hir().get(p.id) { + let p = match tcx.hir().get_by_hir_id(p.hir_id) { Node::Pat(p) | Node::Binding(p) => p, node => bug!("pattern became {:?}", node) }; @@ -172,14 +167,14 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { method_name: &str, self_ty: Ty<'tcx>, params: &[Kind<'tcx>]) - -> (Ty<'tcx>, &'tcx ty::Const<'tcx>) { + -> (Ty<'tcx>, ty::Const<'tcx>) { let method_name = Symbol::intern(method_name); let substs = self.tcx.mk_substs_trait(self_ty, params); for item in self.tcx.associated_items(trait_def_id) { if item.kind == ty::AssociatedKind::Method && item.ident.name == method_name { let method_ty = self.tcx.type_of(item.def_id); let method_ty = method_ty.subst(self.tcx, substs); - return (method_ty, ty::Const::zero_sized(self.tcx, method_ty)); + return (method_ty, ty::Const::zero_sized(method_ty)); } } @@ -201,19 +196,6 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { ty.needs_drop(self.tcx.global_tcx(), param_env) } - fn lint_level_of(&self, node_id: ast::NodeId) -> LintLevel { - let hir_id = self.tcx.hir().definitions().node_to_hir_id(node_id); - let has_lint_level = self.tcx.dep_graph.with_ignore(|| { - self.tcx.lint_levels(LOCAL_CRATE).lint_level_set(hir_id).is_some() - }); - - if has_lint_level { - LintLevel::Explicit(node_id) - } else { - LintLevel::Inherited - } - } - pub fn tcx(&self) -> TyCtxt<'a, 'gcx, 'tcx> { self.tcx } @@ -226,8 +208,8 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> { self.check_overflow } - pub fn type_moves_by_default(&self, ty: Ty<'tcx>, span: Span) -> bool { - 
self.infcx.type_moves_by_default(self.param_env, ty, span) + pub fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool { + self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) } } @@ -241,31 +223,6 @@ impl UserAnnotatedTyHelpers<'gcx, 'tcx> for Cx<'_, 'gcx, 'tcx> { } } -fn lint_level_for_hir_id(tcx: TyCtxt, mut id: ast::NodeId) -> ast::NodeId { - // Right now we insert a `with_ignore` node in the dep graph here to - // ignore the fact that `lint_levels` below depends on the entire crate. - // For now this'll prevent false positives of recompiling too much when - // anything changes. - // - // Once red/green incremental compilation lands we should be able to - // remove this because while the crate changes often the lint level map - // will change rarely. - tcx.dep_graph.with_ignore(|| { - let sets = tcx.lint_levels(LOCAL_CRATE); - loop { - let hir_id = tcx.hir().definitions().node_to_hir_id(id); - if sets.lint_level_set(hir_id).is_some() { - return id - } - let next = tcx.hir().get_parent_node(id); - if next == id { - bug!("lint traversal reached the root of the crate"); - } - id = next; - } - }) -} - mod block; mod expr; mod to_ref; diff --git a/src/librustc_mir/hair/cx/to_ref.rs b/src/librustc_mir/hair/cx/to_ref.rs index 6930a959d6515..a462c61c2acba 100644 --- a/src/librustc_mir/hair/cx/to_ref.rs +++ b/src/librustc_mir/hair/cx/to_ref.rs @@ -1,14 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use hair::*; +use crate::hair::*; use rustc::hir; use syntax::ptr::P; diff --git a/src/librustc_mir/hair/mod.rs b/src/librustc_mir/hair/mod.rs index b254fce4b7684..a661649db0fd4 100644 --- a/src/librustc_mir/hair/mod.rs +++ b/src/librustc_mir/hair/mod.rs @@ -1,27 +1,17 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The MIR is built from some high-level abstract IR //! (HAIR). This section defines the HAIR along with a trait for //! accessing it. The intention is to allow MIR construction to be //! unit-tested and separated from the Rust source and compiler data //! structures. 
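The module docs above describe HAIR as an intermediate layer that keeps MIR construction independent of the compiler's HIR and `tcx` machinery, with lowering done through a `Mirror` trait and the `Cx` context. The following is a minimal, self-contained sketch of that shape; all types and names are made up for illustration and are not the compiler's actual API:

```rust
// Illustrative sketch only: a HIR-like input is "mirrored" into a simpler
// HAIR-like structure through a context object, so the construction step
// can be exercised in isolation from the rest of the compiler.
struct Cx {
    exprs_lowered: usize, // stand-in for the real context's tables and caches
}

trait Mirror {
    type Output;
    fn make_mirror(self, cx: &mut Cx) -> Self::Output;
}

// "HIR" side: a syntactic expression.
enum HirExpr {
    Lit(i64),
    Neg(Box<HirExpr>),
}

// "HAIR" side: the lowered form that MIR construction would consume.
#[derive(Debug)]
enum HairExpr {
    Literal(i64),
    Unary { arg: Box<HairExpr> },
}

impl Mirror for HirExpr {
    type Output = HairExpr;
    fn make_mirror(self, cx: &mut Cx) -> HairExpr {
        cx.exprs_lowered += 1;
        match self {
            HirExpr::Lit(n) => HairExpr::Literal(n),
            HirExpr::Neg(inner) => HairExpr::Unary {
                arg: Box::new((*inner).make_mirror(cx)),
            },
        }
    }
}

fn main() {
    let mut cx = Cx { exprs_lowered: 0 };
    let hair = HirExpr::Neg(Box::new(HirExpr::Lit(7))).make_mirror(&mut cx);
    println!("{:?} (lowered {} expressions)", hair, cx.exprs_lowered);
}
```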
-use rustc::mir::{BinOp, BorrowKind, UserTypeAnnotation, Field, UnOp}; +use rustc::mir::{BinOp, BorrowKind, Field, UnOp}; use rustc::hir::def_id::DefId; +use rustc::infer::canonical::Canonical; use rustc::middle::region; -use rustc::ty::subst::Substs; -use rustc::ty::{AdtDef, UpvarSubsts, Region, Ty, Const}; +use rustc::ty::subst::SubstsRef; +use rustc::ty::{AdtDef, UpvarSubsts, Ty, Const, UserType}; use rustc::ty::layout::VariantIdx; use rustc::hir; -use syntax::ast; use syntax_pos::Span; use self::cx::Cx; @@ -30,14 +20,14 @@ mod constant; pub mod pattern; pub use self::pattern::{BindingMode, Pattern, PatternKind, PatternRange, FieldPattern}; -pub(crate) use self::pattern::{PatternTypeProjection, PatternTypeProjections}; +pub(crate) use self::pattern::PatternTypeProjection; mod util; #[derive(Copy, Clone, Debug)] pub enum LintLevel { Inherited, - Explicit(ast::NodeId) + Explicit(hir::HirId) } impl LintLevel { @@ -63,7 +53,7 @@ pub struct Block<'tcx> { #[derive(Copy, Clone, Debug)] pub enum BlockSafety { Safe, - ExplicitUnsafe(ast::NodeId), + ExplicitUnsafe(hir::HirId), PushUnsafe, PopUnsafe } @@ -195,10 +185,14 @@ pub enum ExprKind<'tcx> { }, ClosureFnPointer { source: ExprRef<'tcx>, + unsafety: hir::Unsafety, }, UnsafeFnPointer { source: ExprRef<'tcx>, }, + MutToConstPointer { + source: ExprRef<'tcx>, + }, Unsize { source: ExprRef<'tcx>, }, @@ -212,7 +206,7 @@ pub enum ExprKind<'tcx> { body: ExprRef<'tcx>, }, Match { - discriminant: ExprRef<'tcx>, + scrutinee: ExprRef<'tcx>, arms: Vec>, }, Block { @@ -236,7 +230,7 @@ pub enum ExprKind<'tcx> { index: ExprRef<'tcx>, }, VarRef { - id: ast::NodeId, + id: hir::HirId, }, /// first argument, used for self in a closure SelfRef, @@ -244,7 +238,6 @@ pub enum ExprKind<'tcx> { id: DefId, }, Borrow { - region: Region<'tcx>, borrow_kind: BorrowKind, arg: ExprRef<'tcx>, }, @@ -271,11 +264,11 @@ pub enum ExprKind<'tcx> { Adt { adt_def: &'tcx AdtDef, variant_index: VariantIdx, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, /// Optional user-given substs: for something like `let x = /// Bar:: { ... }`. - user_ty: Option>, + user_ty: Option>>, fields: Vec>, base: Option> @@ -283,12 +276,12 @@ pub enum ExprKind<'tcx> { PlaceTypeAscription { source: ExprRef<'tcx>, /// Type that the user gave to this expression - user_ty: Option>, + user_ty: Option>>, }, ValueTypeAscription { source: ExprRef<'tcx>, /// Type that the user gave to this expression - user_ty: Option>, + user_ty: Option>>, }, Closure { closure_id: DefId, @@ -298,7 +291,7 @@ pub enum ExprKind<'tcx> { }, Literal { literal: &'tcx Const<'tcx>, - user_ty: Option>, + user_ty: Option>>, }, InlineAsm { asm: &'tcx hir::InlineAsm, @@ -368,7 +361,7 @@ impl<'tcx> ExprRef<'tcx> { /// Mirroring is gradual: when you mirror an outer expression like `e1 /// + e2`, the references to the inner expressions `e1` and `e2` are /// `ExprRef<'tcx>` instances, and they may or may not be eagerly -/// mirrored. This allows a single AST node from the compiler to +/// mirrored. This allows a single AST node from the compiler to /// expand into one or more Hair nodes, which lets the Hair nodes be /// simpler. pub trait Mirror<'tcx> { diff --git a/src/librustc_mir/hair/pattern/_match.rs b/src/librustc_mir/hair/pattern/_match.rs index 7ec6bbfe3c11e..a9c521f59a96c 100644 --- a/src/librustc_mir/hair/pattern/_match.rs +++ b/src/librustc_mir/hair/pattern/_match.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2016 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /// This file includes the logic for exhaustiveness and usefulness checking for /// pattern-matching. Specifically, given a list of patterns for a type, we can /// tell whether: @@ -16,8 +6,8 @@ /// /// The algorithm implemented here is a modified version of the one described in: /// http://moscova.inria.fr/~maranget/papers/warn/index.html -/// However, to save future implementors from reading the original paper, I'm going -/// to summarise the algorithm here to hopefully save time and be a little clearer +/// However, to save future implementors from reading the original paper, we +/// summarise the algorithm here to hopefully save time and be a little clearer /// (without being so rigorous). /// /// The core of the algorithm revolves about a "usefulness" check. In particular, we @@ -182,7 +172,7 @@ use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Const}; use rustc::ty::layout::{Integer, IntegerExt, VariantIdx, Size}; use rustc::mir::Field; -use rustc::mir::interpret::{ConstValue, Pointer, Scalar}; +use rustc::mir::interpret::{ConstValue, Scalar, truncate}; use rustc::util::common::ErrorReported; use syntax::attr::{SignedInt, UnsignedInt}; @@ -224,20 +214,22 @@ impl<'a, 'tcx> LiteralExpander<'a, 'tcx> { match (val, &crty.sty, &rty.sty) { // the easy case, deref a reference (ConstValue::Scalar(Scalar::Ptr(p)), x, y) if x == y => ConstValue::ByRef( - p.alloc_id, + p, self.tcx.alloc_map.lock().unwrap_memory(p.alloc_id), - p.offset, ), // unsize array to slice if pattern is array but match value or other patterns are slice (ConstValue::Scalar(Scalar::Ptr(p)), ty::Array(t, n), ty::Slice(u)) => { assert_eq!(t, u); - ConstValue::ScalarPair( + ConstValue::Slice( Scalar::Ptr(p), - n.val.try_to_scalar().unwrap(), + n.val.try_to_scalar() + .unwrap() + .to_usize(&self.tcx) + .unwrap(), ) }, // fat pointers stay the same - (ConstValue::ScalarPair(..), _, _) => val, + (ConstValue::Slice(..), _, _) => val, // FIXME(oli-obk): this is reachable for `const FOO: &&&u32 = &&&42;` being used _ => bug!("cannot deref {:#?}, {} -> {}", val, crty, rty), } @@ -261,11 +253,10 @@ impl<'a, 'tcx> PatternFolder<'tcx> for LiteralExpander<'a, 'tcx> { subpattern: Pattern { ty: rty, span: pat.span, - kind: box PatternKind::Constant { value: Const::from_const_value( - self.tcx, - self.fold_const_value_deref(*val, rty, crty), - rty, - ) }, + kind: box PatternKind::Constant { value: Const { + val: self.fold_const_value_deref(val, rty, crty), + ty: rty, + } }, } } } @@ -315,7 +306,7 @@ impl<'p, 'tcx> Matrix<'p, 'tcx> { /// + _ + [_, _, ..tail] + /// ++++++++++++++++++++++++++ impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "\n")?; let &Matrix(ref m) = self; @@ -359,7 +350,7 @@ pub struct MatchCheckCtxt<'a, 'tcx: 'a> { /// The module in which the match occurs. This is necessary for /// checking inhabited-ness of types because whether a type is (visibly) /// inhabited can depend on whether it was defined in the current module or - /// not. eg. `struct Foo { _private: ! }` cannot be seen to be empty + /// not. E.g., `struct Foo { _private: ! 
}` cannot be seen to be empty /// outside it's module and should not be matchable with an empty match /// statement. pub module: DefId, @@ -408,29 +399,17 @@ impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> { _ => false, } } - - fn is_variant_uninhabited(&self, - variant: &'tcx ty::VariantDef, - substs: &'tcx ty::subst::Substs<'tcx>) - -> bool - { - if self.tcx.features().exhaustive_patterns { - self.tcx.is_enum_variant_uninhabited_from(self.module, variant, substs) - } else { - false - } - } } #[derive(Clone, Debug, PartialEq)] -pub enum Constructor<'tcx> { +enum Constructor<'tcx> { /// The constructor of all patterns that don't vary by constructor, /// e.g., struct patterns and fixed-length arrays. Single, /// Enum variants. Variant(DefId), /// Literal values. - ConstantValue(&'tcx ty::Const<'tcx>), + ConstantValue(ty::Const<'tcx>), /// Ranges of literal values (`2...5` and `2..5`). ConstantRange(u128, u128, Ty<'tcx>, RangeEnd), /// Array patterns of length n. @@ -438,13 +417,18 @@ pub enum Constructor<'tcx> { } impl<'tcx> Constructor<'tcx> { - fn variant_index_for_adt(&self, adt: &'tcx ty::AdtDef) -> VariantIdx { + fn variant_index_for_adt<'a>( + &self, + cx: &MatchCheckCtxt<'a, 'tcx>, + adt: &'tcx ty::AdtDef, + ) -> VariantIdx { match self { - &Variant(vid) => adt.variant_index_with_id(vid), + &Variant(id) => adt.variant_index_with_id(id), &Single => { assert!(!adt.is_enum()); VariantIdx::new(0) } + &ConstantValue(c) => crate::const_eval::const_variant_index(cx.tcx, cx.param_env, c), _ => bug!("bad constructor {:?} for adt {:?}", self, adt) } } @@ -537,7 +521,6 @@ impl<'tcx> Witness<'tcx> { self.apply_constructor(cx, ctor, ty) } - /// Constructs a partial witness for a pattern given a list of /// patterns expanded by the specialization step. /// @@ -578,7 +561,7 @@ impl<'tcx> Witness<'tcx> { PatternKind::Variant { adt_def: adt, substs, - variant_index: ctor.variant_index_for_adt(adt), + variant_index: ctor.variant_index_for_adt(cx, adt), subpatterns: pats } } else { @@ -631,8 +614,8 @@ impl<'tcx> Witness<'tcx> { /// but is instead bounded by the maximum fixed length of slice patterns in /// the column of patterns being analyzed. /// -/// We make sure to omit constructors that are statically impossible. eg for -/// Option we do not include Some(_) in the returned list of constructors. +/// We make sure to omit constructors that are statically impossible. E.g., for +/// `Option`, we do not include `Some(_)` in the returned list of constructors. fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, pcx: PatternContext<'tcx>) -> Vec> @@ -663,8 +646,11 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } ty::Adt(def, substs) if def.is_enum() => { def.variants.iter() - .filter(|v| !cx.is_variant_uninhabited(v, substs)) - .map(|v| Variant(v.did)) + .filter(|v| { + !cx.tcx.features().exhaustive_patterns || + !v.uninhabited_from(cx.tcx, substs, def.adt_kind()).contains(cx.tcx, cx.module) + }) + .map(|v| Variant(v.def_id)) .collect() } ty::Char => { @@ -683,16 +669,14 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, ] } ty::Int(ity) => { - // FIXME(49937): refactor these bit manipulations into interpret. let bits = Integer::from_attr(&cx.tcx, SignedInt(ity)).size().bits() as u128; let min = 1u128 << (bits - 1); - let max = (1u128 << (bits - 1)) - 1; + let max = min - 1; vec![ConstantRange(min, max, pcx.ty, RangeEnd::Included)] } ty::Uint(uty) => { - // FIXME(49937): refactor these bit manipulations into interpret. 
- let bits = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size().bits() as u128; - let max = !0u128 >> (128 - bits); + let size = Integer::from_attr(&cx.tcx, UnsignedInt(uty)).size(); + let max = truncate(u128::max_value(), size); vec![ConstantRange(0, max, pcx.ty, RangeEnd::Included)] } _ => { @@ -788,9 +772,9 @@ fn max_slice_length<'p, 'a: 'p, 'tcx: 'a, I>( max_fixed_len, n.unwrap_usize(cx.tcx), ), - (ConstValue::ScalarPair(_, n), ty::Slice(_)) => max_fixed_len = cmp::max( + (ConstValue::Slice(_, n), ty::Slice(_)) => max_fixed_len = cmp::max( max_fixed_len, - n.to_usize(&cx.tcx).unwrap(), + n, ), _ => {}, } @@ -868,18 +852,24 @@ impl<'tcx> IntRange<'tcx> { } fn from_pat(tcx: TyCtxt<'_, 'tcx, 'tcx>, - pat: &Pattern<'tcx>) + mut pat: &Pattern<'tcx>) -> Option> { - Self::from_ctor(tcx, &match pat.kind { - box PatternKind::Constant { value } => ConstantValue(value), - box PatternKind::Range(PatternRange { lo, hi, ty, end }) => ConstantRange( - lo.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), - hi.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), - ty, - end, - ), - _ => return None, - }) + let range = loop { + match pat.kind { + box PatternKind::Constant { value } => break ConstantValue(value), + box PatternKind::Range(PatternRange { lo, hi, ty, end }) => break ConstantRange( + lo.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), + hi.to_bits(tcx, ty::ParamEnv::empty().and(ty)).unwrap(), + ty, + end, + ), + box PatternKind::AscribeUserType { ref subpattern, .. } => { + pat = subpattern; + }, + _ => return None, + } + }; + Self::from_ctor(tcx, &range) } // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it. @@ -893,7 +883,7 @@ impl<'tcx> IntRange<'tcx> { } } - /// Convert a `RangeInclusive` to a `ConstantValue` or inclusive `ConstantRange`. + /// Converts a `RangeInclusive` to a `ConstantValue` or inclusive `ConstantRange`. fn range_to_ctor( tcx: TyCtxt<'_, 'tcx, 'tcx>, ty: Ty<'tcx>, @@ -909,7 +899,7 @@ impl<'tcx> IntRange<'tcx> { } } - /// Return a collection of ranges that spans the values covered by `ranges`, subtracted + /// Returns a collection of ranges that spans the values covered by `ranges`, subtracted /// by the values covered by `self`: i.e., `ranges \ self` (in set notation). fn subtract_from(self, tcx: TyCtxt<'_, 'tcx, 'tcx>, @@ -1030,13 +1020,13 @@ fn compute_missing_ctors<'a, 'tcx: 'a>( } } -/// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html +/// Algorithm from http://moscova.inria.fr/~maranget/papers/warn/index.html. /// The algorithm from the paper has been modified to correctly handle empty /// types. The changes are: /// (0) We don't exit early if the pattern matrix has zero rows. We just /// continue to recurse over columns. /// (1) all_constructors will only return constructors that are statically -/// possible. eg. it will only return Ok for Result +/// possible. E.g., it will only return `Ok` for `Result`. /// /// This finds whether a (row) vector `v` of patterns is 'useful' in relation /// to a set of such vectors `m` - this is defined as there being a set of @@ -1044,8 +1034,8 @@ fn compute_missing_ctors<'a, 'tcx: 'a>( /// /// All the patterns at each column of the `matrix ++ v` matrix must /// have the same type, except that wildcard (PatternKind::Wild) patterns -/// with type TyErr are also allowed, even if the "type of the column" -/// is not TyErr. 
That is used to represent private fields, as using their +/// with type `TyErr` are also allowed, even if the "type of the column" +/// is not `TyErr`. That is used to represent private fields, as using their /// real type would assert that they are inhabited. /// /// This is used both for reachability checking (if a pattern isn't useful in @@ -1112,7 +1102,7 @@ pub fn is_useful<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } else { debug!("is_useful - expanding wildcard"); - let used_ctors: Vec = rows.iter().flat_map(|row| { + let used_ctors: Vec> = rows.iter().flat_map(|row| { pat_constructors(cx, row[0], pcx).unwrap_or(vec![]) }).collect(); debug!("used_ctors = {:#?}", used_ctors); @@ -1296,10 +1286,10 @@ fn is_useful_specialized<'p, 'a: 'p, 'tcx: 'a>( /// Slice patterns, however, can match slices of different lengths. For instance, /// `[a, b, ..tail]` can match a slice of length 2, 3, 4 and so on. /// -/// Returns None in case of a catch-all, which can't be specialized. +/// Returns `None` in case of a catch-all, which can't be specialized. fn pat_constructors<'tcx>(cx: &mut MatchCheckCtxt<'_, 'tcx>, pat: &Pattern<'tcx>, - pcx: PatternContext) + pcx: PatternContext<'_>) -> Option>> { match *pat.kind { @@ -1308,7 +1298,7 @@ fn pat_constructors<'tcx>(cx: &mut MatchCheckCtxt<'_, 'tcx>, PatternKind::Binding { .. } | PatternKind::Wild => None, PatternKind::Leaf { .. } | PatternKind::Deref { .. } => Some(vec![Single]), PatternKind::Variant { adt_def, variant_index, .. } => { - Some(vec![Variant(adt_def.variants[variant_index].did)]) + Some(vec![Variant(adt_def.variants[variant_index].def_id)]) } PatternKind::Constant { value } => Some(vec![ConstantValue(value)]), PatternKind::Range(PatternRange { lo, hi, ty, end }) => @@ -1338,9 +1328,9 @@ fn pat_constructors<'tcx>(cx: &mut MatchCheckCtxt<'_, 'tcx>, /// This computes the arity of a constructor. The arity of a constructor /// is how many subpattern patterns of that constructor should be expanded to. /// -/// For instance, a tuple pattern (_, 42, Some([])) has the arity of 3. +/// For instance, a tuple pattern `(_, 42, Some([]))` has the arity of 3. /// A struct pattern's arity is the number of fields it contains, etc. -fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 { +fn constructor_arity(cx: &MatchCheckCtxt<'a, 'tcx>, ctor: &Constructor<'tcx>, ty: Ty<'tcx>) -> u64 { debug!("constructor_arity({:#?}, {:?})", ctor, ty); match ty.sty { ty::Tuple(ref fs) => fs.len() as u64, @@ -1348,10 +1338,10 @@ fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 { Slice(length) => length, ConstantValue(_) => 0, _ => bug!("bad slice pattern {:?} {:?}", ctor, ty) - }, + } ty::Ref(..) => 1, ty::Adt(adt, _) => { - adt.variants[ctor.variant_index_for_adt(adt)].fields.len() as u64 + adt.variants[ctor.variant_index_for_adt(cx, adt)].fields.len() as u64 } _ => 0 } @@ -1362,7 +1352,7 @@ fn constructor_arity(_cx: &MatchCheckCtxt, ctor: &Constructor, ty: Ty) -> u64 { /// /// For instance, a tuple pattern (43u32, 'a') has sub pattern types [u32, char]. 
fn constructor_sub_pattern_tys<'a, 'tcx: 'a>(cx: &MatchCheckCtxt<'a, 'tcx>, - ctor: &Constructor, + ctor: &Constructor<'tcx>, ty: Ty<'tcx>) -> Vec> { debug!("constructor_sub_pattern_tys({:#?}, {:?})", ctor, ty); @@ -1372,18 +1362,25 @@ fn constructor_sub_pattern_tys<'a, 'tcx: 'a>(cx: &MatchCheckCtxt<'a, 'tcx>, Slice(length) => (0..length).map(|_| ty).collect(), ConstantValue(_) => vec![], _ => bug!("bad slice pattern {:?} {:?}", ctor, ty) - }, + } ty::Ref(_, rty, _) => vec![rty], ty::Adt(adt, substs) => { if adt.is_box() { // Use T as the sub pattern type of Box. vec![substs.type_at(0)] } else { - adt.variants[ctor.variant_index_for_adt(adt)].fields.iter().map(|field| { + adt.variants[ctor.variant_index_for_adt(cx, adt)].fields.iter().map(|field| { let is_visible = adt.is_enum() || field.vis.is_accessible_from(cx.module, cx.tcx); if is_visible { - field.ty(cx.tcx, substs) + let ty = field.ty(cx.tcx, substs); + match ty.sty { + // If the field type returned is an array of an unknown + // size return an TyErr. + ty::Array(_, len) if len.assert_usize(cx.tcx).is_none() => + cx.tcx.types.err, + _ => ty, + } } else { // Treat all non-visible fields as TyErr. They // can't appear in any other pattern from @@ -1406,13 +1403,13 @@ fn constructor_sub_pattern_tys<'a, 'tcx: 'a>(cx: &MatchCheckCtxt<'a, 'tcx>, fn slice_pat_covered_by_const<'tcx>( tcx: TyCtxt<'_, 'tcx, '_>, _span: Span, - const_val: &ty::Const<'tcx>, + const_val: ty::Const<'tcx>, prefix: &[Pattern<'tcx>], slice: &Option>, suffix: &[Pattern<'tcx>] ) -> Result { let data: &[u8] = match (const_val.val, &const_val.ty.sty) { - (ConstValue::ByRef(id, alloc, offset), ty::Array(t, n)) => { + (ConstValue::ByRef(ptr, alloc), ty::Array(t, n)) => { if *t != tcx.types.u8 { // FIXME(oli-obk): can't mix const patterns with slice patterns and get // any sort of exhaustiveness/unreachable check yet @@ -1420,12 +1417,11 @@ fn slice_pat_covered_by_const<'tcx>( // are definitely unreachable. return Ok(false); } - let ptr = Pointer::new(id, offset); let n = n.assert_usize(tcx).unwrap(); alloc.get_bytes(&tcx, ptr, Size::from_bytes(n)).unwrap() }, // a slice fat pointer to a zero length slice - (ConstValue::ScalarPair(Scalar::Bits { .. }, n), ty::Slice(t)) => { + (ConstValue::Slice(Scalar::Bits { .. }, 0), ty::Slice(t)) => { if *t != tcx.types.u8 { // FIXME(oli-obk): can't mix const patterns with slice patterns and get // any sort of exhaustiveness/unreachable check yet @@ -1433,11 +1429,10 @@ fn slice_pat_covered_by_const<'tcx>( // are definitely unreachable. return Ok(false); } - assert_eq!(n.to_usize(&tcx).unwrap(), 0); &[] }, // - (ConstValue::ScalarPair(Scalar::Ptr(ptr), n), ty::Slice(t)) => { + (ConstValue::Slice(Scalar::Ptr(ptr), n), ty::Slice(t)) => { if *t != tcx.types.u8 { // FIXME(oli-obk): can't mix const patterns with slice patterns and get // any sort of exhaustiveness/unreachable check yet @@ -1445,7 +1440,6 @@ fn slice_pat_covered_by_const<'tcx>( // are definitely unreachable. return Ok(false); } - let n = n.to_usize(&tcx).unwrap(); tcx.alloc_map .lock() .unwrap_memory(ptr.alloc_id) @@ -1606,7 +1600,7 @@ fn split_grouped_constructors<'p, 'a: 'p, 'tcx: 'a>( split_ctors } -/// Check whether there exists any shared value in either `ctor` or `pat` by intersecting them. +/// Checks whether there exists any shared value in either `ctor` or `pat` by intersecting them. 
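As documented just above, for integer literal and range patterns this check boils down to an overlap test between two non-empty inclusive ranges, after the endpoints have been encoded into `u128` with a signed bias so that signed and unsigned types compare uniformly. A minimal standalone version of that overlap test, with the bias encoding left out:

```rust
use std::ops::RangeInclusive;

// Two non-empty inclusive ranges share at least one value iff each one
// starts no later than the other ends.
fn ranges_intersect(a: &RangeInclusive<u128>, b: &RangeInclusive<u128>) -> bool {
    a.start() <= b.end() && b.start() <= a.end()
}

fn main() {
    assert!(ranges_intersect(&(2..=5), &(5..=9)));  // share the value 5
    assert!(!ranges_intersect(&(0..=1), &(3..=4))); // disjoint
}
```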
fn constructor_intersects_pattern<'p, 'a: 'p, 'tcx: 'a>( tcx: TyCtxt<'a, 'tcx, 'tcx>, ctor: &Constructor<'tcx>, @@ -1739,11 +1733,9 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( PatternKind::Variant { adt_def, variant_index, ref subpatterns, .. } => { let ref variant = adt_def.variants[variant_index]; - if *constructor == Variant(variant.did) { - Some(patterns_for_variant(subpatterns, wild_patterns)) - } else { - None - } + Some(Variant(variant.def_id)) + .filter(|variant_constructor| variant_constructor == constructor) + .map(|_| patterns_for_variant(subpatterns, wild_patterns)) } PatternKind::Leaf { ref subpatterns } => { @@ -1761,23 +1753,37 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( // necessarily point to memory, they are usually just integers. The only time // they should be pointing to memory is when they are subslices of nonzero // slices - let (opt_ptr, n, ty) = match (value.val, &value.ty.sty) { - (ConstValue::ByRef(id, alloc, offset), ty::TyKind::Array(t, n)) => ( - Some(( - Pointer::new(id, offset), - alloc, - )), - n.unwrap_usize(cx.tcx), - t, - ), - (ConstValue::ScalarPair(ptr, n), ty::TyKind::Slice(t)) => ( - ptr.to_ptr().ok().map(|ptr| ( - ptr, - cx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id), - )), - n.to_bits(cx.tcx.data_layout.pointer_size).unwrap() as u64, - t, - ), + let (opt_ptr, n, ty) = match value.ty.sty { + ty::Array(t, n) => { + match value.val { + ConstValue::ByRef(ptr, alloc) => ( + Some((ptr, alloc)), + n.unwrap_usize(cx.tcx), + t, + ), + _ => span_bug!( + pat.span, + "array pattern is {:?}", value, + ), + } + }, + ty::Slice(t) => { + match value.val { + ConstValue::Slice(ptr, n) => ( + ptr.to_ptr().ok().map(|ptr| ( + ptr, + cx.tcx.alloc_map.lock().unwrap_memory(ptr.alloc_id), + )), + n, + t, + ), + _ => span_bug!( + pat.span, + "slice pattern constant must be scalar pair but is {:?}", + value, + ), + } + }, _ => span_bug!( pat.span, "unexpected const-val {:?} with ctor {:?}", @@ -1797,7 +1803,7 @@ fn specialize<'p, 'a: 'p, 'tcx: 'a>( &cx.tcx, ptr, layout.size, ).ok()?; let scalar = scalar.not_undef().ok()?; - let value = ty::Const::from_scalar(cx.tcx, scalar, ty); + let value = ty::Const::from_scalar(scalar, ty); let pattern = Pattern { ty, span: pat.span, diff --git a/src/librustc_mir/hair/pattern/check_match.rs b/src/librustc_mir/hair/pattern/check_match.rs index bfa2e53b9e0cf..7ded973701edc 100644 --- a/src/librustc_mir/hair/pattern/check_match.rs +++ b/src/librustc_mir/hair/pattern/check_match.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::_match::{MatchCheckCtxt, Matrix, expand_pattern, is_useful}; use super::_match::Usefulness::*; use super::_match::WitnessPreference::*; @@ -21,10 +11,9 @@ use rustc::middle::mem_categorization::cmt_; use rustc::middle::region; use rustc::session::Session; use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; use rustc::lint; use rustc_errors::{Applicability, DiagnosticBuilder}; -use rustc::util::common::ErrorReported; use rustc::hir::def::*; use rustc::hir::def_id::DefId; @@ -34,48 +23,23 @@ use rustc::hir::{self, Pat, PatKind}; use smallvec::smallvec; use std::slice; -use syntax::ast; use syntax::ptr::P; use syntax_pos::{Span, DUMMY_SP, MultiSpan}; -struct OuterVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> } - -impl<'a, 'tcx> Visitor<'tcx> for OuterVisitor<'a, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'tcx> { - NestedVisitorMap::OnlyBodies(&self.tcx.hir()) - } - - fn visit_body(&mut self, body: &'tcx hir::Body) { - intravisit::walk_body(self, body); - let def_id = self.tcx.hir().body_owner_def_id(body.id()); - let _ = self.tcx.check_match(def_id); - } -} - -pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - tcx.hir().krate().visit_all_item_likes(&mut OuterVisitor { tcx }.as_deep_visitor()); - tcx.sess.abort_if_errors(); -} - -pub(crate) fn check_match<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - def_id: DefId, -) -> Result<(), ErrorReported> { - let body_id = if let Some(id) = tcx.hir().as_local_node_id(def_id) { +pub(crate) fn check_match<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { + let body_id = if let Some(id) = tcx.hir().as_local_hir_id(def_id) { tcx.hir().body_owned_by(id) } else { - return Ok(()); + return; }; - tcx.sess.track_errors(|| { - MatchVisitor { - tcx, - tables: tcx.body_tables(body_id), - region_scope_tree: &tcx.region_scope_tree(def_id), - param_env: tcx.param_env(def_id), - identity_substs: Substs::identity_for_item(tcx, def_id), - }.visit_body(tcx.hir().body(body_id)); - }) + MatchVisitor { + tcx, + tables: tcx.body_tables(body_id), + region_scope_tree: &tcx.region_scope_tree(def_id), + param_env: tcx.param_env(def_id), + identity_substs: InternalSubsts::identity_for_item(tcx, def_id), + }.visit_body(tcx.hir().body(body_id)); } fn create_e0004<'a>(sess: &'a Session, sp: Span, error_message: String) -> DiagnosticBuilder<'a> { @@ -86,7 +50,7 @@ struct MatchVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &'a ty::TypeckTables<'tcx>, param_env: ty::ParamEnv<'tcx>, - identity_substs: &'tcx Substs<'tcx>, + identity_substs: SubstsRef<'tcx>, region_scope_tree: &'a region::ScopeTree, } @@ -193,7 +157,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { } } - let module = self.tcx.hir().get_module_parent(scrut.id); + let module = self.tcx.hir().get_module_parent_by_hir_id(scrut.hir_id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut have_errors = false; @@ -224,58 +188,79 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { // Then, if the match has no arms, check whether the scrutinee // is uninhabited. 
- let pat_ty = self.tables.node_id_to_type(scrut.hir_id); - let module = self.tcx.hir().get_module_parent(scrut.id); + let pat_ty = self.tables.node_type(scrut.hir_id); + let module = self.tcx.hir().get_module_parent_by_hir_id(scrut.hir_id); + let mut def_span = None; + let mut missing_variants = vec![]; if inlined_arms.is_empty() { let scrutinee_is_uninhabited = if self.tcx.features().exhaustive_patterns { self.tcx.is_ty_uninhabited_from(module, pat_ty) } else { - self.conservative_is_uninhabited(pat_ty) + match pat_ty.sty { + ty::Never => true, + ty::Adt(def, _) => { + def_span = self.tcx.hir().span_if_local(def.did); + if def.variants.len() < 4 && !def.variants.is_empty() { + // keep around to point at the definition of non-covered variants + missing_variants = def.variants.iter() + .map(|variant| variant.ident) + .collect(); + } + def.variants.is_empty() + }, + _ => false + } }; if !scrutinee_is_uninhabited { // We know the type is inhabited, so this must be wrong - let mut err = create_e0004(self.tcx.sess, scrut.span, - format!("non-exhaustive patterns: type `{}` \ - is non-empty", - pat_ty)); - span_help!(&mut err, scrut.span, - "ensure that all possible cases are being handled, \ - possibly by adding wildcards or more match arms"); + let mut err = create_e0004( + self.tcx.sess, + scrut.span, + format!("non-exhaustive patterns: {}", match missing_variants.len() { + 0 => format!("type `{}` is non-empty", pat_ty), + 1 => format!( + "pattern `{}` of type `{}` is not handled", + missing_variants[0].name, + pat_ty, + ), + _ => format!("multiple patterns of type `{}` are not handled", pat_ty), + }), + ); + err.help("ensure that all possible cases are being handled, \ + possibly by adding wildcards or more match arms"); + if let Some(sp) = def_span { + err.span_label(sp, format!("`{}` defined here", pat_ty)); + } + // point at the definition of non-covered enum variants + for variant in &missing_variants { + err.span_label(variant.span, "variant not covered"); + } err.emit(); } // If the type *is* uninhabited, it's vacuously exhaustive return; } - let matrix: Matrix = inlined_arms + let matrix: Matrix<'_, '_> = inlined_arms .iter() .filter(|&&(_, guard)| guard.is_none()) .flat_map(|arm| &arm.0) .map(|pat| smallvec![pat.0]) .collect(); - let scrut_ty = self.tables.node_id_to_type(scrut.hir_id); + let scrut_ty = self.tables.node_type(scrut.hir_id); check_exhaustive(cx, scrut_ty, scrut.span, &matrix); }) } - fn conservative_is_uninhabited(&self, scrutinee_ty: Ty<'tcx>) -> bool { - // "rustc-1.0-style" uncontentious uninhabitableness check - match scrutinee_ty.sty { - ty::Never => true, - ty::Adt(def, _) => def.variants.is_empty(), - _ => false - } - } - fn check_irrefutable(&self, pat: &'tcx Pat, origin: &str) { - let module = self.tcx.hir().get_module_parent(pat.id); + let module = self.tcx.hir().get_module_parent_by_hir_id(pat.hir_id); MatchCheckCtxt::create_and_enter(self.tcx, self.param_env, module, |ref mut cx| { let mut patcx = PatternContext::new(self.tcx, self.param_env.and(self.identity_substs), self.tables); let pattern = patcx.lower_pattern(pat); let pattern_ty = pattern.ty; - let pats: Matrix = vec![smallvec![ + let pats: Matrix<'_, '_> = vec![smallvec![ expand_pattern(cx, pattern) ]].into_iter().collect(); @@ -291,7 +276,7 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { }; let pattern_string = witness[0].single_pattern().to_string(); - let mut diag = struct_span_err!( + let mut err = struct_span_err!( self.tcx.sess, pat.span, E0005, "refutable pattern in {}: `{}` not covered", origin, 
pattern_string @@ -304,13 +289,18 @@ impl<'a, 'tcx> MatchVisitor<'a, 'tcx> { } _ => format!("pattern `{}` not covered", pattern_string), }; - diag.span_label(pat.span, label_msg); - diag.emit(); + err.span_label(pat.span, label_msg); + if let ty::Adt(def, _) = pattern_ty.sty { + if let Some(sp) = self.tcx.hir().span_if_local(def.did){ + err.span_label(sp, format!("`{}` defined here", pattern_ty)); + } + } + err.emit(); }); } } -fn check_for_bindings_named_same_as_variants(cx: &MatchVisitor, pat: &Pat) { +fn check_for_bindings_named_same_as_variants(cx: &MatchVisitor<'_, '_>, pat: &Pat) { pat.walk(|p| { if let PatKind::Binding(_, _, ident, None) = p.node { if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { @@ -321,14 +311,14 @@ fn check_for_bindings_named_same_as_variants(cx: &MatchVisitor, pat: &Pat) { let pat_ty = cx.tables.pat_ty(p); if let ty::Adt(edef, _) = pat_ty.sty { if edef.is_enum() && edef.variants.iter().any(|variant| { - variant.name == ident.name && variant.ctor_kind == CtorKind::Const + variant.ident == ident && variant.ctor_kind == CtorKind::Const }) { - let ty_path = cx.tcx.item_path_str(edef.did); + let ty_path = cx.tcx.def_path_str(edef.did); let mut err = struct_span_warn!(cx.tcx.sess, p.span, E0170, "pattern binding `{}` is named the same as one \ of the variants of the type `{}`", ident, ty_path); - err.span_suggestion_with_applicability( + err.span_suggestion( p.span, "to match on the variant, qualify the path", format!("{}::{}", ty_path, ident), @@ -359,13 +349,13 @@ fn pat_is_catchall(pat: &Pat) -> bool { } // Check for unreachable patterns -fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, - arms: &[(Vec<(&'a Pattern<'tcx>, &hir::Pat)>, Option<&hir::Expr>)], - source: hir::MatchSource) -{ +fn check_arms<'a, 'tcx>( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + arms: &[(Vec<(&'a Pattern<'tcx>, &hir::Pat)>, Option<&hir::Expr>)], + source: hir::MatchSource, +) { let mut seen = Matrix::empty(); let mut catchall = None; - let mut printed_if_let_err = false; for (arm_index, &(ref pats, guard)) in arms.iter().enumerate() { for &(pat, hir_pat) in pats { let v = smallvec![pat]; @@ -374,27 +364,12 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, NotUseful => { match source { hir::MatchSource::IfLetDesugar { .. } => { - if cx.tcx.features().irrefutable_let_patterns { - cx.tcx.lint_node( - lint::builtin::IRREFUTABLE_LET_PATTERNS, - hir_pat.id, pat.span, - "irrefutable if-let pattern"); - } else { - if printed_if_let_err { - // we already printed an irrefutable if-let pattern error. - // We don't want two, that's just confusing. - } else { - // find the first arm pattern so we can use its span - let &(ref first_arm_pats, _) = &arms[0]; - let first_pat = &first_arm_pats[0]; - let span = first_pat.0.span; - struct_span_err!(cx.tcx.sess, span, E0162, - "irrefutable if-let pattern") - .span_label(span, "irrefutable pattern") - .emit(); - printed_if_let_err = true; - } - } + cx.tcx.lint_hir( + lint::builtin::IRREFUTABLE_LET_PATTERNS, + hir_pat.hir_id, + pat.span, + "irrefutable if-let pattern", + ); } hir::MatchSource::WhileLetDesugar => { @@ -402,38 +377,29 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, match arm_index { // The arm with the user-specified pattern. 0 => { - cx.tcx.lint_node( + cx.tcx.lint_hir( lint::builtin::UNREACHABLE_PATTERNS, - hir_pat.id, pat.span, + hir_pat.hir_id, pat.span, "unreachable pattern"); }, // The arm with the wildcard pattern. 
1 => { - if cx.tcx.features().irrefutable_let_patterns { - cx.tcx.lint_node( - lint::builtin::IRREFUTABLE_LET_PATTERNS, - hir_pat.id, pat.span, - "irrefutable while-let pattern"); - } else { - // find the first arm pattern so we can use its span - let &(ref first_arm_pats, _) = &arms[0]; - let first_pat = &first_arm_pats[0]; - let span = first_pat.0.span; - struct_span_err!(cx.tcx.sess, span, E0165, - "irrefutable while-let pattern") - .span_label(span, "irrefutable pattern") - .emit(); - } + cx.tcx.lint_hir( + lint::builtin::IRREFUTABLE_LET_PATTERNS, + hir_pat.hir_id, + pat.span, + "irrefutable while-let pattern", + ); }, _ => bug!(), } - }, + } hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => { - let mut err = cx.tcx.struct_span_lint_node( + let mut err = cx.tcx.struct_span_lint_hir( lint::builtin::UNREACHABLE_PATTERNS, - hir_pat.id, + hir_pat.hir_id, pat.span, "unreachable pattern", ); @@ -443,7 +409,7 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, err.span_label(catchall, "matches any value"); } err.emit(); - }, + } // Unreachable patterns in try expressions occur when one of the arms // are an uninhabited type. Which is OK. @@ -463,10 +429,12 @@ fn check_arms<'a, 'tcx>(cx: &mut MatchCheckCtxt<'a, 'tcx>, } } -fn check_exhaustive<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, - scrut_ty: Ty<'tcx>, - sp: Span, - matrix: &Matrix<'p, 'tcx>) { +fn check_exhaustive<'p, 'a: 'p, 'tcx: 'a>( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + scrut_ty: Ty<'tcx>, + sp: Span, + matrix: &Matrix<'p, 'tcx>, +) { let wild_pattern = Pattern { ty: scrut_ty, span: DUMMY_SP, @@ -488,7 +456,7 @@ fn check_exhaustive<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, let (tail, head) = witnesses.split_last().unwrap(); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); format!("`{}` and `{}`", head.join("`, `"), tail) - }, + } _ => { let (head, tail) = witnesses.split_at(LIMIT); let head: Vec<_> = head.iter().map(|w| w.to_string()).collect(); @@ -498,25 +466,79 @@ fn check_exhaustive<'p, 'a: 'p, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>, let label_text = match witnesses.len() { 1 => format!("pattern {} not covered", joined_patterns), - _ => format!("patterns {} not covered", joined_patterns) + _ => format!("patterns {} not covered", joined_patterns), }; - create_e0004(cx.tcx.sess, sp, - format!("non-exhaustive patterns: {} not covered", - joined_patterns)) - .span_label(sp, label_text) - .emit(); + let mut err = create_e0004(cx.tcx.sess, sp, format!( + "non-exhaustive patterns: {} not covered", + joined_patterns, + )); + err.span_label(sp, label_text); + // point at the definition of non-covered enum variants + if let ty::Adt(def, _) = scrut_ty.sty { + if let Some(sp) = cx.tcx.hir().span_if_local(def.did){ + err.span_label(sp, format!("`{}` defined here", scrut_ty)); + } + } + let patterns = witnesses.iter().map(|p| (**p).clone()).collect::>>(); + if patterns.len() < 4 { + for sp in maybe_point_at_variant(cx, scrut_ty, patterns.as_slice()) { + err.span_label(sp, "not covered"); + } + } + err.help("ensure that all possible cases are being handled, \ + possibly by adding wildcards or more match arms"); + err.emit(); } NotUseful => { // This is good, wildcard pattern isn't reachable - }, + } _ => bug!() } } +fn maybe_point_at_variant( + cx: &mut MatchCheckCtxt<'a, 'tcx>, + ty: Ty<'tcx>, + patterns: &[Pattern<'_>], +) -> Vec { + let mut covered = vec![]; + if let ty::Adt(def, _) = ty.sty { + // Don't point at variants that have already been covered due to other 
patterns to avoid + // visual clutter + for pattern in patterns { + let pk: &PatternKind<'_> = &pattern.kind; + if let PatternKind::Variant { adt_def, variant_index, subpatterns, .. } = pk { + if adt_def.did == def.did { + let sp = def.variants[*variant_index].ident.span; + if covered.contains(&sp) { + continue; + } + covered.push(sp); + let subpatterns = subpatterns.iter() + .map(|field_pattern| field_pattern.pattern.clone()) + .collect::>(); + covered.extend( + maybe_point_at_variant(cx, ty, subpatterns.as_slice()), + ); + } + } + if let PatternKind::Leaf { subpatterns } = pk { + let subpatterns = subpatterns.iter() + .map(|field_pattern| field_pattern.pattern.clone()) + .collect::>(); + covered.extend(maybe_point_at_variant(cx, ty, subpatterns.as_slice())); + } + } + } + covered +} + // Legality of move bindings checking -fn check_legality_of_move_bindings(cx: &MatchVisitor, - has_guard: bool, - pats: &[P]) { +fn check_legality_of_move_bindings( + cx: &MatchVisitor<'_, '_>, + has_guard: bool, + pats: &[P], +) { let mut by_ref_span = None; for pat in pats { pat.each_binding(|_, hir_id, span, _path| { @@ -559,8 +581,8 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, if let Some(&bm) = cx.tables.pat_binding_modes().get(p.hir_id) { match bm { ty::BindByValue(..) => { - let pat_ty = cx.tables.node_id_to_type(p.hir_id); - if pat_ty.moves_by_default(cx.tcx, cx.param_env, pat.span) { + let pat_ty = cx.tables.node_type(p.hir_id); + if !pat_ty.is_copy_modulo_regions(cx.tcx, cx.param_env, pat.span) { check_move(p, sub.as_ref().map(|p| &**p), span_vec); } } @@ -589,11 +611,10 @@ fn check_legality_of_move_bindings(cx: &MatchVisitor, } } -/// Ensures that a pattern guard doesn't borrow by mutable reference or -/// assign. -/// -/// FIXME: this should be done by borrowck. -fn check_for_mutation_in_guard(cx: &MatchVisitor, guard: &hir::Guard) { +/// Ensures that a pattern guard doesn't borrow by mutable reference or assign. +// +// FIXME: this should be done by borrowck. +fn check_for_mutation_in_guard(cx: &MatchVisitor<'_, '_>, guard: &hir::Guard) { let mut checker = MutationChecker { cx, }; @@ -613,13 +634,13 @@ struct MutationChecker<'a, 'tcx: 'a> { } impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { - fn matched_pat(&mut self, _: &Pat, _: &cmt_, _: euv::MatchMode) {} - fn consume(&mut self, _: ast::NodeId, _: Span, _: &cmt_, _: ConsumeMode) {} - fn consume_pat(&mut self, _: &Pat, _: &cmt_, _: ConsumeMode) {} + fn matched_pat(&mut self, _: &Pat, _: &cmt_<'_>, _: euv::MatchMode) {} + fn consume(&mut self, _: hir::HirId, _: Span, _: &cmt_<'_>, _: ConsumeMode) {} + fn consume_pat(&mut self, _: &Pat, _: &cmt_<'_>, _: ConsumeMode) {} fn borrow(&mut self, - _: ast::NodeId, + _: hir::HirId, span: Span, - _: &cmt_, + _: &cmt_<'_>, _: ty::Region<'tcx>, kind:ty:: BorrowKind, _: LoanCause) { @@ -639,8 +660,8 @@ impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { ty::ImmBorrow | ty::UniqueImmBorrow => {} } } - fn decl_without_init(&mut self, _: ast::NodeId, _: Span) {} - fn mutate(&mut self, _: ast::NodeId, span: Span, _: &cmt_, mode: MutateMode) { + fn decl_without_init(&mut self, _: hir::HirId, _: Span) {} + fn mutate(&mut self, _: hir::HirId, span: Span, _: &cmt_<'_>, mode: MutateMode) { match mode { MutateMode::JustWrite | MutateMode::WriteAndRead => { struct_span_err!(self.cx.tcx.sess, span, E0302, "cannot assign in a pattern guard") @@ -655,7 +676,7 @@ impl<'a, 'tcx> Delegate<'tcx> for MutationChecker<'a, 'tcx> { /// Forbids bindings in `@` patterns. 
This is necessary for memory safety, /// because of the way rvalues are handled in the borrow check. (See issue /// #14587.) -fn check_legality_of_bindings_in_at_patterns(cx: &MatchVisitor, pat: &Pat) { +fn check_legality_of_bindings_in_at_patterns(cx: &MatchVisitor<'_, '_>, pat: &Pat) { AtBindingPatternVisitor { cx: cx, bindings_allowed: true }.visit_pat(pat); } diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index f78a70f6a25f4..fc12443c0923a 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -1,34 +1,24 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Code to validate patterns/matches +//! Validation of patterns/matches. mod _match; mod check_match; -pub use self::check_match::check_crate; pub(crate) use self::check_match::check_match; -use const_eval::{const_field, const_variant_index}; +use crate::const_eval::{const_field, const_variant_index}; -use hair::util::UserAnnotatedTyHelpers; -use hair::constant::*; +use crate::hair::util::UserAnnotatedTyHelpers; +use crate::hair::constant::*; use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability}; -use rustc::mir::{ProjectionElem, UserTypeAnnotation, UserTypeProjection, UserTypeProjections}; +use rustc::mir::{UserTypeProjection}; use rustc::mir::interpret::{Scalar, GlobalId, ConstValue, sign_extend}; -use rustc::ty::{self, Region, TyCtxt, AdtDef, Ty, Lift}; -use rustc::ty::subst::{Substs, Kind}; +use rustc::ty::{self, Region, TyCtxt, AdtDef, Ty, UserType, DefIdTree}; +use rustc::ty::{CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations}; +use rustc::ty::subst::{SubstsRef, Kind}; use rustc::ty::layout::VariantIdx; use rustc::hir::{self, PatKind, RangeEnd}; -use rustc::hir::def::{Def, CtorKind}; +use rustc::hir::def::{CtorOf, Def, CtorKind}; use rustc::hir::pat_util::EnumerateAndAdjustIterator; use rustc_data_structures::indexed_vec::Idx; @@ -48,9 +38,9 @@ pub enum PatternError { } #[derive(Copy, Clone, Debug)] -pub enum BindingMode<'tcx> { +pub enum BindingMode { ByValue, - ByRef(Region<'tcx>, BorrowKind), + ByRef(BorrowKind), } #[derive(Clone, Debug)] @@ -67,114 +57,58 @@ pub struct Pattern<'tcx> { } -#[derive(Clone, Debug)] -pub(crate) struct PatternTypeProjections<'tcx> { - contents: Vec<(PatternTypeProjection<'tcx>, Span)>, -} - -impl<'tcx> PatternTypeProjections<'tcx> { - pub(crate) fn user_ty(self) -> UserTypeProjections<'tcx> { - UserTypeProjections::from_projections( - self.contents.into_iter().map(|(pat_ty_proj, span)| (pat_ty_proj.user_ty(), span))) - } - - pub(crate) fn none() -> Self { - PatternTypeProjections { contents: vec![] } - } - - pub(crate) fn ref_binding(&self) -> Self { - // FIXME(#47184): ignore for now - PatternTypeProjections { contents: vec![] } - } - - fn map_projs(&self, - mut f: impl FnMut(&PatternTypeProjection<'tcx>) -> PatternTypeProjection<'tcx>) - -> Self - { - PatternTypeProjections { - contents: self.contents - .iter() - .map(|(proj, span)| (f(proj), *span)) - .collect(), } - } - - pub(crate) fn index(&self) -> Self { self.map_projs(|pat_ty_proj| pat_ty_proj.index()) } - - pub(crate) fn subslice(&self, from: u32, to: u32) -> Self { - self.map_projs(|pat_ty_proj| 
pat_ty_proj.subslice(from, to)) - } - - pub(crate) fn deref(&self) -> Self { self.map_projs(|pat_ty_proj| pat_ty_proj.deref()) } - - pub(crate) fn leaf(&self, field: Field) -> Self { - self.map_projs(|pat_ty_proj| pat_ty_proj.leaf(field)) - } - - pub(crate) fn variant(&self, - adt_def: &'tcx AdtDef, - variant_index: VariantIdx, - field: Field) -> Self { - self.map_projs(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field)) - } - - pub(crate) fn add_user_type(&self, user_ty: &PatternTypeProjection<'tcx>, sp: Span) -> Self { - let mut new = self.clone(); - new.contents.push((user_ty.clone(), sp)); - new - } +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct PatternTypeProjection<'tcx> { + pub user_ty: CanonicalUserType<'tcx>, } -#[derive(Clone, Debug)] -pub struct PatternTypeProjection<'tcx>(UserTypeProjection<'tcx>); - impl<'tcx> PatternTypeProjection<'tcx> { - pub(crate) fn index(&self) -> Self { - let mut new = self.clone(); - new.0.projs.push(ProjectionElem::Index(())); - new - } - - pub(crate) fn subslice(&self, from: u32, to: u32) -> Self { - let mut new = self.clone(); - new.0.projs.push(ProjectionElem::Subslice { from, to }); - new - } - - pub(crate) fn deref(&self) -> Self { - let mut new = self.clone(); - new.0.projs.push(ProjectionElem::Deref); - new - } - - pub(crate) fn leaf(&self, field: Field) -> Self { - let mut new = self.clone(); - new.0.projs.push(ProjectionElem::Field(field, ())); - new - } - - pub(crate) fn variant(&self, - adt_def: &'tcx AdtDef, - variant_index: VariantIdx, - field: Field) -> Self { - let mut new = self.clone(); - new.0.projs.push(ProjectionElem::Downcast(adt_def, variant_index)); - new.0.projs.push(ProjectionElem::Field(field, ())); - new - } - - pub(crate) fn from_canonical_ty(c_ty: ty::CanonicalTy<'tcx>) -> Self { - Self::from_user_type(UserTypeAnnotation::Ty(c_ty)) - } - - pub(crate) fn from_user_type(u_ty: UserTypeAnnotation<'tcx>) -> Self { - Self::from_user_type_proj(UserTypeProjection { base: u_ty, projs: vec![], }) + pub(crate) fn from_user_type(user_annotation: CanonicalUserType<'tcx>) -> Self { + Self { + user_ty: user_annotation, + } } - pub(crate) fn from_user_type_proj(u_ty: UserTypeProjection<'tcx>) -> Self { - PatternTypeProjection(u_ty) + pub(crate) fn user_ty( + self, + annotations: &mut CanonicalUserTypeAnnotations<'tcx>, + inferred_ty: Ty<'tcx>, + span: Span, + ) -> UserTypeProjection { + UserTypeProjection { + base: annotations.push(CanonicalUserTypeAnnotation { + span, + user_ty: self.user_ty, + inferred_ty, + }), + projs: Vec::new(), + } } +} - pub(crate) fn user_ty(self) -> UserTypeProjection<'tcx> { self.0 } +#[derive(Copy, Clone, Debug, PartialEq)] +pub struct Ascription<'tcx> { + pub user_ty: PatternTypeProjection<'tcx>, + /// Variance to use when relating the type `user_ty` to the **type of the value being + /// matched**. Typically, this is `Variance::Covariant`, since the value being matched must + /// have a type that is some subtype of the ascribed type. + /// + /// Note that this variance does not apply for any bindings within subpatterns. The type + /// assigned to those bindings must be exactly equal to the `user_ty` given here. + /// + /// The only place where this field is not `Covariant` is when matching constants, where + /// we currently use `Contravariant` -- this is because the constant type just needs to + /// be "comparable" to the type of the input value. So, for example: + /// + /// ```text + /// match x { "foo" => .. 
} + /// ``` + /// + /// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. Really, we should + /// probably be checking for a `PartialEq` impl instead, but this preserves the behavior + /// of the old type-check for now. See #57280 for details. + pub variance: ty::Variance, + pub user_ty_span: Span, } #[derive(Clone, Debug)] @@ -182,46 +116,47 @@ pub enum PatternKind<'tcx> { Wild, AscribeUserType { - user_ty: PatternTypeProjection<'tcx>, + ascription: Ascription<'tcx>, subpattern: Pattern<'tcx>, - user_ty_span: Span, }, - /// x, ref x, x @ P, etc + /// `x`, `ref x`, `x @ P`, etc. Binding { mutability: Mutability, name: ast::Name, - mode: BindingMode<'tcx>, - var: ast::NodeId, + mode: BindingMode, + var: hir::HirId, ty: Ty<'tcx>, subpattern: Option>, }, - /// Foo(...) or Foo{...} or Foo, where `Foo` is a variant name from an adt with >1 variants + /// `Foo(...)` or `Foo{...}` or `Foo`, where `Foo` is a variant name from an ADT with + /// multiple variants. Variant { adt_def: &'tcx AdtDef, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, variant_index: VariantIdx, subpatterns: Vec>, }, - /// (...), Foo(...), Foo{...}, or Foo, where `Foo` is a variant name from an adt with 1 variant + /// `(...)`, `Foo(...)`, `Foo{...}`, or `Foo`, where `Foo` is a variant name from an ADT with + /// a single variant. Leaf { subpatterns: Vec>, }, - /// box P, &P, &mut P, etc + /// `box P`, `&P`, `&mut P`, etc. Deref { subpattern: Pattern<'tcx>, }, Constant { - value: &'tcx ty::Const<'tcx>, + value: ty::Const<'tcx>, }, Range(PatternRange<'tcx>), - /// matches against a slice, checking the length and extracting elements. + /// Matches against a slice, checking the length and extracting elements. /// irrefutable when there is a slice pattern and both `prefix` and `suffix` are empty. /// e.g., `&[ref xs..]`. Slice { @@ -230,7 +165,7 @@ pub enum PatternKind<'tcx> { suffix: Vec>, }, - /// fixed match against an array, irrefutable + /// Fixed match against an array; irrefutable. Array { prefix: Vec>, slice: Option>, @@ -238,16 +173,16 @@ pub enum PatternKind<'tcx> { }, } -#[derive(Clone, Copy, Debug, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub struct PatternRange<'tcx> { - pub lo: &'tcx ty::Const<'tcx>, - pub hi: &'tcx ty::Const<'tcx>, + pub lo: ty::Const<'tcx>, + pub hi: ty::Const<'tcx>, pub ty: Ty<'tcx>, pub end: RangeEnd, } impl<'tcx> fmt::Display for Pattern<'tcx> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self.kind { PatternKind::Wild => write!(f, "_"), PatternKind::AscribeUserType { ref subpattern, .. } => @@ -255,7 +190,7 @@ impl<'tcx> fmt::Display for Pattern<'tcx> { PatternKind::Binding { mutability, name, mode, ref subpattern, .. } => { let is_mut = match mode { BindingMode::ByValue => mutability == Mutability::Mut, - BindingMode::ByRef(_, bk) => { + BindingMode::ByRef(bk) => { write!(f, "ref ")?; match bk { BorrowKind::Mut { .. } => true, _ => false } } @@ -290,7 +225,7 @@ impl<'tcx> fmt::Display for Pattern<'tcx> { let mut start_or_continue = || if first { first = false; "" } else { ", " }; if let Some(variant) = variant { - write!(f, "{}", variant.name)?; + write!(f, "{}", variant.ident)?; // Only for Adt we can have `S {...}`, // which we handle separately here. 
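For readers skimming the `check_exhaustive` and `maybe_point_at_variant` changes at the top of this section: they extend the E0004 diagnostic with "defined here" / "not covered" labels and a `help` note. A standalone sketch, not part of the patch, of the kind of match those labels describe (the enum is invented for illustration):

```rust
enum Direction {
    North,
    East,
    South,
    West,
}

fn name(d: &Direction) -> &'static str {
    // Dropping the last two arms would be rejected with E0004,
    // "non-exhaustive patterns: `South` and `West` not covered"; the patch
    // additionally points at the enum definition and at each uncovered
    // variant, and suggests adding wildcards or more match arms.
    match d {
        Direction::North => "north",
        Direction::East => "east",
        Direction::South => "south",
        Direction::West => "west",
    }
}

fn main() {
    for d in [Direction::North, Direction::East, Direction::South, Direction::West] {
        println!("{}", name(&d));
    }
}
```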
@@ -394,13 +329,13 @@ pub struct PatternContext<'a, 'tcx: 'a> { pub tcx: TyCtxt<'a, 'tcx, 'tcx>, pub param_env: ty::ParamEnv<'tcx>, pub tables: &'a ty::TypeckTables<'tcx>, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, pub errors: Vec, } impl<'a, 'tcx> Pattern<'tcx> { pub fn from_hir(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, + param_env_and_substs: ty::ParamEnvAnd<'tcx, SubstsRef<'tcx>>, tables: &'a ty::TypeckTables<'tcx>, pat: &'tcx hir::Pat) -> Self { let mut pcx = PatternContext::new(tcx, param_env_and_substs, tables); @@ -416,7 +351,7 @@ impl<'a, 'tcx> Pattern<'tcx> { impl<'a, 'tcx> PatternContext<'a, 'tcx> { pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env_and_substs: ty::ParamEnvAnd<'tcx, &'tcx Substs<'tcx>>, + param_env_and_substs: ty::ParamEnvAnd<'tcx, SubstsRef<'tcx>>, tables: &'a ty::TypeckTables<'tcx>) -> Self { PatternContext { tcx, @@ -465,8 +400,21 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ) } + fn lower_range_expr( + &mut self, + expr: &'tcx hir::Expr, + ) -> (PatternKind<'tcx>, Option>) { + match self.lower_lit(expr) { + PatternKind::AscribeUserType { + ascription: lo_ascription, + subpattern: Pattern { kind: box kind, .. }, + } => (kind, Some(lo_ascription)), + kind => (kind, None), + } + } + fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat) -> Pattern<'tcx> { - let mut ty = self.tables.node_id_to_type(pat.hir_id); + let mut ty = self.tables.node_type(pat.hir_id); let kind = match pat.node { PatKind::Wild => PatternKind::Wild, @@ -474,10 +422,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatKind::Lit(ref value) => self.lower_lit(value), PatKind::Range(ref lo_expr, ref hi_expr, end) => { - match (self.lower_lit(lo_expr), self.lower_lit(hi_expr)) { - (PatternKind::Constant { value: lo }, - PatternKind::Constant { value: hi }) => { - use std::cmp::Ordering; + let (lo, lo_ascription) = self.lower_range_expr(lo_expr); + let (hi, hi_ascription) = self.lower_range_expr(hi_expr); + + let mut kind = match (lo, hi) { + (PatternKind::Constant { value: lo }, PatternKind::Constant { value: hi }) => { let cmp = compare_const_vals( self.tcx, lo, @@ -524,9 +473,33 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatternKind::Wild } } + }, + ref pats => { + self.tcx.sess.delay_span_bug( + pat.span, + &format!( + "found bad range pattern `{:?}` outside of error recovery", + pats, + ), + ); + + PatternKind::Wild + }, + }; + + // If we are handling a range with associated constants (e.g. + // `Foo::<'a>::A..=Foo::B`), we need to put the ascriptions for the associated + // constants somewhere. Have them on the range pattern. 
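The new `lower_range_expr` helper above exists because a range endpoint can be a path to an associated constant carrying a user-written type, and that ascription needs somewhere to live on the resulting range pattern. A minimal surface-level illustration, not taken from the patch (`Limits` is invented):

```rust
struct Limits;

impl Limits {
    const LO: u8 = 10;
    const HI: u8 = 20;
}

fn classify(n: u8) -> &'static str {
    match n {
        // Each endpoint is lowered through `lower_range_expr`; any ascription
        // produced for `Limits::LO` or `Limits::HI` ends up attached to the
        // range pattern itself.
        Limits::LO..=Limits::HI => "in band",
        _ => "out of band",
    }
}

fn main() {
    assert_eq!(classify(15), "in band");
    assert_eq!(classify(42), "out of band");
}
```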
+ for ascription in &[lo_ascription, hi_ascription] { + if let Some(ascription) = ascription { + kind = PatternKind::AscribeUserType { + ascription: *ascription, + subpattern: Pattern { span: pat.span, ty, kind: Box::new(kind), }, + }; } - _ => PatternKind::Wild } + + kind } PatKind::Path(ref qpath) => { @@ -555,11 +528,11 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty::Error => { // Avoid ICE return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) }; } - ref sty => + _ => span_bug!( pat.span, "unexpanded type for vector pattern: {:?}", - sty), + ty), } } @@ -580,18 +553,15 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty::Error => { // Avoid ICE (#50577) return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) }; } - ref sty => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", sty), + _ => span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", ty), } } PatKind::Binding(_, id, ident, ref sub) => { - let var_ty = self.tables.node_id_to_type(pat.hir_id); - let region = match var_ty.sty { - ty::Ref(r, _, _) => Some(r), - ty::Error => { // Avoid ICE - return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) }; - } - _ => None, + let var_ty = self.tables.node_type(pat.hir_id); + if let ty::Error = var_ty.sty { + // Avoid ICE + return Pattern { span: pat.span, ty, kind: Box::new(PatternKind::Wild) }; }; let bm = *self.tables.pat_binding_modes().get(pat.hir_id) .expect("missing binding mode"); @@ -602,10 +572,10 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { (Mutability::Not, BindingMode::ByValue), ty::BindByReference(hir::MutMutable) => (Mutability::Not, BindingMode::ByRef( - region.unwrap(), BorrowKind::Mut { allow_two_phase_borrow: false })), + BorrowKind::Mut { allow_two_phase_borrow: false })), ty::BindByReference(hir::MutImmutable) => (Mutability::Not, BindingMode::ByRef( - region.unwrap(), BorrowKind::Shared)), + BorrowKind::Shared)), }; // A ref x pattern is the same node used for x, and as such it has @@ -637,7 +607,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } _ => span_bug!(pat.span, "tuple struct pattern not applied to an ADT {:?}", - ty.sty), + ty), }; let variant_def = adt_def.variant_of_def(def); @@ -659,7 +629,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { fields.iter() .map(|field| { FieldPattern { - field: Field::new(self.tcx.field_index(field.node.id, + field: Field::new(self.tcx.field_index(field.node.hir_id, self.tables)), pattern: self.lower_pattern(&field.node.pat), } @@ -762,9 +732,17 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty: Ty<'tcx>, subpatterns: Vec>, ) -> PatternKind<'tcx> { + let def = match def { + Def::Ctor(variant_ctor_id, CtorOf::Variant, ..) => { + let variant_id = self.tcx.parent(variant_ctor_id).unwrap(); + Def::Variant(variant_id) + }, + def => def, + }; + let mut kind = match def { - Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => { - let enum_id = self.tcx.parent_def_id(variant_id).unwrap(); + Def::Variant(variant_id) => { + let enum_id = self.tcx.parent(variant_id).unwrap(); let adt_def = self.tcx.adt_def(enum_id); if adt_def.is_enum() { let substs = match ty.sty { @@ -773,7 +751,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty::Error => { // Avoid ICE (#50585) return PatternKind::Wild; } - _ => bug!("inappropriate type for def: {:?}", ty.sty), + _ => bug!("inappropriate type for def: {:?}", ty), }; PatternKind::Variant { adt_def, @@ -786,7 +764,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } } - Def::Struct(..) | Def::StructCtor(..) | Def::Union(..) | + Def::Struct(..) 
| Def::Ctor(_, CtorOf::Struct, ..) | Def::Union(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::SelfTy(..) | Def::SelfCtor(..) => { PatternKind::Leaf { subpatterns } } @@ -798,19 +776,18 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { }; if let Some(user_ty) = self.user_substs_applied_to_ty_of_hir_id(hir_id) { - let subpattern = Pattern { - span, - ty, - kind: Box::new(kind), - }; - - debug!("pattern user_ty = {:?} for pattern at {:?}", user_ty, span); - - let pat_ty = PatternTypeProjection::from_user_type(user_ty); + debug!("lower_variant_or_leaf: kind={:?} user_ty={:?} span={:?}", kind, user_ty, span); kind = PatternKind::AscribeUserType { - subpattern, - user_ty: pat_ty, - user_ty_span: span, + subpattern: Pattern { + span, + ty, + kind: Box::new(kind), + }, + ascription: Ascription { + user_ty: PatternTypeProjection::from_user_type(user_ty), + user_ty_span: span, + variance: ty::Variance::Covariant, + }, }; } @@ -819,13 +796,13 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { /// Takes a HIR Path. If the path is a constant, evaluates it and feeds /// it to `const_to_pat`. Any other path (like enum variants without fields) - /// is converted to the corresponding pattern via `lower_variant_or_leaf` + /// is converted to the corresponding pattern via `lower_variant_or_leaf`. fn lower_path(&mut self, qpath: &hir::QPath, id: hir::HirId, span: Span) -> Pattern<'tcx> { - let ty = self.tables.node_id_to_type(id); + let ty = self.tables.node_type(id); let def = self.tables.qpath_def(qpath, id); let is_associated_const = match def { Def::AssociatedConst(_) => true, @@ -847,7 +824,33 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { }; match self.tcx.at(span).const_eval(self.param_env.and(cid)) { Ok(value) => { - return self.const_to_pat(instance, value, id, span) + let pattern = self.const_to_pat(instance, value, id, span); + if !is_associated_const { + return pattern; + } + + let user_provided_types = self.tables().user_provided_types(); + return if let Some(u_ty) = user_provided_types.get(id) { + let user_ty = PatternTypeProjection::from_user_type(*u_ty); + Pattern { + span, + kind: Box::new( + PatternKind::AscribeUserType { + subpattern: pattern, + ascription: Ascription { + /// Note that use `Contravariant` here. See the + /// `variance` field documentation for details. + variance: ty::Variance::Contravariant, + user_ty, + user_ty_span: span, + }, + } + ), + ty: value.ty, + } + } else { + pattern + } }, Err(_) => { self.tcx.sess.span_err( @@ -879,8 +882,8 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } /// Converts literals, paths and negation of literals to patterns. - /// The special case for negation exists to allow things like -128i8 - /// which would overflow if we tried to evaluate 128i8 and then negate + /// The special case for negation exists to allow things like `-128_i8` + /// which would overflow if we tried to evaluate `128_i8` and then negate /// afterwards. fn lower_lit(&mut self, expr: &'tcx hir::Expr) -> PatternKind<'tcx> { match expr.node { @@ -929,21 +932,18 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { /// Converts an evaluated constant to a pattern (if possible). /// This means aggregate values (like structs and enums) are converted - /// to a pattern that matches the value (as if you'd compare via eq). + /// to a pattern that matches the value (as if you'd compared via equality). 
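In surface terms, the constant-to-pattern conversion described here, together with the `structural_match` check further down that insists on `#[derive(PartialEq, Eq)]`, covers code like the following sketch (illustrative only, not part of the patch):

```rust
// Without the derive, using `ORIGIN` as a pattern is rejected: "to use a
// constant of type `Point` in a pattern, `Point` must be annotated with
// `#[derive(PartialEq, Eq)]`".
#[derive(PartialEq, Eq)]
struct Point {
    x: i32,
    y: i32,
}

const ORIGIN: Point = Point { x: 0, y: 0 };

fn main() {
    let p = Point { x: 3, y: 0 };
    match p {
        // The constant expands into a structural pattern, so this arm behaves
        // as if the fields were compared for equality one by one.
        ORIGIN => println!("at the origin"),
        Point { x, y } => println!("at ({}, {})", x, y),
    }
}
```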
fn const_to_pat( &self, instance: ty::Instance<'tcx>, - cv: &'tcx ty::Const<'tcx>, + cv: ty::Const<'tcx>, id: hir::HirId, span: Span, ) -> Pattern<'tcx> { - debug!("const_to_pat: cv={:#?}", cv); + debug!("const_to_pat: cv={:#?} id={:?}", cv, id); let adt_subpattern = |i, variant_opt| { let field = Field::new(i); - let val = const_field( - self.tcx, self.param_env, instance, - variant_opt, field, cv, - ).expect("field access failed"); + let val = const_field(self.tcx, self.param_env, variant_opt, field, cv); self.const_to_pat(instance, val, id, span) }; let adt_subpatterns = |n, variant_opt| { @@ -955,10 +955,10 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { } }).collect::>() }; + debug!("const_to_pat: cv.ty={:?} span={:?}", cv.ty, span); let kind = match cv.ty.sty { ty::Float(_) => { - let id = self.tcx.hir().hir_to_node_id(id); - self.tcx.lint_node( + self.tcx.lint_hir( ::rustc::lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN, id, span, @@ -967,7 +967,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatternKind::Constant { value: cv, } - }, + } ty::Adt(adt_def, _) if adt_def.is_union() => { // Matching on union fields is unsafe, we can't hide it in constants self.tcx.sess.span_err(span, "cannot use unions in constant patterns"); @@ -976,15 +976,13 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { ty::Adt(adt_def, _) if !self.tcx.has_attr(adt_def.did, "structural_match") => { let msg = format!("to use a constant of type `{}` in a pattern, \ `{}` must be annotated with `#[derive(PartialEq, Eq)]`", - self.tcx.item_path_str(adt_def.did), - self.tcx.item_path_str(adt_def.did)); + self.tcx.def_path_str(adt_def.did), + self.tcx.def_path_str(adt_def.did)); self.tcx.sess.span_err(span, &msg); PatternKind::Wild - }, + } ty::Adt(adt_def, substs) if adt_def.is_enum() => { - let variant_index = const_variant_index( - self.tcx, self.param_env, instance, cv - ).expect("const_variant_index failed"); + let variant_index = const_variant_index(self.tcx, self.param_env, cv); let subpatterns = adt_subpatterns( adt_def.variants[variant_index].fields.len(), Some(variant_index), @@ -995,7 +993,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { variant_index, subpatterns, } - }, + } ty::Adt(adt_def, _) => { let struct_var = adt_def.non_enum_variant(); PatternKind::Leaf { @@ -1020,7 +1018,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> { PatternKind::Constant { value: cv, } - }, + } }; Pattern { @@ -1093,10 +1091,10 @@ macro_rules! 
CloneImpls { } CloneImpls!{ <'tcx> - Span, Field, Mutability, ast::Name, ast::NodeId, usize, &'tcx ty::Const<'tcx>, - Region<'tcx>, Ty<'tcx>, BindingMode<'tcx>, &'tcx AdtDef, - &'tcx Substs<'tcx>, &'tcx Kind<'tcx>, UserTypeAnnotation<'tcx>, - UserTypeProjection<'tcx>, PatternTypeProjection<'tcx> + Span, Field, Mutability, ast::Name, hir::HirId, usize, ty::Const<'tcx>, + Region<'tcx>, Ty<'tcx>, BindingMode, &'tcx AdtDef, + SubstsRef<'tcx>, &'tcx Kind<'tcx>, UserType<'tcx>, + UserTypeProjection, PatternTypeProjection<'tcx> } impl<'tcx> PatternFoldable<'tcx> for FieldPattern<'tcx> { @@ -1132,12 +1130,18 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { PatternKind::Wild => PatternKind::Wild, PatternKind::AscribeUserType { ref subpattern, - ref user_ty, - user_ty_span, + ascription: Ascription { + variance, + ref user_ty, + user_ty_span, + }, } => PatternKind::AscribeUserType { subpattern: subpattern.fold_with(folder), - user_ty: user_ty.fold_with(folder), - user_ty_span, + ascription: Ascription { + user_ty: user_ty.fold_with(folder), + variance, + user_ty_span, + }, }, PatternKind::Binding { mutability, @@ -1215,8 +1219,8 @@ impl<'tcx> PatternFoldable<'tcx> for PatternKind<'tcx> { pub fn compare_const_vals<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, - a: &'tcx ty::Const<'tcx>, - b: &'tcx ty::Const<'tcx>, + a: ty::Const<'tcx>, + b: ty::Const<'tcx>, ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option { trace!("compare_const_vals: {:?}, {:?}", a, b); @@ -1236,9 +1240,6 @@ pub fn compare_const_vals<'a, 'gcx, 'tcx>( return fallback(); } - let tcx = tcx.global_tcx(); - let (a, b, ty) = (a, b, ty).lift_to_tcx(tcx).unwrap(); - // FIXME: This should use assert_bits(ty) instead of use_bits // but triggers possibly bugs due to mismatching of arrays and slices if let (Some(a), Some(b)) = (a.to_bits(tcx, ty), b.to_bits(tcx, ty)) { @@ -1248,19 +1249,20 @@ pub fn compare_const_vals<'a, 'gcx, 'tcx>( let l = ::rustc_apfloat::ieee::Single::from_bits(a); let r = ::rustc_apfloat::ieee::Single::from_bits(b); l.partial_cmp(&r) - }, + } ty::Float(ast::FloatTy::F64) => { let l = ::rustc_apfloat::ieee::Double::from_bits(a); let r = ::rustc_apfloat::ieee::Double::from_bits(b); l.partial_cmp(&r) - }, - ty::Int(_) => { - let layout = tcx.layout_of(ty).ok()?; - assert!(layout.abi.is_signed()); - let a = sign_extend(a, layout.size); - let b = sign_extend(b, layout.size); + } + ty::Int(ity) => { + use rustc::ty::layout::{Integer, IntegerExt}; + use syntax::attr::SignedInt; + let size = Integer::from_attr(&tcx, SignedInt(ity)).size(); + let a = sign_extend(a, size); + let b = sign_extend(b, size); Some((a as i128).cmp(&(b as i128))) - }, + } _ => Some(a.cmp(&b)), } } @@ -1268,25 +1270,21 @@ pub fn compare_const_vals<'a, 'gcx, 'tcx>( if let ty::Str = ty.value.sty { match (a.val, b.val) { ( - ConstValue::ScalarPair( + ConstValue::Slice( Scalar::Ptr(ptr_a), len_a, ), - ConstValue::ScalarPair( + ConstValue::Slice( Scalar::Ptr(ptr_b), len_b, ), ) if ptr_a.offset.bytes() == 0 && ptr_b.offset.bytes() == 0 => { - if let Ok(len_a) = len_a.to_bits(tcx.data_layout.pointer_size) { - if let Ok(len_b) = len_b.to_bits(tcx.data_layout.pointer_size) { - if len_a == len_b { - let map = tcx.alloc_map.lock(); - let alloc_a = map.unwrap_memory(ptr_a.alloc_id); - let alloc_b = map.unwrap_memory(ptr_b.alloc_id); - if alloc_a.bytes.len() as u128 == len_a { - return from_bool(alloc_a == alloc_b); - } - } + if len_a == len_b { + let map = tcx.alloc_map.lock(); + let alloc_a = map.unwrap_memory(ptr_a.alloc_id); + let alloc_b = 
map.unwrap_memory(ptr_b.alloc_id); + if alloc_a.bytes.len() as u64 == len_a { + return from_bool(alloc_a == alloc_b); } } } diff --git a/src/librustc_mir/hair/util.rs b/src/librustc_mir/hair/util.rs index f81a0fa5dfadb..c9dae6990795b 100644 --- a/src/librustc_mir/hair/util.rs +++ b/src/librustc_mir/hair/util.rs @@ -1,46 +1,34 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir; -use rustc::mir::UserTypeAnnotation; -use rustc::ty::{self, AdtDef, TyCtxt}; +use rustc::ty::{self, CanonicalUserType, TyCtxt, UserType}; crate trait UserAnnotatedTyHelpers<'gcx: 'tcx, 'tcx> { fn tcx(&self) -> TyCtxt<'_, 'gcx, 'tcx>; fn tables(&self) -> &ty::TypeckTables<'tcx>; - fn user_substs_applied_to_adt( - &self, - hir_id: hir::HirId, - adt_def: &'tcx AdtDef, - ) -> Option> { - let user_substs = self.tables().user_substs(hir_id)?; - Some(UserTypeAnnotation::TypeOf(adt_def.did, user_substs)) - } - /// Looks up the type associated with this hir-id and applies the /// user-given substitutions; the hir-id must map to a suitable /// type. fn user_substs_applied_to_ty_of_hir_id( &self, hir_id: hir::HirId, - ) -> Option> { - let user_substs = self.tables().user_substs(hir_id)?; - match &self.tables().node_id_to_type(hir_id).sty { - ty::Adt(adt_def, _) => Some(UserTypeAnnotation::TypeOf(adt_def.did, user_substs)), - ty::FnDef(def_id, _) => Some(UserTypeAnnotation::TypeOf(*def_id, user_substs)), - sty => bug!( - "sty: {:?} should not have user-substs {:?} recorded ", - sty, - user_substs + ) -> Option> { + let user_provided_types = self.tables().user_provided_types(); + let mut user_ty = *user_provided_types.get(hir_id)?; + debug!("user_subts_applied_to_ty_of_hir_id: user_ty={:?}", user_ty); + let ty = self.tables().node_type(hir_id); + match ty.sty { + ty::Adt(adt_def, ..) => { + if let UserType::TypeOf(ref mut did, _) = &mut user_ty.value { + *did = adt_def.did; + } + Some(user_ty) + } + ty::FnDef(..) => Some(user_ty), + _ => bug!( + "ty: {:?} should not have user provided type {:?} recorded ", + ty, + user_ty ), } } diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 7d636b77ced4c..5056d79bec4b1 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,27 +1,17 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
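The `user_substs_applied_to_ty_of_hir_id` rework above is about remembering the types a user actually wrote (as canonical user type annotations) so they can be checked later rather than silently widened by inference. At the source level it concerns patterns like the one in this sketch (`Wrapper` is invented for illustration):

```rust
struct Wrapper<T>(T);

fn main() {
    // The turbofish is a user-provided type: inference alone would settle on
    // `Wrapper<i32>` for the literal, but the written annotation pins the
    // pattern, and therefore the binding `n`, to `Wrapper<u16>`.
    let Wrapper::<u16>(n) = Wrapper(5);
    println!("{}", n);
}
```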
- use rustc::ty::{self, Ty, TypeAndMut}; use rustc::ty::layout::{self, TyLayout, Size}; use syntax::ast::{FloatTy, IntTy, UintTy}; use rustc_apfloat::ieee::{Single, Double}; use rustc::mir::interpret::{ - Scalar, EvalResult, Pointer, PointerArithmetic, EvalErrorKind, truncate + Scalar, EvalResult, Pointer, PointerArithmetic, InterpError, truncate }; use rustc::mir::CastKind; use rustc_apfloat::Float; -use super::{EvalContext, Machine, PlaceTy, OpTy, Immediate}; +use super::{InterpretCx, Machine, PlaceTy, OpTy, ImmTy, Immediate}; -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool { match ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. }) | @@ -43,23 +33,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.unsize_into(src, dest)?; } - Misc => { - let src_layout = src.layout; + Misc | MutToConstPointer => { let src = self.read_immediate(src)?; - // There are no casts to references - assert!(!dest.layout.ty.is_region_ptr()); - // Hence we make all casts erase the tag - let src = src.erase_tag().with_default_tag(); - - if self.type_is_fat_ptr(src_layout.ty) { - match (src, self.type_is_fat_ptr(dest.layout.ty)) { + if self.type_is_fat_ptr(src.layout.ty) { + match (*src, self.type_is_fat_ptr(dest.layout.ty)) { // pointers to extern types (Immediate::Scalar(_),_) | // slices and trait objects to other slices/trait objects (Immediate::ScalarPair(..), true) => { // No change to immediate - self.write_immediate(src, dest)?; + self.write_immediate(*src, dest)?; } // slices and trait objects to thin pointers (dropping the metadata) (Immediate::ScalarPair(data, _), false) => { @@ -67,11 +51,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } } } else { - match src_layout.variants { + match src.layout.variants { layout::Variants::Single { index } => { - if let Some(def) = src_layout.ty.ty_adt_def() { + if let Some(def) = src.layout.ty.ty_adt_def() { // Cast from a univariant enum - assert!(src_layout.is_zst()); + assert!(src.layout.is_zst()); let discr_val = def .discriminant_for_variant(*self.tcx, index) .val; @@ -80,11 +64,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> dest); } } - layout::Variants::Tagged { .. } | - layout::Variants::NicheFilling { .. } => {}, + layout::Variants::Multiple { .. 
} => {}, } - let dest_val = self.cast_scalar(src.to_scalar()?, src_layout, dest.layout)?; + let dest_val = self.cast_scalar(src.to_scalar()?, src.layout, dest.layout)?; self.write_scalar(dest_val, dest)?; } } @@ -102,11 +85,11 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.param_env, def_id, substs, - ).ok_or_else(|| EvalErrorKind::TooGeneric.into()); + ).ok_or_else(|| InterpError::TooGeneric.into()); let fn_ptr = self.memory.create_fn_alloc(instance?).with_default_tag(); self.write_scalar(Scalar::Ptr(fn_ptr.into()), dest)?; } - ref other => bug!("reify fn pointer on {:?}", other), + _ => bug!("reify fn pointer on {:?}", src.layout.ty), } } @@ -117,19 +100,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // No change to value self.write_immediate(*src, dest)?; } - ref other => bug!("fn to unsafe fn cast on {:?}", other), + _ => bug!("fn to unsafe fn cast on {:?}", dest.layout.ty), } } - ClosureFnPointer => { + ClosureFnPointer(_) => { // The src operand does not matter, just its type match src.layout.ty.sty { ty::Closure(def_id, substs) => { - let substs = self.tcx.subst_and_normalize_erasing_regions( - self.substs(), - ty::ParamEnv::reveal_all(), - &substs, - ); + let substs = self.subst_and_normalize_erasing_regions(substs)?; let instance = ty::Instance::resolve_closure( *self.tcx, def_id, @@ -140,7 +119,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let val = Immediate::Scalar(Scalar::Ptr(fn_ptr.into()).into()); self.write_immediate(val, dest)?; } - ref other => bug!("closure fn pointer on {:?}", other), + _ => bug!("closure fn pointer on {:?}", src.layout.ty), } } } @@ -392,7 +371,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> assert_eq!(src.layout.fields.offset(i).bytes(), 0); assert_eq!(src_field_layout.size, src.layout.size); // just sawp out the layout - OpTy { op: src.op, layout: src_field_layout } + OpTy::from(ImmTy { imm: src.to_immediate(), layout: src_field_layout }) } }; if src_field.layout.ty == dst_field.layout.ty { diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index d36d530fe78b2..32f7ecd97b2ef 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -1,13 +1,4 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
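The `cast.rs` hunk above separates fat-pointer casts, discriminant reads on field-less enums, and reification of function items. A standalone sketch of the source-level casts involved, not part of the patch (`double` and `Unit` are invented):

```rust
fn double(x: u32) -> u32 {
    x * 2
}

enum Unit {
    Only,
}

fn main() {
    // Unsizing: a reference to an array becomes a fat pointer (data + length),
    // the `Immediate::ScalarPair` case in the interpreter.
    let arr = [1u8, 2, 3];
    let slice: &[u8] = &arr;

    // Dropping metadata: casting a fat raw pointer to a thin one keeps only the
    // data pointer.
    let thin = slice as *const [u8] as *const u8;

    // ReifyFnPointer: a zero-sized function item becomes an actual fn pointer.
    let f: fn(u32) -> u32 = double;

    // Casting a field-less, univariant enum reads its discriminant.
    let d = Unit::Only as u8;

    println!("{} {:?} {} {}", slice.len(), thin, f(21), d);
}
```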
- +use std::cell::Cell; use std::fmt::Write; use std::mem; @@ -18,14 +9,14 @@ use rustc::mir; use rustc::ty::layout::{ self, Size, Align, HasDataLayout, LayoutOf, TyLayout }; -use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::subst::{Subst, SubstsRef}; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::ty::query::TyCtxtAt; use rustc_data_structures::indexed_vec::IndexVec; use rustc::mir::interpret::{ ErrorHandled, GlobalId, Scalar, FrameInfo, AllocId, - EvalResult, EvalErrorKind, + EvalResult, InterpError, truncate, sign_extend, }; use rustc_data_structures::fx::FxHashMap; @@ -35,7 +26,7 @@ use super::{ Memory, Machine }; -pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { +pub struct InterpretCx<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { /// Stores the `Machine` instance. pub machine: M, @@ -52,7 +43,7 @@ pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { pub(crate) stack: Vec>, /// A cache for deduplicating vtables - pub(super) vtables: FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), AllocId>, + pub(super) vtables: FxHashMap<(Ty<'tcx>, Option>), AllocId>, } /// A stack frame. @@ -64,7 +55,7 @@ pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { /// The MIR for the function called on this frame. pub mir: &'mir mir::Mir<'tcx>, - /// The def_id and substs of the current function + /// The def_id and substs of the current function. pub instance: ty::Instance<'tcx>, /// The span of the call site. @@ -73,7 +64,7 @@ pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { //////////////////////////////////////////////////////////////////////////////// // Return place and locals //////////////////////////////////////////////////////////////////////////////// - /// Work to perform when returning from this function + /// Work to perform when returning from this function. pub return_to_block: StackPopCleanup, /// The location where the result of the current stack frame should be written to, @@ -85,7 +76,7 @@ pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { /// The locals are stored as `Option`s. /// `None` represents a local that is currently dead, while a live local /// can either directly contain `Scalar` or refer to some part of an `Allocation`. - pub locals: IndexVec>, + pub locals: IndexVec>, //////////////////////////////////////////////////////////////////////////////// // Current position within the function @@ -97,7 +88,7 @@ pub struct Frame<'mir, 'tcx: 'mir, Tag=(), Extra=()> { /// The index of the currently evaluated statement. pub stmt: usize, - /// Extra data for the machine + /// Extra data for the machine. pub extra: Extra, } @@ -108,41 +99,66 @@ pub enum StackPopCleanup { /// we can validate it at that layout. Goto(Option), /// Just do nohing: Used by Main and for the box_alloc hook in miri. - /// `cleanup` says whether locals are deallocated. Static computation + /// `cleanup` says whether locals are deallocated. Static computation /// wants them leaked to intern what they need (and just throw away /// the entire `ecx` when it is done). 
     None { cleanup: bool },
 }
 
-// State of a local variable
-#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+/// State of a local variable including a memoized layout
+#[derive(Clone, PartialEq, Eq)]
+pub struct LocalState<'tcx, Tag=(), Id=AllocId> {
+    pub value: LocalValue<Tag, Id>,
+    /// Don't modify if `Some`, this is only used to prevent computing the layout twice
+    pub layout: Cell<Option<TyLayout<'tcx>>>,
+}
+
+/// Current value of a local variable
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
 pub enum LocalValue<Tag=(), Id=AllocId> {
+    /// This local is not currently alive, and cannot be used at all.
     Dead,
-    // Mostly for convenience, we re-use the `Operand` type here.
-    // This is an optimization over just always having a pointer here;
-    // we can thus avoid doing an allocation when the local just stores
-    // immediate values *and* never has its address taken.
+    /// This local is alive but not yet initialized. It can be written to
+    /// but not read from or its address taken. Locals get initialized on
+    /// first write because for unsized locals, we do not know their size
+    /// before that.
+    Uninitialized,
+    /// A normal, live local.
+    /// Mostly for convenience, we re-use the `Operand` type here.
+    /// This is an optimization over just always having a pointer here;
+    /// we can thus avoid doing an allocation when the local just stores
+    /// immediate values *and* never has its address taken.
     Live(Operand<Tag, Id>),
 }
 
-impl<'tcx, Tag> LocalValue<Tag> {
-    pub fn access(&self) -> EvalResult<'tcx, &Operand<Tag>> {
-        match self {
+impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
+    pub fn access(&self) -> EvalResult<'tcx, Operand<Tag>> {
+        match self.value {
             LocalValue::Dead => err!(DeadLocal),
-            LocalValue::Live(ref val) => Ok(val),
+            LocalValue::Uninitialized =>
+                bug!("The type checker should prevent reading from a never-written local"),
+            LocalValue::Live(val) => Ok(val),
         }
     }
 
-    pub fn access_mut(&mut self) -> EvalResult<'tcx, &mut Operand<Tag>> {
-        match self {
+    /// Overwrite the local. If the local can be overwritten in place, return a reference
+    /// to do so; otherwise return the `MemPlace` to consult instead.
+ pub fn access_mut( + &mut self, + ) -> EvalResult<'tcx, Result<&mut LocalValue, MemPlace>> { + match self.value { LocalValue::Dead => err!(DeadLocal), - LocalValue::Live(ref mut val) => Ok(val), + LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)), + ref mut local @ LocalValue::Live(Operand::Immediate(_)) | + ref mut local @ LocalValue::Uninitialized => { + Ok(Ok(local)) + } } } } impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout - for EvalContext<'a, 'mir, 'tcx, M> + for InterpretCx<'a, 'mir, 'tcx, M> { #[inline] fn data_layout(&self) -> &layout::TargetDataLayout { @@ -150,7 +166,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> HasDataLayout } } -impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for EvalContext<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpretCx<'a, 'mir, 'tcx, M> where M: Machine<'a, 'mir, 'tcx> { #[inline] @@ -160,7 +176,7 @@ impl<'a, 'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for EvalContext<'a, 'mir, 'tcx, } impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf - for EvalContext<'a, 'mir, 'tcx, M> + for InterpretCx<'a, 'mir, 'tcx, M> { type Ty = Ty<'tcx>; type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>; @@ -168,17 +184,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> LayoutOf #[inline] fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { self.tcx.layout_of(self.param_env.and(ty)) - .map_err(|layout| EvalErrorKind::Layout(layout).into()) + .map_err(|layout| InterpError::Layout(layout).into()) } } -impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { pub fn new( tcx: TyCtxtAt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, machine: M, ) -> Self { - EvalContext { + InterpretCx { machine, tcx, param_env, @@ -224,33 +240,39 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc self.frame().mir } - pub fn substs(&self) -> &'tcx Substs<'tcx> { - if let Some(frame) = self.stack.last() { - frame.instance.substs - } else { - Substs::empty() + pub(super) fn subst_and_normalize_erasing_regions>( + &self, + substs: T, + ) -> EvalResult<'tcx, T> { + match self.stack.last() { + Some(frame) => Ok(self.tcx.subst_and_normalize_erasing_regions( + frame.instance.substs, + self.param_env, + &substs, + )), + None => if substs.needs_subst() { + err!(TooGeneric).into() + } else { + Ok(substs) + }, } } pub(super) fn resolve( &self, def_id: DefId, - substs: &'tcx Substs<'tcx> + substs: SubstsRef<'tcx> ) -> EvalResult<'tcx, ty::Instance<'tcx>> { trace!("resolve: {:?}, {:#?}", def_id, substs); - trace!("substs: {:#?}", self.substs()); trace!("param_env: {:#?}", self.param_env); - let substs = self.tcx.subst_and_normalize_erasing_regions( - self.substs(), - self.param_env, - &substs, - ); + let substs = self.subst_and_normalize_erasing_regions(substs)?; + trace!("substs: {:#?}", substs); ty::Instance::resolve( *self.tcx, self.param_env, def_id, substs, - ).ok_or_else(|| EvalErrorKind::TooGeneric.into()) + ).ok_or_else(|| InterpError::TooGeneric.into()) } pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { @@ -275,19 +297,33 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } trace!("load mir {:?}", instance); match instance { - ty::InstanceDef::Item(def_id) => { - self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| - EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into() - ) - } + ty::InstanceDef::Item(def_id) => if 
self.tcx.is_mir_available(did) { + Ok(self.tcx.optimized_mir(did)) + } else { + err!(NoMirFor(self.tcx.def_path_str(def_id))) + }, _ => Ok(self.tcx.instance_mir(instance)), } } - pub fn monomorphize + Subst<'tcx>>( + pub(super) fn monomorphize + Subst<'tcx>>( &self, t: T, - substs: &'tcx Substs<'tcx> + ) -> EvalResult<'tcx, T> { + match self.stack.last() { + Some(frame) => Ok(self.monomorphize_with_substs(t, frame.instance.substs)), + None => if t.needs_subst() { + err!(TooGeneric).into() + } else { + Ok(t) + }, + } + } + + fn monomorphize_with_substs + Subst<'tcx>>( + &self, + t: T, + substs: SubstsRef<'tcx> ) -> T { // miri doesn't care about lifetimes, and will choke on some crazy ones // let's simply get rid of them @@ -298,11 +334,22 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc pub fn layout_of_local( &self, frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, - local: mir::Local + local: mir::Local, + layout: Option>, ) -> EvalResult<'tcx, TyLayout<'tcx>> { - let local_ty = frame.mir.local_decls[local].ty; - let local_ty = self.monomorphize(local_ty, frame.instance.substs); - self.layout_of(local_ty) + match frame.locals[local].layout.get() { + None => { + let layout = crate::interpret::operand::from_known_layout(layout, || { + let local_ty = frame.mir.local_decls[local].ty; + let local_ty = self.monomorphize_with_substs(local_ty, frame.instance.substs); + self.layout_of(local_ty) + })?; + // Layouts of locals are requested a lot, so we cache them. + frame.locals[local].layout.set(Some(layout)); + Ok(layout) + } + Some(layout) => Ok(layout), + } } pub fn str_to_immediate(&mut self, s: &str) -> EvalResult<'tcx, Immediate> { @@ -310,7 +357,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc Ok(Immediate::new_slice(Scalar::Ptr(ptr), s.len() as u64, self)) } - /// Return the actual dynamic size and alignment of the place at the given type. + /// Returns the actual dynamic size and alignment of the place at the given type. /// Only the "meta" (metadata) part of the place matters. /// This can fail to provide an answer for extern types. pub(super) fn size_and_align_of( @@ -421,8 +468,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc return_place: Option>, return_to_block: StackPopCleanup, ) -> EvalResult<'tcx> { - if self.stack.len() > 1 { // FIXME should be "> 0", printing topmost frame crashes rustc... - debug!("PAUSING({}) {}", self.cur_frame(), self.frame().instance); + if self.stack.len() > 0 { + info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance); } ::log_settings::settings().indentation += 1; @@ -444,15 +491,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // don't allocate at all for trivial constants if mir.local_decls.len() > 1 { - // We put some marker immediate into the locals that we later want to initialize. - // This can be anything except for LocalValue::Dead -- because *that* is the - // value we use for things that we know are initially dead. - let dummy = - LocalValue::Live(Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef))); + // Locals are initially uninitialized. + let dummy = LocalState { + value: LocalValue::Uninitialized, + layout: Cell::new(None), + }; let mut locals = IndexVec::from_elem(dummy, &mir.local_decls); // Return place is handled specially by the `eval_place` functions, and the // entry in `locals` should never be used. Make it dead, to be sure. 
- locals[mir::RETURN_PLACE] = LocalValue::Dead; + locals[mir::RETURN_PLACE].value = LocalValue::Dead; // Now mark those locals as dead that we do not want to initialize match self.tcx.describe_def(instance.def_id()) { // statics and constants don't have `Storage*` statements, no need to look for them @@ -465,7 +512,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc match stmt.kind { StorageLive(local) | StorageDead(local) => { - locals[local] = LocalValue::Dead; + locals[local].value = LocalValue::Dead; } _ => {} } @@ -473,26 +520,11 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } }, } - // Finally, properly initialize all those that still have the dummy value - for (local, decl) in locals.iter_mut().zip(mir.local_decls.iter()) { - match *local { - LocalValue::Live(_) => { - // This needs to be peoperly initialized. - let layout = self.layout_of(self.monomorphize(decl.ty, instance.substs))?; - *local = LocalValue::Live(self.uninit_operand(layout)?); - } - LocalValue::Dead => { - // Nothing to do - } - } - } // done self.frame_mut().locals = locals; } - if self.stack.len() > 1 { // FIXME no check should be needed, but some instances ICE - debug!("ENTERING({}) {}", self.cur_frame(), self.frame().instance); - } + info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance); if self.stack.len() > self.tcx.sess.const_eval_stack_frame_limit { err!(StackFrameLimitReached) @@ -502,9 +534,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> { - if self.stack.len() > 1 { // FIXME no check should be needed, but some instances ICE - debug!("LEAVING({}) {}", self.cur_frame(), self.frame().instance); - } + info!("LEAVING({}) {}", self.cur_frame(), self.frame().instance); ::log_settings::settings().indentation -= 1; let frame = self.stack.pop().expect( "tried to pop a stack frame, but there were none", @@ -524,7 +554,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } // Deallocate all locals that are backed by an allocation. for local in frame.locals { - self.deallocate_local(local)?; + self.deallocate_local(local.value)?; } // Validate the return value. Do this after deallocating so that we catch dangling // references. @@ -556,8 +586,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc StackPopCleanup::None { .. } => {} } - if self.stack.len() > 1 { // FIXME should be "> 0", printing topmost frame crashes rustc... - debug!("CONTINUING({}) {}", self.cur_frame(), self.frame().instance); + if self.stack.len() > 0 { + info!("CONTINUING({}) {}", self.cur_frame(), self.frame().instance); } Ok(()) @@ -572,10 +602,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc assert!(local != mir::RETURN_PLACE, "Cannot make return place live"); trace!("{:?} is now live", local); - let layout = self.layout_of_local(self.frame(), local)?; - let init = LocalValue::Live(self.uninit_operand(layout)?); + let local_val = LocalValue::Uninitialized; // StorageLive *always* kills the value that's currently stored - Ok(mem::replace(&mut self.frame_mut().locals[local], init)) + Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val)) } /// Returns the old value of the local. 
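`layout_of_local` above now memoizes the per-local layout in the `layout: Cell<...>` field added to `LocalState`, so it is computed at most once per local. The same caching shape in miniature (standalone sketch, names invented):

```rust
use std::cell::Cell;

// The same memoization pattern as the new `LocalState::layout` field: compute
// on first access, then hand back the cached value on every later access.
struct Cached {
    layout: Cell<Option<u64>>,
}

impl Cached {
    fn layout_of(&self, compute: impl FnOnce() -> u64) -> u64 {
        match self.layout.get() {
            Some(layout) => layout,
            None => {
                let layout = compute();
                self.layout.set(Some(layout));
                layout
            }
        }
    }
}

fn main() {
    let local = Cached { layout: Cell::new(None) };
    assert_eq!(local.layout_of(|| 8), 8);
    // The second closure never runs; the cached value wins.
    assert_eq!(local.layout_of(|| 999), 8);
    println!("layout cached once");
}
```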
@@ -584,7 +613,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc assert!(local != mir::RETURN_PLACE, "Cannot make return place dead"); trace!("{:?} is now dead", local); - mem::replace(&mut self.frame_mut().locals[local], LocalValue::Dead) + mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead) } pub(super) fn deallocate_local( @@ -616,8 +645,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles. let val = self.tcx.const_eval_raw(param_env.and(gid)).map_err(|err| { match err { - ErrorHandled::Reported => EvalErrorKind::ReferencedConstant, - ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric, + ErrorHandled::Reported => InterpError::ReferencedConstant, + ErrorHandled::TooGeneric => InterpError::TooGeneric, } })?; self.raw_const_to_mplace(val) @@ -637,31 +666,31 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc } write!(msg, ":").unwrap(); - match self.stack[frame].locals[local].access() { - Err(err) => { - if let EvalErrorKind::DeadLocal = err.kind { - write!(msg, " is dead").unwrap(); - } else { - panic!("Failed to access local: {:?}", err); - } - } - Ok(Operand::Indirect(mplace)) => { - let (ptr, align) = mplace.to_scalar_ptr_align(); - match ptr { + match self.stack[frame].locals[local].value { + LocalValue::Dead => write!(msg, " is dead").unwrap(), + LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(), + LocalValue::Live(Operand::Indirect(mplace)) => { + match mplace.ptr { Scalar::Ptr(ptr) => { - write!(msg, " by align({}) ref:", align.bytes()).unwrap(); + write!(msg, " by align({}){} ref:", + mplace.align.bytes(), + match mplace.meta { + Some(meta) => format!(" meta({:?})", meta), + None => String::new() + } + ).unwrap(); allocs.push(ptr.alloc_id); } ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), } } - Ok(Operand::Immediate(Immediate::Scalar(val))) => { + LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { write!(msg, " {:?}", val).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val { allocs.push(ptr.alloc_id); } } - Ok(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { + LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { write!(msg, " ({:?}, {:?})", val1, val2).unwrap(); if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 { allocs.push(ptr.alloc_id); diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index cbe2e25b4fcd1..d9721a8cadff9 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -1,27 +1,17 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Intrinsics and other functions that the miri engine executes without -//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE +//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE //! and miri. 
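The intrinsics hunk that follows adds CTFE/miri emulation for `saturating_add` and `saturating_sub`, choosing the clamping bound from the sign of the first operand once an overflow has been detected. For reference, the library-level behavior being reproduced (illustration only, not part of the patch):

```rust
fn main() {
    // Unsigned: overflow clamps to the type's maximum, underflow to zero.
    assert_eq!(250u8.saturating_add(10), u8::MAX);
    assert_eq!(3u8.saturating_sub(10), 0);

    // Signed: the saturation point depends on the direction of the overflow,
    // which the emulation derives from the sign of the first operand.
    assert_eq!(120i8.saturating_add(10), i8::MAX);
    assert_eq!((-120i8).saturating_sub(10), i8::MIN);

    println!("saturating arithmetic behaves as expected");
}
```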
use syntax::symbol::Symbol; use rustc::ty; -use rustc::ty::layout::{LayoutOf, Primitive}; +use rustc::ty::layout::{LayoutOf, Primitive, Size}; use rustc::mir::BinOp; use rustc::mir::interpret::{ - EvalResult, EvalErrorKind, Scalar, + EvalResult, InterpError, Scalar, }; use super::{ - Machine, PlaceTy, OpTy, EvalContext, + Machine, PlaceTy, OpTy, InterpretCx, }; @@ -46,8 +36,8 @@ fn numeric_intrinsic<'tcx, Tag>( Ok(Scalar::from_uint(bits_out, size)) } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// Returns whether emulation happened. +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { + /// Returns `true` if emulation happened. pub fn emulate_intrinsic( &mut self, instance: ty::Instance<'tcx>, @@ -97,7 +87,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let bits = self.read_scalar(args[0])?.to_bits(layout_of.size)?; let kind = match layout_of.abi { ty::layout::Abi::Scalar(ref scalar) => scalar.value, - _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?, + _ => Err(::rustc::mir::interpret::InterpError::TypeNotPrimitive(ty))?, }; let out_val = if intrinsic_name.ends_with("_nonzero") { if bits == 0 { @@ -132,6 +122,49 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.binop_with_overflow(bin_op, lhs, rhs, dest)?; } } + "saturating_add" | "saturating_sub" => { + let l = self.read_immediate(args[0])?; + let r = self.read_immediate(args[1])?; + let is_add = intrinsic_name == "saturating_add"; + let (val, overflowed) = self.binary_op(if is_add { + BinOp::Add + } else { + BinOp::Sub + }, l, r)?; + let val = if overflowed { + let num_bits = l.layout.size.bits(); + if l.layout.abi.is_signed() { + // For signed ints the saturated value depends on the sign of the first + // term since the sign of the second term can be inferred from this and + // the fact that the operation has overflowed (if either is 0 no + // overflow can occur) + let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?; + let first_term_positive = first_term & (1 << (num_bits-1)) == 0; + if first_term_positive { + // Negative overflow not possible since the positive first term + // can only increase an (in range) negative term for addition + // or corresponding negated positive term for subtraction + Scalar::from_uint((1u128 << (num_bits - 1)) - 1, // max positive + Size::from_bits(num_bits)) + } else { + // Positive overflow not possible for similar reason + // max negative + Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits)) + } + } else { // unsigned + if is_add { + // max unsigned + Scalar::from_uint(u128::max_value() >> (128 - num_bits), + Size::from_bits(num_bits)) + } else { // underflow to 0 + Scalar::from_uint(0u128, Size::from_bits(num_bits)) + } + } + } else { + val + }; + self.write_scalar(val, dest)?; + } "unchecked_shl" | "unchecked_shr" => { let l = self.read_immediate(args[0])?; let r = self.read_immediate(args[1])?; @@ -140,7 +173,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> "unchecked_shr" => BinOp::Shr, _ => bug!("Already checked for int ops") }; - let (val, overflowed) = self.binary_op_imm(bin_op, l, r)?; + let (val, overflowed) = self.binary_op(bin_op, l, r)?; if overflowed { let layout = self.layout_of(substs.type_at(0))?; let r_val = r.to_scalar()?.to_bits(layout.size)?; @@ -179,7 +212,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } /// 
"Intercept" a function call because we have something special to do for it. - /// Returns whether an intercept happened. + /// Returns `true` if an intercept happened. pub fn hook_fn( &mut self, instance: ty::Instance<'tcx>, @@ -215,7 +248,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let file = Symbol::intern(self.read_str(file_place)?); let line = self.read_scalar(line.into())?.to_u32()?; let col = self.read_scalar(col.into())?.to_u32()?; - return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); + return Err(InterpError::Panic { msg, file, line, col }.into()); } else if Some(def_id) == self.tcx.lang_items().begin_panic_fn() { assert!(args.len() == 2); // &'static str, &(&'static str, u32, u32) @@ -233,7 +266,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let file = Symbol::intern(self.read_str(file_place)?); let line = self.read_scalar(line.into())?.to_u32()?; let col = self.read_scalar(col.into())?.to_u32()?; - return Err(EvalErrorKind::Panic { msg, file, line, col }.into()); + return Err(InterpError::Panic { msg, file, line, col }.into()); } else { return Ok(false); } diff --git a/src/librustc_mir/interpret/machine.rs b/src/librustc_mir/interpret/machine.rs index 4c7aa887045c7..09d403ab243d6 100644 --- a/src/librustc_mir/interpret/machine.rs +++ b/src/librustc_mir/interpret/machine.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module contains everything needed to instantiate an interpreter. //! This separation exists to ensure that no fancy miri features like //! interpreting common C functions leak into CTFE. @@ -17,11 +7,11 @@ use std::hash::Hash; use rustc::hir::{self, def_id::DefId}; use rustc::mir; -use rustc::ty::{self, layout::TyLayout, query::TyCtxtAt}; +use rustc::ty::{self, query::TyCtxtAt}; use super::{ Allocation, AllocId, EvalResult, Scalar, AllocationExtra, - EvalContext, PlaceTy, MPlaceTy, OpTy, Pointer, MemoryKind, + InterpretCx, PlaceTy, MPlaceTy, OpTy, ImmTy, Pointer, MemoryKind, }; /// Whether this kind of memory is allowed to leak @@ -31,23 +21,23 @@ pub trait MayLeak: Copy { /// The functionality needed by memory to manage its allocations pub trait AllocMap { - /// Test if the map contains the given key. + /// Tests if the map contains the given key. /// Deliberately takes `&mut` because that is sufficient, and some implementations /// can be more efficient then (using `RefCell::get_mut`). fn contains_key(&mut self, k: &Q) -> bool where K: Borrow; - /// Insert new entry into the map. + /// Inserts a new entry into the map. fn insert(&mut self, k: K, v: V) -> Option; - /// Remove entry from the map. + /// Removes an entry from the map. fn remove(&mut self, k: &Q) -> Option where K: Borrow; - /// Return data based the keys and values in the map. + /// Returns data based the keys and values in the map. fn filter_map_collect(&self, f: impl FnMut(&K, &V) -> Option) -> Vec; - /// Return a reference to entry `k`. If no such entry exists, call + /// Returns a reference to entry `k`. If no such entry exists, call /// `vacant` and either forward its error, or add its result to the map /// and return a reference to *that*. 
fn get_or( @@ -56,7 +46,7 @@ pub trait AllocMap { vacant: impl FnOnce() -> Result ) -> Result<&V, E>; - /// Return a mutable reference to entry `k`. If no such entry exists, call + /// Returns a mutable reference to entry `k`. If no such entry exists, call /// `vacant` and either forward its error, or add its result to the map /// and return a reference to *that*. fn get_mut_or( @@ -72,7 +62,7 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Additional memory kinds a machine wishes to distinguish from the builtin ones type MemoryKinds: ::std::fmt::Debug + MayLeak + Eq + 'static; - /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows" + /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows" /// . /// The `default()` is used for pointers to consts, statics, vtables and functions. type PointerTag: ::std::fmt::Debug + Default + Copy + Eq + Hash + 'static; @@ -80,13 +70,13 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Extra data stored in every call frame. type FrameExtra; - /// Extra data stored in memory. A reference to this is available when `AllocExtra` + /// Extra data stored in memory. A reference to this is available when `AllocExtra` /// gets initialized, so you can e.g., have an `Rc` here if there is global state you /// need access to in the `AllocExtra` hooks. type MemoryExtra: Default; /// Extra data stored in every allocation. - type AllocExtra: AllocationExtra; + type AllocExtra: AllocationExtra + 'static; /// Memory's allocation map type MemoryMap: @@ -105,24 +95,24 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { const STATIC_KIND: Option; /// Whether to enforce the validity invariant - fn enforce_validity(ecx: &EvalContext<'a, 'mir, 'tcx, Self>) -> bool; + fn enforce_validity(ecx: &InterpretCx<'a, 'mir, 'tcx, Self>) -> bool; /// Called before a basic block terminator is executed. /// You can use this to detect endlessly running programs. - fn before_terminator(ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>; + fn before_terminator(ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>) -> EvalResult<'tcx>; /// Entry point to all function calls. /// /// Returns either the mir to use for the call, or `None` if execution should /// just proceed (which usually means this hook did all the work that the - /// called function should usually have done). In the latter case, it is + /// called function should usually have done). In the latter case, it is /// this hook's responsibility to call `goto_block(ret)` to advance the instruction pointer! /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR /// nor just jump to `ret`, but instead push their own stack frame.) /// Passing `dest`and `ret` in the same `Option` proved very annoying when only one of them /// was used. fn find_fn( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx, Self::PointerTag>], dest: Option>, @@ -132,7 +122,7 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// Directly process an intrinsic without pushing a stack frame. /// If this returns successfully, the engine will take care of jumping to the next block. 
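[Editor's aside, not part of the patch] The `get_or`/`get_mut_or` contract described above is easiest to see against a plain `HashMap`. A minimal sketch under that assumption; the trait itself is generic over the map type, and the function name is reused here only for illustration:

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;
    use std::hash::Hash;

    // Return the existing entry for `k`; otherwise run `vacant` and either
    // forward its error or insert its result and return a reference to that.
    fn get_or<'m, K: Hash + Eq, V, E>(
        map: &'m mut HashMap<K, V>,
        k: K,
        vacant: impl FnOnce() -> Result<V, E>,
    ) -> Result<&'m V, E> {
        match map.entry(k) {
            Entry::Occupied(e) => Ok(e.into_mut()),
            Entry::Vacant(e) => Ok(e.insert(vacant()?)),
        }
    }

    fn main() {
        let mut m: HashMap<u32, String> = HashMap::new();
        let v = get_or(&mut m, 1, || Ok::<_, ()>("one".to_string())).unwrap();
        assert_eq!(v.as_str(), "one");
        // A failing `vacant` is forwarded and nothing is inserted.
        assert!(get_or(&mut m, 2, || Err("lookup failed")).is_err());
        assert!(!m.contains_key(&2));
    }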
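[Editor's aside, not part of the patch] Looking back at the `saturating_add`/`saturating_sub` emulation added in the intrinsics.rs hunk above: when the checked operation reports overflow, the saturation value is chosen from the bit width, the signedness, and, for signed types, the sign of the first operand (the direction of overflow follows from that sign alone). A sketch of just that selection, with hypothetical names; the return value is the raw bit pattern, as the interpreter stores it:

    // Signed results saturate toward the sign of the first operand; unsigned
    // addition saturates to the type's maximum and unsigned subtraction to zero.
    fn saturated_bits(num_bits: u32, signed: bool, is_add: bool, first_term_positive: bool) -> u128 {
        if signed {
            if first_term_positive {
                (1u128 << (num_bits - 1)) - 1 // most positive value, e.g. 0x7f for i8
            } else {
                1u128 << (num_bits - 1) // bit pattern of the most negative value, e.g. 0x80 for i8
            }
        } else if is_add {
            u128::max_value() >> (128 - num_bits) // unsigned maximum, e.g. 0xff for u8
        } else {
            0 // unsigned subtraction underflows to zero
        }
    }

    fn main() {
        assert_eq!(saturated_bits(8, true, true, true), 0x7f); // i8::MAX
        assert_eq!(saturated_bits(8, true, false, false), 0x80); // i8::MIN as bits
        assert_eq!(saturated_bits(8, false, true, true), 0xff); // u8::MAX
        assert_eq!(saturated_bits(8, false, false, true), 0x00); // u8 floor
    }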
fn call_intrinsic( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, instance: ty::Instance<'tcx>, args: &[OpTy<'tcx, Self::PointerTag>], dest: PlaceTy<'tcx, Self::PointerTag>, @@ -166,67 +156,55 @@ pub trait Machine<'a, 'mir, 'tcx>: Sized { /// /// Returns a (value, overflowed) pair if the operation succeeded fn ptr_op( - ecx: &EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &InterpretCx<'a, 'mir, 'tcx, Self>, bin_op: mir::BinOp, - left: Scalar, - left_layout: TyLayout<'tcx>, - right: Scalar, - right_layout: TyLayout<'tcx>, + left: ImmTy<'tcx, Self::PointerTag>, + right: ImmTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx, (Scalar, bool)>; /// Heap allocations via the `box` keyword. fn box_alloc( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, dest: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx>; - /// Add the tag for a newly allocated pointer. + /// Adds the tag for a newly allocated pointer. fn tag_new_allocation( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, ptr: Pointer, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer>; + ) -> Pointer; /// Executed when evaluating the `*` operator: Following a reference. - /// This has the chance to adjust the tag. It should not change anything else! + /// This has the chance to adjust the tag. It should not change anything else! /// `mutability` can be `None` in case a raw ptr is being dereferenced. #[inline] fn tag_dereference( - _ecx: &EvalContext<'a, 'mir, 'tcx, Self>, + _ecx: &InterpretCx<'a, 'mir, 'tcx, Self>, place: MPlaceTy<'tcx, Self::PointerTag>, _mutability: Option, ) -> EvalResult<'tcx, Scalar> { Ok(place.ptr) } - /// Execute a retagging operation + /// Executes a retagging operation #[inline] fn retag( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _fn_entry: bool, - _two_phase: bool, + _ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, + _kind: mir::RetagKind, _place: PlaceTy<'tcx, Self::PointerTag>, ) -> EvalResult<'tcx> { Ok(()) } - /// Execute an escape-to-raw operation - #[inline] - fn escape_to_raw( - _ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, - _ptr: OpTy<'tcx, Self::PointerTag>, - ) -> EvalResult<'tcx> { - Ok(()) - } - /// Called immediately before a new stack frame got pushed fn stack_push( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, ) -> EvalResult<'tcx, Self::FrameExtra>; /// Called immediately after a stack frame gets popped fn stack_pop( - ecx: &mut EvalContext<'a, 'mir, 'tcx, Self>, + ecx: &mut InterpretCx<'a, 'mir, 'tcx, Self>, extra: Self::FrameExtra, ) -> EvalResult<'tcx>; } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 420fe26426321..e5d8341dfcf6d 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -1,19 +1,9 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The memory subsystem. //! //! Generally, we use `Pointer` to denote memory addresses. However, some operations //! have a "size"-like parameter, and they take `Scalar` for the address because //! 
if the size is 0, then the pointer can also be a (properly aligned, non-NULL) -//! integer. It is crucial that these operations call `check_align` *before* +//! integer. It is crucial that these operations call `check_align` *before* //! short-circuiting the empty case! use std::collections::VecDeque; @@ -29,7 +19,7 @@ use syntax::ast::Mutability; use super::{ Pointer, AllocId, Allocation, GlobalId, AllocationExtra, - EvalResult, Scalar, EvalErrorKind, AllocKind, PointerArithmetic, + EvalResult, Scalar, InterpError, AllocKind, PointerArithmetic, Machine, AllocMap, MayLeak, ErrorHandled, InboundsCheck, }; @@ -57,10 +47,10 @@ impl MayLeak for MemoryKind { // `Memory` has to depend on the `Machine` because some of its operations // (e.g., `get`) call a `Machine` hook. pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> { - /// Allocations local to this instance of the miri engine. The kind + /// Allocations local to this instance of the miri engine. The kind /// helps ensure that the same mechanism is used for allocation and - /// deallocation. When an allocation is not found here, it is a - /// static and looked up in the `tcx` for read access. Some machines may + /// deallocation. When an allocation is not found here, it is a + /// static and looked up in the `tcx` for read access. Some machines may /// have to mutate this map even on a read-only access to a static (because /// they do pointer provenance tracking and the allocations in `tcx` have /// the wrong type), so we let the machine override this type. @@ -131,10 +121,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { &mut self, alloc: Allocation, kind: MemoryKind, - ) -> EvalResult<'tcx, AllocId> { + ) -> AllocId { let id = self.tcx.alloc_map.lock().reserve(); self.alloc_map.insert(id, (kind, alloc)); - Ok(id) + id } pub fn allocate( @@ -142,9 +132,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { size: Size, align: Align, kind: MemoryKind, - ) -> EvalResult<'tcx, Pointer> { + ) -> Pointer { let extra = AllocationExtra::memory_allocated(size, &self.extra); - Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align, extra), kind)?)) + Pointer::from(self.allocate_with(Allocation::undef(size, align, extra), kind)) } pub fn reallocate( @@ -162,7 +152,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // For simplicities' sake, we implement reallocate as "alloc, copy, dealloc". // This happens so rarely, the perf advantage is outweighed by the maintenance cost. - let new_ptr = self.allocate(new_size, new_align, kind)?; + let new_ptr = self.allocate(new_size, new_align, kind); self.copy( ptr.into(), old_align, @@ -250,7 +240,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Ok(()) } - /// Check that the pointer is aligned AND non-NULL. This supports ZSTs in two ways: + /// Checks that the pointer is aligned AND non-NULL. This supports ZSTs in two ways: /// You can pass a scalar, and a `Pointer` does not have to actually still be allocated. pub fn check_align( &self, @@ -262,7 +252,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { Scalar::Ptr(ptr) => { // check this is not NULL -- which we can ensure only if this is in-bounds // of some (potentially dead) allocation. 
- let align = self.check_bounds_ptr_maybe_dead(ptr)?; + let align = self.check_bounds_ptr(ptr, InboundsCheck::MaybeDead)?; (ptr.offset.bytes(), align) } Scalar::Bits { bits, size } => { @@ -294,20 +284,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - /// Check if the pointer is "in-bounds". Notice that a pointer pointing at the end + /// Checks if the pointer is "in-bounds". Notice that a pointer pointing at the end /// of an allocation (i.e., at the first *inaccessible* location) *is* considered /// in-bounds! This follows C's/LLVM's rules. - /// This function also works for deallocated allocations. - /// Use `.get(ptr.alloc_id)?.check_bounds_ptr(ptr)` if you want to force the allocation - /// to still be live. /// If you want to check bounds before doing a memory access, better first obtain /// an `Allocation` and call `check_bounds`. - pub fn check_bounds_ptr_maybe_dead( + pub fn check_bounds_ptr( &self, ptr: Pointer, + liveness: InboundsCheck, ) -> EvalResult<'tcx, Align> { - let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id); - ptr.check_in_alloc(allocation_size, InboundsCheck::MaybeDead)?; + let (allocation_size, align) = self.get_size_and_align(ptr.alloc_id, liveness)?; + ptr.check_in_alloc(allocation_size, liveness)?; Ok(align) } } @@ -356,8 +344,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // no need to report anything, the const_eval call takes care of that for statics assert!(tcx.is_static(def_id).is_some()); match err { - ErrorHandled::Reported => EvalErrorKind::ReferencedConstant.into(), - ErrorHandled::TooGeneric => EvalErrorKind::TooGeneric.into(), + ErrorHandled::Reported => InterpError::ReferencedConstant.into(), + ErrorHandled::TooGeneric => InterpError::TooGeneric.into(), } }).map(|raw_const| { let allocation = tcx.alloc_map.lock().unwrap_memory(raw_const.alloc_id); @@ -429,27 +417,37 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { } } - pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) { + /// Obtain the size and alignment of an allocation, even if that allocation has been deallocated + /// + /// If `liveness` is `InboundsCheck::Dead`, this function always returns `Ok` + pub fn get_size_and_align( + &self, + id: AllocId, + liveness: InboundsCheck, + ) -> EvalResult<'static, (Size, Align)> { if let Ok(alloc) = self.get(id) { - return (Size::from_bytes(alloc.bytes.len() as u64), alloc.align); + return Ok((Size::from_bytes(alloc.bytes.len() as u64), alloc.align)); } // Could also be a fn ptr or extern static match self.tcx.alloc_map.lock().get(id) { - Some(AllocKind::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()), + Some(AllocKind::Function(..)) => Ok((Size::ZERO, Align::from_bytes(1).unwrap())), Some(AllocKind::Static(did)) => { // The only way `get` couldn't have worked here is if this is an extern static assert!(self.tcx.is_foreign_item(did)); // Use size and align of the type let ty = self.tcx.type_of(did); let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap(); - (layout.size, layout.align.abi) - } - _ => { - // Must be a deallocated pointer - *self.dead_alloc_map.get(&id).expect( - "allocation missing in dead_alloc_map" - ) + Ok((layout.size, layout.align.abi)) } + _ => match liveness { + InboundsCheck::MaybeDead => { + // Must be a deallocated pointer + Ok(*self.dead_alloc_map.get(&id).expect( + "allocation missing in dead_alloc_map" + )) + }, + InboundsCheck::Live => err!(DanglingPointerDeref), + }, } 
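[Editor's aside, not part of the patch] The in-bounds rule documented on `check_bounds_ptr` above: an offset equal to the allocation size, i.e. one past the last byte, is still considered in-bounds, following C/LLVM; only offsets beyond that are rejected. A simplified stand-in for the pointer-side check:

    fn check_in_alloc(offset_bytes: u64, allocation_size: u64) -> Result<(), &'static str> {
        if offset_bytes > allocation_size {
            Err("pointer out of bounds")
        } else {
            Ok(())
        }
    }

    fn main() {
        // For a 16-byte allocation, offsets 0..=16 are in-bounds; offset 16 is
        // the first inaccessible location but may still be pointed at.
        assert!(check_in_alloc(16, 16).is_ok());
        assert!(check_in_alloc(17, 16).is_err());
    }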
} @@ -460,7 +458,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { trace!("reading fn ptr: {}", ptr.alloc_id); match self.tcx.alloc_map.lock().get(ptr.alloc_id) { Some(AllocKind::Function(instance)) => Ok(instance), - _ => Err(EvalErrorKind::ExecuteMemory.into()), + _ => Err(InterpError::ExecuteMemory.into()), } } @@ -661,7 +659,7 @@ where } } -/// Reading and writing +/// Reading and writing. impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { pub fn copy( &mut self, @@ -702,19 +700,29 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // relocations overlapping the edges; those would not be handled correctly). let relocations = { let relocations = self.get(src.alloc_id)?.relocations(self, src, size); - let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize)); - for i in 0..length { - new_relocations.extend( - relocations - .iter() - .map(|&(offset, reloc)| { - (offset + dest.offset - src.offset + (i * size * relocations.len() as u64), - reloc) - }) - ); - } + if relocations.is_empty() { + // nothing to copy, ignore even the `length` loop + Vec::new() + } else { + let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize)); + for i in 0..length { + new_relocations.extend( + relocations + .iter() + .map(|&(offset, reloc)| { + // compute offset for current repetition + let dest_offset = dest.offset + (i * size); + ( + // shift offsets from source allocation to destination allocation + offset + dest_offset - src.offset, + reloc, + ) + }) + ); + } - new_relocations + new_relocations + } }; let tcx = self.tcx.tcx; @@ -781,20 +789,65 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> { // The bits have to be saved locally before writing to dest in case src and dest overlap. assert_eq!(size.bytes() as usize as u64, size.bytes()); - let undef_mask = self.get(src.alloc_id)?.undef_mask.clone(); - let dest_allocation = self.get_mut(dest.alloc_id)?; + let undef_mask = &self.get(src.alloc_id)?.undef_mask; + + // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`), + // a naive undef mask copying algorithm would repeatedly have to read the undef mask from + // the source and write it to the destination. Even if we optimized the memory accesses, + // we'd be doing all of this `repeat` times. + // Therefor we precompute a compressed version of the undef mask of the source value and + // then write it back `repeat` times without computing any more information from the source. + + // a precomputed cache for ranges of defined/undefined bits + // 0000010010001110 will become + // [5, 1, 2, 1, 3, 3, 1] + // where each element toggles the state + let mut ranges = smallvec::SmallVec::<[u64; 1]>::new(); + let first = undef_mask.get(src.offset); + let mut cur_len = 1; + let mut cur = first; + for i in 1..size.bytes() { + // FIXME: optimize to bitshift the current undef block's bits and read the top bit + if undef_mask.get(src.offset + Size::from_bytes(i)) == cur { + cur_len += 1; + } else { + ranges.push(cur_len); + cur_len = 1; + cur = !cur; + } + } - for i in 0..size.bytes() { - let defined = undef_mask.get(src.offset + Size::from_bytes(i)); + // now fill in all the data + let dest_allocation = self.get_mut(dest.alloc_id)?; + // an optimization where we can just overwrite an entire range of definedness bits if + // they are going to be uniformly `1` or `0`. 
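[Editor's aside, not part of the patch] The compressed undef-mask representation built above, demonstrated on plain bools: the mask is stored as the value of its first bit plus a list of run lengths, each run toggling the state. In the patch the trailing run is only pushed after the uniform case has been handled, so a mask whose bits are all equal shows up as an empty `ranges` and takes the `set_range_inbounds` fast path right below; this sketch pushes the trailing run immediately to stay self-contained.

    fn run_length_encode(bits: &[bool]) -> (bool, Vec<u64>) {
        let first = bits[0];
        let mut ranges = Vec::new();
        let mut cur = first;
        let mut cur_len = 1u64;
        for &bit in &bits[1..] {
            if bit == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = bit;
            }
        }
        ranges.push(cur_len); // trailing run
        (first, ranges)
    }

    fn main() {
        // The example from the comment above: 0000010010001110
        let bits: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
        let (first, ranges) = run_length_encode(&bits);
        assert!(!first); // the first run describes `0` bits
        assert_eq!(ranges, vec![5, 1, 2, 1, 3, 3, 1]);
        // A uniform mask has no toggles: a single run covers everything.
        let (first, ranges) = run_length_encode(&[true; 8]);
        assert!(first);
        assert_eq!(ranges, vec![8]);
    }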
+ if ranges.is_empty() { + dest_allocation.undef_mask.set_range_inbounds( + dest.offset, + dest.offset + size * repeat, + first, + ); + return Ok(()) + } - for j in 0..repeat { - dest_allocation.undef_mask.set( - dest.offset + Size::from_bytes(i + (size.bytes() * j)), - defined + // remember to fill in the trailing bits + ranges.push(cur_len); + + for mut j in 0..repeat { + j *= size.bytes(); + j += dest.offset.bytes(); + let mut cur = first; + for range in &ranges { + let old_j = j; + j += range; + dest_allocation.undef_mask.set_range_inbounds( + Size::from_bytes(old_j), + Size::from_bytes(j), + cur, ); + cur = !cur; } } - Ok(()) } } diff --git a/src/librustc_mir/interpret/mod.rs b/src/librustc_mir/interpret/mod.rs index 96ea0d5094966..ea358389ddb76 100644 --- a/src/librustc_mir/interpret/mod.rs +++ b/src/librustc_mir/interpret/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! An interpreter for MIR used in CTFE and by miri mod cast; @@ -28,7 +18,7 @@ mod visitor; pub use rustc::mir::interpret::*; // have all the `interpret` symbols in one place: here pub use self::eval_context::{ - EvalContext, Frame, StackPopCleanup, LocalValue, + InterpretCx, Frame, StackPopCleanup, LocalState, LocalValue, }; pub use self::place::{Place, PlaceTy, MemPlace, MPlaceTy}; diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 83ceadada65ce..1ce6d09d7a4e0 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -1,27 +1,21 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Functions concerning immediate values and operands, and reading from operands. //! All high-level functions to read from memory work on operands as sources. use std::convert::TryInto; -use rustc::mir; +use rustc::{mir, ty}; use rustc::ty::layout::{self, Size, LayoutOf, TyLayout, HasDataLayout, IntegerExt, VariantIdx}; use rustc::mir::interpret::{ - GlobalId, AllocId, + GlobalId, AllocId, InboundsCheck, ConstValue, Pointer, Scalar, - EvalResult, EvalErrorKind, + EvalResult, InterpError, + sign_extend, truncate, +}; +use super::{ + InterpretCx, Machine, + MemPlace, MPlaceTy, PlaceTy, Place, }; -use super::{EvalContext, Machine, MemPlace, MPlaceTy, MemoryKind}; pub use rustc::mir::interpret::ScalarMaybeUndef; /// A `Value` represents a single immediate self-contained Rust value. @@ -51,6 +45,11 @@ impl Immediate { } impl<'tcx, Tag> Immediate { + #[inline] + pub fn from_scalar(val: Scalar) -> Self { + Immediate::Scalar(ScalarMaybeUndef::Scalar(val)) + } + #[inline] pub fn erase_tag(self) -> Immediate { @@ -97,7 +96,7 @@ impl<'tcx, Tag> Immediate { } } - /// Convert the immediate into a pointer (or a pointer-sized integer). + /// Converts the immediate into a pointer (or a pointer-sized integer). /// Throws away the second half of a ScalarPair! 
#[inline] pub fn to_scalar_ptr(self) -> EvalResult<'tcx, Scalar> { @@ -107,7 +106,7 @@ impl<'tcx, Tag> Immediate { } } - /// Convert the value into its metadata. + /// Converts the value into its metadata. /// Throws away the first half of a ScalarPair! #[inline] pub fn to_meta(self) -> EvalResult<'tcx, Option>> { @@ -122,7 +121,7 @@ impl<'tcx, Tag> Immediate { // as input for binary and cast operations. #[derive(Copy, Clone, Debug)] pub struct ImmTy<'tcx, Tag=()> { - immediate: Immediate, + pub imm: Immediate, pub layout: TyLayout<'tcx>, } @@ -130,12 +129,12 @@ impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> { type Target = Immediate; #[inline(always)] fn deref(&self) -> &Immediate { - &self.immediate + &self.imm } } /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate, -/// or still in memory. The latter is an optimization, to delay reading that chunk of +/// or still in memory. The latter is an optimization, to delay reading that chunk of /// memory and to avoid having to store arbitrary-sized data here. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub enum Operand { @@ -190,7 +189,7 @@ impl Operand { #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] pub struct OpTy<'tcx, Tag=()> { - crate op: Operand, // ideally we'd make this private, but const_prop needs this + op: Operand, pub layout: TyLayout<'tcx>, } @@ -216,12 +215,25 @@ impl<'tcx, Tag> From> for OpTy<'tcx, Tag> { #[inline(always)] fn from(val: ImmTy<'tcx, Tag>) -> Self { OpTy { - op: Operand::Immediate(val.immediate), + op: Operand::Immediate(val.imm), layout: val.layout } } } +impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> +{ + #[inline] + pub fn from_scalar(val: Scalar, layout: TyLayout<'tcx>) -> Self { + ImmTy { imm: Immediate::from_scalar(val), layout } + } + + #[inline] + pub fn to_bits(self) -> EvalResult<'tcx, u128> { + self.to_scalar()?.to_bits(self.layout.size) + } +} + impl<'tcx, Tag> OpTy<'tcx, Tag> { #[inline] @@ -237,7 +249,7 @@ impl<'tcx, Tag> OpTy<'tcx, Tag> // Use the existing layout if given (but sanity check in debug mode), // or compute the layout. #[inline(always)] -fn from_known_layout<'tcx>( +pub(super) fn from_known_layout<'tcx>( layout: Option>, compute: impl FnOnce() -> EvalResult<'tcx, TyLayout<'tcx>> ) -> EvalResult<'tcx, TyLayout<'tcx>> { @@ -255,10 +267,10 @@ fn from_known_layout<'tcx>( } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { /// Try reading an immediate in memory; this is interesting particularly for ScalarPair. - /// Return None if the layout does not permit loading this as a value. - pub(super) fn try_read_immediate_from_mplace( + /// Returns `None` if the layout does not permit loading this as a value. + fn try_read_immediate_from_mplace( &self, mplace: MPlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, Option>> { @@ -312,7 +324,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> /// Note that for a given layout, this operation will either always fail or always /// succeed! Whether it succeeds depends on whether the layout can be represented /// in a `Immediate`, not on which data is stored there currently. 
- pub(crate) fn try_read_immediate( + pub(super) fn try_read_immediate( &self, src: OpTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, Result, MemPlace>> { @@ -334,8 +346,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> &self, op: OpTy<'tcx, M::PointerTag> ) -> EvalResult<'tcx, ImmTy<'tcx, M::PointerTag>> { - if let Ok(immediate) = self.try_read_immediate(op)? { - Ok(ImmTy { immediate, layout: op.layout }) + if let Ok(imm) = self.try_read_immediate(op)? { + Ok(ImmTy { imm, layout: op.layout }) } else { bug!("primitive read failed for type: {:?}", op.layout.ty); } @@ -357,37 +369,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let len = mplace.len(self)?; let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?; let str = ::std::str::from_utf8(bytes) - .map_err(|err| EvalErrorKind::ValidationFailure(err.to_string()))?; + .map_err(|err| InterpError::ValidationFailure(err.to_string()))?; Ok(str) } - pub fn uninit_operand( - &mut self, - layout: TyLayout<'tcx> - ) -> EvalResult<'tcx, Operand> { - // This decides which types we will use the Immediate optimization for, and hence should - // match what `try_read_immediate` and `eval_place_to_op` support. - if layout.is_zst() { - return Ok(Operand::Immediate(Immediate::Scalar(Scalar::zst().into()))); - } - - Ok(match layout.abi { - layout::Abi::Scalar(..) => - Operand::Immediate(Immediate::Scalar(ScalarMaybeUndef::Undef)), - layout::Abi::ScalarPair(..) => - Operand::Immediate(Immediate::ScalarPair( - ScalarMaybeUndef::Undef, - ScalarMaybeUndef::Undef, - )), - _ => { - trace!("Forcing allocation for local of type {:?}", layout.ty); - Operand::Indirect( - *self.allocate(layout, MemoryKind::Stack)? - ) - } - }) - } - /// Projection functions pub fn operand_field( &self, @@ -467,8 +452,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } /// This is used by [priroda](https://github.com/oli-obk/priroda) to get an OpTy from a local - /// - /// When you know the layout of the local in advance, you can pass it as last argument pub fn access_local( &self, frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>, @@ -476,24 +459,44 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> layout: Option>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { assert_ne!(local, mir::RETURN_PLACE); - let op = *frame.locals[local].access()?; - let layout = from_known_layout(layout, - || self.layout_of_local(frame, local))?; + let layout = self.layout_of_local(frame, local, layout)?; + let op = if layout.is_zst() { + // Do not read from ZST, they might not be initialized + Operand::Immediate(Immediate::Scalar(Scalar::zst().into())) + } else { + frame.locals[local].access()? + }; Ok(OpTy { op, layout }) } + /// Every place can be read from, so we can turm them into an operand + #[inline(always)] + pub fn place_to_op( + &self, + place: PlaceTy<'tcx, M::PointerTag> + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { + let op = match *place { + Place::Ptr(mplace) => { + Operand::Indirect(mplace) + } + Place::Local { frame, local } => + *self.access_local(&self.stack[frame], local, None)? + }; + Ok(OpTy { op, layout: place.layout }) + } + // Evaluate a place with the goal of reading from it. This lets us sometimes - // avoid allocations. If you already know the layout, you can pass it in - // to avoid looking it up again. - fn eval_place_to_op( + // avoid allocations. 
+ pub(super) fn eval_place_to_op( &self, mir_place: &mir::Place<'tcx>, layout: Option>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; + use rustc::mir::PlaceBase; let op = match *mir_place { - Local(mir::RETURN_PLACE) => return err!(ReadFromReturnPointer), - Local(local) => self.access_local(self.frame(), local, layout)?, + Base(PlaceBase::Local(mir::RETURN_PLACE)) => return err!(ReadFromReturnPointer), + Base(PlaceBase::Local(local)) => self.access_local(self.frame(), local, layout)?, Projection(ref proj) => { let op = self.eval_place_to_op(&proj.base, None)?; @@ -522,14 +525,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Move(ref place) => self.eval_place_to_op(place, layout)?, - Constant(ref constant) => { - let layout = from_known_layout(layout, || { - let ty = self.monomorphize(mir_op.ty(self.mir(), *self.tcx), self.substs()); - self.layout_of(ty) - })?; - let op = self.const_value_to_op(constant.literal.val)?; - OpTy { op, layout } - } + Constant(ref constant) => self.eval_const_to_op(*constant.literal, layout)?, }; trace!("{:?}: {:?}", mir_op, *op); Ok(op) @@ -545,37 +541,44 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> .collect() } - // Used when miri runs into a constant, and by CTFE. - // FIXME: CTFE should use allocations, then we can make this private (embed it into - // `eval_operand`, ideally). - pub(crate) fn const_value_to_op( + // Used when the miri-engine runs into a constant and for extracting information from constants + // in patterns via the `const_eval` module + crate fn eval_const_to_op( &self, - val: ConstValue<'tcx>, - ) -> EvalResult<'tcx, Operand> { - trace!("const_value_to_op: {:?}", val); - match val { - ConstValue::Unevaluated(def_id, substs) => { - let instance = self.resolve(def_id, substs)?; - Ok(*OpTy::from(self.const_eval_raw(GlobalId { - instance, - promoted: None, - })?)) - } - ConstValue::ByRef(id, alloc, offset) => { + val: ty::Const<'tcx>, + layout: Option>, + ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { + let op = match val.val { + ConstValue::Param(_) | ConstValue::Infer(_) => bug!(), + ConstValue::ByRef(ptr, alloc) => { // We rely on mutability being set correctly in that allocation to prevent writes // where none should happen -- and for `static mut`, we copy on demand anyway. - Ok(Operand::Indirect( - MemPlace::from_ptr(Pointer::new(id, offset), alloc.align) - ).with_default_tag()) + Operand::Indirect( + MemPlace::from_ptr(ptr, alloc.align) + ).with_default_tag() }, - ConstValue::ScalarPair(a, b) => - Ok(Operand::Immediate(Immediate::ScalarPair( + ConstValue::Slice(a, b) => + Operand::Immediate(Immediate::ScalarPair( a.into(), - b.into(), - )).with_default_tag()), + Scalar::from_uint(b, self.tcx.data_layout.pointer_size).into(), + )).with_default_tag(), ConstValue::Scalar(x) => - Ok(Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag()), - } + Operand::Immediate(Immediate::Scalar(x.into())).with_default_tag(), + ConstValue::Unevaluated(def_id, substs) => { + let instance = self.resolve(def_id, substs)?; + return Ok(OpTy::from(self.const_eval_raw(GlobalId { + instance, + promoted: None, + })?)); + }, + }; + let layout = from_known_layout(layout, || { + self.layout_of(self.monomorphize(val.ty)?) + })?; + Ok(OpTy { + op, + layout, + }) } /// Read discriminant, return the runtime value as well as the variant index. 
@@ -585,44 +588,41 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ) -> EvalResult<'tcx, (u128, VariantIdx)> { trace!("read_discriminant_value {:#?}", rval.layout); - match rval.layout.variants { + let (discr_kind, discr_index) = match rval.layout.variants { layout::Variants::Single { index } => { let discr_val = rval.layout.ty.ty_adt_def().map_or( index.as_u32() as u128, |def| def.discriminant_for_variant(*self.tcx, index).val); return Ok((discr_val, index)); } - layout::Variants::Tagged { .. } | - layout::Variants::NicheFilling { .. } => {}, - } + layout::Variants::Multiple { ref discr_kind, discr_index, .. } => + (discr_kind, discr_index), + }; + // read raw discriminant value - let discr_op = self.operand_field(rval, 0)?; + let discr_op = self.operand_field(rval, discr_index as u64)?; let discr_val = self.read_immediate(discr_op)?; let raw_discr = discr_val.to_scalar_or_undef(); trace!("discr value: {:?}", raw_discr); // post-process - Ok(match rval.layout.variants { - layout::Variants::Single { .. } => bug!(), - layout::Variants::Tagged { .. } => { + Ok(match *discr_kind { + layout::DiscriminantKind::Tag => { let bits_discr = match raw_discr.to_bits(discr_val.layout.size) { Ok(raw_discr) => raw_discr, Err(_) => return err!(InvalidDiscriminant(raw_discr.erase_tag())), }; let real_discr = if discr_val.layout.ty.is_signed() { - let i = bits_discr as i128; // going from layout tag type to typeck discriminant type // requires first sign extending with the layout discriminant - let shift = 128 - discr_val.layout.size.bits(); - let sexted = (i << shift) >> shift; + let sexted = sign_extend(bits_discr, discr_val.layout.size) as i128; // and then zeroing with the typeck discriminant type let discr_ty = rval.layout.ty .ty_adt_def().expect("tagged layout corresponds to adt") .repr .discr_type(); - let discr_ty = layout::Integer::from_attr(self, discr_ty); - let shift = 128 - discr_ty.size().bits(); + let size = layout::Integer::from_attr(self, discr_ty).size(); let truncatee = sexted as u128; - (truncatee << shift) >> shift + truncate(truncatee, size) } else { bits_discr }; @@ -632,14 +632,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> .expect("tagged layout for non adt") .discriminants(self.tcx.tcx) .find(|(_, var)| var.val == real_discr) - .ok_or_else(|| EvalErrorKind::InvalidDiscriminant(raw_discr.erase_tag()))?; + .ok_or_else(|| InterpError::InvalidDiscriminant(raw_discr.erase_tag()))?; (real_discr, index.0) }, - layout::Variants::NicheFilling { + layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start, - .. 
} => { let variants_start = niche_variants.start().as_u32() as u128; let variants_end = niche_variants.end().as_u32() as u128; @@ -647,7 +646,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) => { // The niche must be just 0 (which an inbounds pointer value never is) let ptr_valid = niche_start == 0 && variants_start == variants_end && - self.memory.check_bounds_ptr_maybe_dead(ptr).is_ok(); + self.memory.check_bounds_ptr(ptr, InboundsCheck::MaybeDead).is_ok(); if !ptr_valid { return err!(InvalidDiscriminant(raw_discr.erase_tag())); } @@ -675,5 +674,4 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } }) } - } diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs index 31824d5ec4a93..488f81d8f740e 100644 --- a/src/librustc_mir/interpret/operator.rs +++ b/src/librustc_mir/interpret/operator.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir; use rustc::ty::{self, layout::{Size, TyLayout}}; use syntax::ast::FloatTy; @@ -15,10 +5,10 @@ use rustc_apfloat::ieee::{Double, Single}; use rustc_apfloat::Float; use rustc::mir::interpret::{EvalResult, Scalar}; -use super::{EvalContext, PlaceTy, Immediate, Machine, ImmTy}; +use super::{InterpretCx, PlaceTy, Immediate, Machine, ImmTy}; -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { /// Applies the binary operation `op` to the two operands and writes a tuple of the result /// and a boolean signifying the potential overflow to the destination. pub fn binop_with_overflow( @@ -28,7 +18,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> right: ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - let (val, overflowed) = self.binary_op_imm(op, left, right)?; + let (val, overflowed) = self.binary_op(op, left, right)?; let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into()); self.write_immediate(val, dest) } @@ -42,12 +32,12 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> right: ImmTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - let (val, _overflowed) = self.binary_op_imm(op, left, right)?; + let (val, _overflowed) = self.binary_op(op, left, right)?; self.write_scalar(val, dest) } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { fn binary_char_op( &self, bin_op: mir::BinOp, @@ -282,69 +272,55 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Ok((val, false)) } - /// Convenience wrapper that's useful when keeping the layout together with the - /// immediate value. + /// Returns the result of the specified operation and whether it overflowed. 
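[Editor's aside, not part of the patch] The `sign_extend`/`truncate` pair that the discriminant-reading hunk above now uses: reading a signed tag means sign-extending the raw bits from the tag's storage size up to 128 bits, then re-truncating to the size of the typeck discriminant type. The real helpers take a `Size`; this sketch takes a bit width, with a worked example:

    fn sign_extend(value: u128, size_bits: u32) -> u128 {
        let shift = 128 - size_bits;
        // shift the relevant bits to the top, then arithmetic-shift back down
        (((value << shift) as i128) >> shift) as u128
    }

    fn truncate(value: u128, size_bits: u32) -> u128 {
        let shift = 128 - size_bits;
        // same idea with a logical shift, keeping only the low `size_bits` bits
        (value << shift) >> shift
    }

    fn main() {
        // A one-byte tag 0xfe, for an enum whose typeck discriminant type is i16:
        let raw_tag = 0xfeu128;
        let sext = sign_extend(raw_tag, 8);
        assert_eq!(sext as i128, -2); // sign-extended to the full 128 bits
        let discr = truncate(sext, 16);
        assert_eq!(discr, 0xfffe); // -2 in the 16-bit discriminant type
    }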
#[inline] - pub fn binary_op_imm( + pub fn binary_op( &self, bin_op: mir::BinOp, left: ImmTy<'tcx, M::PointerTag>, right: ImmTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, (Scalar, bool)> { - self.binary_op( - bin_op, - left.to_scalar()?, left.layout, - right.to_scalar()?, right.layout, - ) - } - - /// Returns the result of the specified operation and whether it overflowed. - pub fn binary_op( - &self, - bin_op: mir::BinOp, - left: Scalar, - left_layout: TyLayout<'tcx>, - right: Scalar, - right_layout: TyLayout<'tcx>, ) -> EvalResult<'tcx, (Scalar, bool)> { trace!("Running binary op {:?}: {:?} ({:?}), {:?} ({:?})", - bin_op, left, left_layout.ty, right, right_layout.ty); + bin_op, *left, left.layout.ty, *right, right.layout.ty); - match left_layout.ty.sty { + match left.layout.ty.sty { ty::Char => { - assert_eq!(left_layout.ty, right_layout.ty); - let left = left.to_char()?; - let right = right.to_char()?; + assert_eq!(left.layout.ty, right.layout.ty); + let left = left.to_scalar()?.to_char()?; + let right = right.to_scalar()?.to_char()?; self.binary_char_op(bin_op, left, right) } ty::Bool => { - assert_eq!(left_layout.ty, right_layout.ty); - let left = left.to_bool()?; - let right = right.to_bool()?; + assert_eq!(left.layout.ty, right.layout.ty); + let left = left.to_scalar()?.to_bool()?; + let right = right.to_scalar()?.to_bool()?; self.binary_bool_op(bin_op, left, right) } ty::Float(fty) => { - assert_eq!(left_layout.ty, right_layout.ty); - let left = left.to_bits(left_layout.size)?; - let right = right.to_bits(right_layout.size)?; + assert_eq!(left.layout.ty, right.layout.ty); + let left = left.to_bits()?; + let right = right.to_bits()?; self.binary_float_op(bin_op, fty, left, right) } _ => { // Must be integer(-like) types. Don't forget about == on fn pointers. 
- assert!(left_layout.ty.is_integral() || left_layout.ty.is_unsafe_ptr() || - left_layout.ty.is_fn()); - assert!(right_layout.ty.is_integral() || right_layout.ty.is_unsafe_ptr() || - right_layout.ty.is_fn()); + assert!(left.layout.ty.is_integral() || left.layout.ty.is_unsafe_ptr() || + left.layout.ty.is_fn()); + assert!(right.layout.ty.is_integral() || right.layout.ty.is_unsafe_ptr() || + right.layout.ty.is_fn()); // Handle operations that support pointer values - if left.is_ptr() || right.is_ptr() || bin_op == mir::BinOp::Offset { - return M::ptr_op(self, bin_op, left, left_layout, right, right_layout); + if left.to_scalar_ptr()?.is_ptr() || + right.to_scalar_ptr()?.is_ptr() || + bin_op == mir::BinOp::Offset + { + return M::ptr_op(self, bin_op, left, right); } // Everything else only works with "proper" bits - let left = left.to_bits(left_layout.size).expect("we checked is_ptr"); - let right = right.to_bits(right_layout.size).expect("we checked is_ptr"); - self.binary_int_op(bin_op, left, left_layout, right, right_layout) + let l = left.to_bits().expect("we checked is_ptr"); + let r = right.to_bits().expect("we checked is_ptr"); + self.binary_int_op(bin_op, l, left.layout, r, right.layout) } } } @@ -352,14 +328,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> pub fn unary_op( &self, un_op: mir::UnOp, - val: Scalar, - layout: TyLayout<'tcx>, + val: ImmTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx, Scalar> { use rustc::mir::UnOp::*; - use rustc_apfloat::ieee::{Single, Double}; - use rustc_apfloat::Float; - trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty.sty); + let layout = val.layout; + let val = val.to_scalar()?; + trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty); match layout.ty.sty { ty::Bool => { diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index bae670bf2b4b3..32ad52746896f 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Computations on places -- field projections, going from mir::Place, and writing //! into a place. //! All high-level functions to write to memory work on places as destinations. @@ -17,13 +7,15 @@ use std::hash::Hash; use rustc::hir; use rustc::mir; +use rustc::mir::interpret::truncate; use rustc::ty::{self, Ty}; use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx}; +use rustc::ty::TypeFoldable; use super::{ GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic, - EvalContext, Machine, AllocMap, AllocationExtra, - RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind + InterpretCx, Machine, AllocMap, AllocationExtra, + RawConst, Immediate, ImmTy, ScalarMaybeUndef, Operand, OpTy, MemoryKind, LocalValue }; #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)] @@ -33,7 +25,7 @@ pub struct MemPlace { /// However, it may never be undef. pub ptr: Scalar, pub align: Align, - /// Metadata for unsized places. Interpretation is up to the type. + /// Metadata for unsized places. Interpretation is up to the type. 
/// Must not be present for sized types, but can be missing for unsized types /// (e.g., `extern type`). pub meta: Option>, @@ -67,7 +59,7 @@ impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> { } /// A MemPlace with its layout. Constructing it is only possible in this module. -#[derive(Copy, Clone, Debug)] +#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] pub struct MPlaceTy<'tcx, Tag=()> { mplace: MemPlace, pub layout: TyLayout<'tcx>, @@ -253,10 +245,10 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { } } -impl<'tcx, Tag: ::std::fmt::Debug> OpTy<'tcx, Tag> { +impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> { #[inline(always)] pub fn try_as_mplace(self) -> Result, Immediate> { - match self.op { + match *self { Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), Operand::Immediate(imm) => Err(imm), } @@ -313,7 +305,7 @@ impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> { } // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 -impl<'a, 'mir, 'tcx, Tag, M> EvalContext<'a, 'mir, 'tcx, M> +impl<'a, 'mir, 'tcx, Tag, M> InterpretCx<'a, 'mir, 'tcx, M> where // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 Tag: ::std::fmt::Debug+Default+Copy+Eq+Hash+'static, @@ -335,6 +327,10 @@ where let mplace = MemPlace { ptr: val.to_scalar_ptr()?, + // We could use the run-time alignment here. For now, we do not, because + // the point of tracking the alignment here is to make sure that the *static* + // alignment information emitted with the loads is correct. The run-time + // alignment can only be more restrictive. align: layout.align.abi, meta: val.to_meta()?, }; @@ -358,7 +354,7 @@ where ty::Ref(_, _, mutbl) => Some(mutbl), ty::Adt(def, _) if def.is_box() => Some(hir::MutMutable), ty::RawPtr(_) => None, - _ => bug!("Unexpected pointer type {}", val.layout.ty.sty), + _ => bug!("Unexpected pointer type {}", val.layout.ty), }; place.mplace.ptr = M::tag_dereference(self, place, mutbl)?; Ok(place) @@ -394,9 +390,11 @@ where // above). In that case, all fields are equal. let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; - // Offset may need adjustment for unsized fields + // Offset may need adjustment for unsized fields. let (meta, offset) = if field_layout.is_unsized() { - // re-use parent metadata to determine dynamic field layout + // Re-use parent metadata to determine dynamic field layout. + // With custom DSTS, this *will* execute user-defined code, but the same + // happens at run-time so that's okay. let align = match self.size_and_align_of(base.meta, field_layout)? { Some((_, align)) => align, None if offset == Size::ZERO => @@ -496,9 +494,9 @@ where Deref => self.deref_operand(base.into())?, Index(local) => { - let n = *self.frame().locals[local].access()?; - let n_layout = self.layout_of(self.tcx.types.usize)?; - let n = self.read_scalar(OpTy { op: n, layout: n_layout })?; + let layout = self.layout_of(self.tcx.types.usize)?; + let n = self.access_local(self.frame(), local, Some(layout))?; + let n = self.read_scalar(n)?; let n = n.to_bits(self.tcx.data_layout.pointer_size)?; self.mplace_field(base, u64::try_from(n).unwrap())? } @@ -525,7 +523,7 @@ where }) } - /// Get the place of a field inside the place, and also the field's type. + /// Gets the place of a field inside the place, and also the field's type. /// Just a convenience function, but used quite a bit. 
/// This is the only projection that might have a side-effect: We cannot project /// into the field of a local `ScalarPair`, we have to first allocate it. @@ -556,11 +554,11 @@ where }) } - /// Project into a place + /// Projects into a place. pub fn place_projection( &mut self, base: PlaceTy<'tcx, M::PointerTag>, - proj_elem: &mir::ProjectionElem<'tcx, mir::Local, Ty<'tcx>>, + proj_elem: &mir::ProjectionElem>, ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { @@ -576,26 +574,28 @@ where }) } - /// Evaluate statics and promoteds to an `MPlace`. Used to share some code between + /// Evaluate statics and promoteds to an `MPlace`. Used to share some code between /// `eval_place` and `eval_place_to_op`. pub(super) fn eval_place_to_mplace( &self, mir_place: &mir::Place<'tcx> ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; + use rustc::mir::PlaceBase; + use rustc::mir::{Static, StaticKind}; Ok(match *mir_place { - Promoted(ref promoted) => { + Base(PlaceBase::Static(box Static { kind: StaticKind::Promoted(promoted), .. })) => { let instance = self.frame().instance; self.const_eval_raw(GlobalId { instance, - promoted: Some(promoted.0), + promoted: Some(promoted), })? } - Static(ref static_) => { - let ty = self.monomorphize(static_.ty, self.substs()); + Base(PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), ty })) => { + assert!(!ty.needs_subst()); let layout = self.layout_of(ty)?; - let instance = ty::Instance::mono(*self.tcx, static_.def_id); + let instance = ty::Instance::mono(*self.tcx, def_id); let cid = GlobalId { instance, promoted: None @@ -607,7 +607,7 @@ where // global table but not in its local memory: It calls back into tcx through // a query, triggering the CTFE machinery to actually turn this lazy reference // into a bunch of bytes. IOW, statics are evaluated with CTFE even when - // this EvalContext uses another Machine (e.g., in miri). This is what we + // this InterpretCx uses another Machine (e.g., in miri). This is what we // want! This way, computing statics works concistently between codegen // and miri: They use the same query to eventually obtain a `ty::Const` // and use that for further computation. @@ -619,30 +619,32 @@ where }) } - /// Compute a place. You should only use this if you intend to write into this + /// Computes a place. You should only use this if you intend to write into this /// place; for reading, a more efficient alternative is `eval_place_for_read`. pub fn eval_place( &mut self, mir_place: &mir::Place<'tcx> ) -> EvalResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::Place::*; + use rustc::mir::PlaceBase; let place = match *mir_place { - Local(mir::RETURN_PLACE) => match self.frame().return_place { + Base(PlaceBase::Local(mir::RETURN_PLACE)) => match self.frame().return_place { Some(return_place) => // We use our layout to verify our assumption; caller will validate // their layout on return. 
PlaceTy { place: *return_place, - layout: self.layout_of_local(self.frame(), mir::RETURN_PLACE)?, + layout: self.layout_of(self.monomorphize(self.frame().mir.return_ty())?)?, }, None => return err!(InvalidNullPointerUsage), }, - Local(local) => PlaceTy { + Base(PlaceBase::Local(local)) => PlaceTy { + // This works even for dead/uninitialized locals; we check further when writing place: Place::Local { frame: self.cur_frame(), local, }, - layout: self.layout_of_local(self.frame(), local)?, + layout: self.layout_of_local(self.frame(), local, None)?, }, Projection(ref proj) => { @@ -713,16 +715,19 @@ where // but not factored as a separate function. let mplace = match dest.place { Place::Local { frame, local } => { - match *self.stack[frame].locals[local].access_mut()? { - Operand::Immediate(ref mut dest_val) => { - // Yay, we can just change the local directly. - *dest_val = src; + match self.stack[frame].locals[local].access_mut()? { + Ok(local) => { + // Local can be updated in-place. + *local = LocalValue::Live(Operand::Immediate(src)); return Ok(()); - }, - Operand::Indirect(mplace) => mplace, // already in memory + } + Err(mplace) => { + // The local is in memory, go on below. + mplace + } } }, - Place::Ptr(mplace) => mplace, // already in memory + Place::Ptr(mplace) => mplace, // already referring to memory }; let dest = MPlaceTy { mplace, layout: dest.layout }; @@ -794,7 +799,7 @@ where } } - /// Copy the data from an operand to a place. This does not support transmuting! + /// Copies the data from an operand to a place. This does not support transmuting! /// Use `copy_op_transmute` if the layouts could disagree. #[inline(always)] pub fn copy_op( @@ -812,7 +817,7 @@ where Ok(()) } - /// Copy the data from an operand to a place. This does not support transmuting! + /// Copies the data from an operand to a place. This does not support transmuting! /// Use `copy_op_transmute` if the layouts could disagree. /// Also, if you use this you are responsible for validating that things git copied at the /// right type. @@ -821,8 +826,6 @@ where src: OpTy<'tcx, M::PointerTag>, dest: PlaceTy<'tcx, M::PointerTag>, ) -> EvalResult<'tcx> { - debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(), - "Cannot copy unsized data"); // We do NOT compare the types for equality, because well-typed code can // actually "transmute" `&mut T` to `&T` in an assignment without a cast. assert!(src.layout.details == dest.layout.details, @@ -831,7 +834,10 @@ where // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. let src = match self.try_read_immediate(src)? { Ok(src_val) => { + assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); // Yay, we got a value that we can write directly. + // FIXME: Add a check to make sure that if `src` is indirect, + // it does not overlap with `dest`. return self.write_immediate_no_validate(src_val, dest); } Err(mplace) => mplace, @@ -839,19 +845,26 @@ where // Slow path, this does not fit into an immediate. Just memcpy. trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); - let dest = self.force_allocation(dest)?; - let (src_ptr, src_align) = src.to_scalar_ptr_align(); - let (dest_ptr, dest_align) = dest.to_scalar_ptr_align(); + // This interprets `src.meta` with the `dest` local's layout, if an unsized local + // is being initialized! 
+ let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?; + let size = size.unwrap_or_else(|| { + assert!(!dest.layout.is_unsized(), + "Cannot copy into already initialized unsized place"); + dest.layout.size + }); + assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances"); self.memory.copy( - src_ptr, src_align, - dest_ptr, dest_align, - dest.layout.size, false + src.ptr, src.align, + dest.ptr, dest.align, + size, + /*nonoverlapping*/ true, )?; Ok(()) } - /// Copy the data from an operand to a place. The layouts may disagree, but they must + /// Copies the data from an operand to a place. The layouts may disagree, but they must /// have the same size. pub fn copy_op_transmute( &mut self, @@ -862,11 +875,13 @@ where // Fast path: Just use normal `copy_op` return self.copy_op(src, dest); } - // We still require the sizes to match - debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(), - "Cannot copy unsized data"); + // We still require the sizes to match. assert!(src.layout.size == dest.layout.size, "Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest); + // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want + // to avoid that here. + assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(), + "Cannot transmute unsized data"); // The hard case is `ScalarPair`. `src` is already read from memory in this case, // using `src.layout` to figure out which bytes to use for the 1st and 2nd field. @@ -890,59 +905,84 @@ where Ok(()) } - /// Make sure that a place is in memory, and return where it is. + /// Ensures that a place is in memory, and returns where it is. /// If the place currently refers to a local that doesn't yet have a matching allocation, /// create such an allocation. /// This is essentially `force_to_memplace`. - pub fn force_allocation( + /// + /// This supports unsized types and returns the computed size to avoid some + /// redundant computation when copying; use `force_allocation` for a simpler, sized-only + /// version. + pub fn force_allocation_maybe_sized( &mut self, place: PlaceTy<'tcx, M::PointerTag>, - ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - let mplace = match place.place { + meta: Option>, + ) -> EvalResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option)> { + let (mplace, size) = match place.place { Place::Local { frame, local } => { - match *self.stack[frame].locals[local].access()? { - Operand::Indirect(mplace) => mplace, - Operand::Immediate(value) => { + match self.stack[frame].locals[local].access_mut()? { + Ok(local_val) => { // We need to make an allocation. // FIXME: Consider not doing anything for a ZST, and just returning // a fake pointer? Are we even called for ZST? + // We cannot hold on to the reference `local_val` while allocating, + // but we can hold on to the value in there. + let old_val = + if let LocalValue::Live(Operand::Immediate(value)) = *local_val { + Some(value) + } else { + None + }; + // We need the layout of the local. We can NOT use the layout we got, // that might e.g., be an inner field of a struct with `Scalar` layout, // that has different alignment than the outer field. - let local_layout = self.layout_of_local(&self.stack[frame], local)?; - let ptr = self.allocate(local_layout, MemoryKind::Stack)?; - // We don't have to validate as we can assume the local - // was already valid for its type. 
- self.write_immediate_to_mplace_no_validate(value, ptr)?; - let mplace = ptr.mplace; - // Update the local - *self.stack[frame].locals[local].access_mut()? = - Operand::Indirect(mplace); - mplace + // We also need to support unsized types, and hence cannot use `allocate`. + let local_layout = self.layout_of_local(&self.stack[frame], local, None)?; + let (size, align) = self.size_and_align_of(meta, local_layout)? + .expect("Cannot allocate for non-dyn-sized type"); + let ptr = self.memory.allocate(size, align, MemoryKind::Stack); + let ptr = M::tag_new_allocation(self, ptr, MemoryKind::Stack); + let mplace = MemPlace { ptr: ptr.into(), align, meta }; + if let Some(value) = old_val { + // Preserve old value. + // We don't have to validate as we can assume the local + // was already valid for its type. + let mplace = MPlaceTy { mplace, layout: local_layout }; + self.write_immediate_to_mplace_no_validate(value, mplace)?; + } + // Now we can call `access_mut` again, asserting it goes well, + // and actually overwrite things. + *self.stack[frame].locals[local].access_mut().unwrap().unwrap() = + LocalValue::Live(Operand::Indirect(mplace)); + (mplace, Some(size)) } + Err(mplace) => (mplace, None), // this already was an indirect local } } - Place::Ptr(mplace) => mplace + Place::Ptr(mplace) => (mplace, None) }; // Return with the original layout, so that the caller can go on - Ok(MPlaceTy { mplace, layout: place.layout }) + Ok((MPlaceTy { mplace, layout: place.layout }, size)) + } + + #[inline(always)] + pub fn force_allocation( + &mut self, + place: PlaceTy<'tcx, M::PointerTag>, + ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { + Ok(self.force_allocation_maybe_sized(place, None)?.0) } pub fn allocate( &mut self, layout: TyLayout<'tcx>, kind: MemoryKind, - ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { - if layout.is_unsized() { - assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type"); - // FIXME: What should we do here? We should definitely also tag! - Ok(MPlaceTy::dangling(layout, self)) - } else { - let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?; - let ptr = M::tag_new_allocation(self, ptr, kind)?; - Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) - } + ) -> MPlaceTy<'tcx, M::PointerTag> { + let ptr = self.memory.allocate(layout.size, layout.align.abi, kind); + let ptr = M::tag_new_allocation(self, ptr, kind); + MPlaceTy::from_aligned_ptr(ptr, layout) } pub fn write_discriminant_index( @@ -954,7 +994,12 @@ where layout::Variants::Single { index } => { assert_eq!(index, variant_index); } - layout::Variants::Tagged { ref tag, .. } => { + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Tag, + ref discr, + discr_index, + .. 
+ } => { let adt_def = dest.layout.ty.ty_adt_def().unwrap(); assert!(variant_index.as_usize() < adt_def.variants.len()); let discr_val = adt_def @@ -964,17 +1009,19 @@ where // raw discriminants for enums are isize or bigger during // their computation, but the in-memory tag is the smallest possible // representation - let size = tag.value.size(self); - let shift = 128 - size.bits(); - let discr_val = (discr_val << shift) >> shift; + let size = discr.value.size(self); + let discr_val = truncate(discr_val, size); - let discr_dest = self.place_field(dest, 0)?; + let discr_dest = self.place_field(dest, discr_index as u64)?; self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?; } - layout::Variants::NicheFilling { - dataful_variant, - ref niche_variants, - niche_start, + layout::Variants::Multiple { + discr_kind: layout::DiscriminantKind::Niche { + dataful_variant, + ref niche_variants, + niche_start, + }, + discr_index, .. } => { assert!( @@ -982,7 +1029,7 @@ where ); if variant_index != dataful_variant { let niche_dest = - self.place_field(dest, 0)?; + self.place_field(dest, discr_index as u64)?; let niche_value = variant_index.as_u32() - niche_variants.start().as_u32(); let niche_value = (niche_value as u128) .wrapping_add(niche_start); @@ -997,22 +1044,6 @@ where Ok(()) } - /// Every place can be read from, so we can turm them into an operand - #[inline(always)] - pub fn place_to_op( - &self, - place: PlaceTy<'tcx, M::PointerTag> - ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { - let op = match place.place { - Place::Ptr(mplace) => { - Operand::Indirect(mplace) - } - Place::Local { frame, local } => - *self.stack[frame].locals[local].access()? - }; - Ok(OpTy { op, layout: place.layout }) - } - pub fn raw_const_to_mplace( &self, raw: RawConst<'tcx>, diff --git a/src/librustc_mir/interpret/snapshot.rs b/src/librustc_mir/interpret/snapshot.rs index f9ce7b4319fac..0bb8b1d9d02ca 100644 --- a/src/librustc_mir/interpret/snapshot.rs +++ b/src/librustc_mir/interpret/snapshot.rs @@ -12,7 +12,7 @@ use rustc::mir; use rustc::mir::interpret::{ AllocId, Pointer, Scalar, Relocations, Allocation, UndefMask, - EvalResult, EvalErrorKind, + EvalResult, InterpError, }; use rustc::ty::{self, TyCtxt}; @@ -23,9 +23,9 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use syntax::ast::Mutability; use syntax::source_map::Span; -use super::eval_context::{LocalValue, StackPopCleanup}; -use super::{Frame, Memory, Operand, MemPlace, Place, Immediate, ScalarMaybeUndef}; -use const_eval::CompileTimeInterpreter; +use super::eval_context::{LocalState, StackPopCleanup}; +use super::{Frame, Memory, Operand, MemPlace, Place, Immediate, ScalarMaybeUndef, LocalValue}; +use crate::const_eval::CompileTimeInterpreter; #[derive(Default)] pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir> { @@ -78,7 +78,7 @@ impl<'a, 'mir, 'tcx> InfiniteLoopDetector<'a, 'mir, 'tcx> } // Second cycle - Err(EvalErrorKind::InfiniteLoop.into()) + Err(InterpError::InfiniteLoop.into()) } } @@ -101,9 +101,8 @@ macro_rules! __impl_snapshot_field { // This assumes the type has two type parameters, first for the tag (set to `()`), // then for the id macro_rules! impl_snapshot_for { - // FIXME(mark-i-m): Some of these should be `?` rather than `*`. (enum $enum_name:ident { - $( $variant:ident $( ( $($field:ident $(-> $delegate:expr)*),* ) )* ),* $(,)* + $( $variant:ident $( ( $($field:ident $(-> $delegate:expr)?),* ) )? ),* $(,)? 
}) => { impl<'a, Ctx> self::Snapshot<'a, Ctx> for $enum_name @@ -115,18 +114,18 @@ macro_rules! impl_snapshot_for { fn snapshot(&self, __ctx: &'a Ctx) -> Self::Item { match *self { $( - $enum_name::$variant $( ( $(ref $field),* ) )* => + $enum_name::$variant $( ( $(ref $field),* ) )? => { $enum_name::$variant $( - ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)*) ),* ), - )* + ( $( __impl_snapshot_field!($field, __ctx $(, $delegate)?) ),* ) + )? + } )* } } } }; - // FIXME(mark-i-m): same here. - (struct $struct_name:ident { $($field:ident $(-> $delegate:expr)*),* $(,)* }) => { + (struct $struct_name:ident { $($field:ident $(-> $delegate:expr)?),* $(,)? }) => { impl<'a, Ctx> self::Snapshot<'a, Ctx> for $struct_name where Ctx: self::SnapshotContext<'a>, { @@ -139,7 +138,7 @@ macro_rules! impl_snapshot_for { } = *self; $struct_name { - $( $field: __impl_snapshot_field!($field, __ctx $(, $delegate)*) ),* + $( $field: __impl_snapshot_field!($field, __ctx $(, $delegate)?) ),* } } } @@ -200,7 +199,7 @@ impl_snapshot_for!(enum ScalarMaybeUndef { Undef, }); -impl_stable_hash_for!(struct ::interpret::MemPlace { +impl_stable_hash_for!(struct crate::interpret::MemPlace { ptr, align, meta, @@ -211,7 +210,7 @@ impl_snapshot_for!(struct MemPlace { align -> *align, // just copy alignment verbatim }); -impl_stable_hash_for!(enum ::interpret::Place { +impl_stable_hash_for!(enum crate::interpret::Place { Ptr(mem_place), Local { frame, local }, }); @@ -232,7 +231,7 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for Place } } -impl_stable_hash_for!(enum ::interpret::Immediate { +impl_stable_hash_for!(enum crate::interpret::Immediate { Scalar(x), ScalarPair(x, y), }); @@ -241,7 +240,7 @@ impl_snapshot_for!(enum Immediate { ScalarPair(s, t), }); -impl_stable_hash_for!(enum ::interpret::Operand { +impl_stable_hash_for!(enum crate::interpret::Operand { Immediate(x), Indirect(x), }); @@ -250,13 +249,15 @@ impl_snapshot_for!(enum Operand { Indirect(m), }); -impl_stable_hash_for!(enum ::interpret::LocalValue { +impl_stable_hash_for!(enum crate::interpret::LocalValue { Dead, + Uninitialized, Live(x), }); impl_snapshot_for!(enum LocalValue { - Live(v), Dead, + Uninitialized, + Live(v), }); impl<'a, Ctx> Snapshot<'a, Ctx> for Relocations @@ -298,7 +299,7 @@ impl<'a, Ctx> Snapshot<'a, Ctx> for &'a Allocation } } -impl_stable_hash_for!(enum ::interpret::eval_context::StackPopCleanup { +impl_stable_hash_for!(enum crate::interpret::eval_context::StackPopCleanup { Goto(block), None { cleanup }, }); @@ -314,7 +315,7 @@ struct FrameSnapshot<'a, 'tcx: 'a> { stmt: usize, } -impl_stable_hash_for!(impl<'tcx, 'mir: 'tcx> for struct Frame<'mir, 'tcx> { +impl_stable_hash_for!(impl<'mir, 'tcx: 'mir> for struct Frame<'mir, 'tcx> { mir, instance, span, @@ -356,6 +357,22 @@ impl<'a, 'mir, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a Frame<'mir, 'tcx> } } +impl<'a, 'tcx, Ctx> Snapshot<'a, Ctx> for &'a LocalState<'tcx> + where Ctx: SnapshotContext<'a>, +{ + type Item = LocalValue<(), AllocIdSnapshot<'a>>; + + fn snapshot(&self, ctx: &'a Ctx) -> Self::Item { + let LocalState { value, layout: _ } = self; + value.snapshot(ctx) + } +} + +impl_stable_hash_for!(struct LocalState<'tcx> { + value, + layout -> _, +}); + impl<'a, 'b, 'mir, 'tcx: 'a+'mir> SnapshotContext<'b> for Memory<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>> { @@ -417,7 +434,7 @@ impl<'a, 'mir, 'tcx> Eq for EvalSnapshot<'a, 'mir, 'tcx> impl<'a, 'mir, 'tcx> PartialEq for EvalSnapshot<'a, 'mir, 'tcx> { fn eq(&self, other: &Self) -> bool { - // FIXME: This looks to be a 
*ridicolously expensive* comparison operation. + // FIXME: This looks to be a *ridiculously expensive* comparison operation. // Doesn't this make tons of copies? Either `snapshot` is very badly named, // or it does! self.snapshot() == other.snapshot() diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index a6835e4f16738..29a8547035e4a 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -1,14 +1,4 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module contains the `EvalContext` methods for executing a single step of the interpreter. +//! This module contains the `InterpretCx` methods for executing a single step of the interpreter. //! //! The main entry point is the `step` method. @@ -16,7 +6,7 @@ use rustc::mir; use rustc::ty::layout::LayoutOf; use rustc::mir::interpret::{EvalResult, Scalar, PointerArithmetic}; -use super::{EvalContext, Machine}; +use super::{InterpretCx, Machine}; /// Classify whether an operator is "left-homogeneous", i.e., the LHS has the /// same type as the result. @@ -45,13 +35,13 @@ fn binop_right_homogeneous(op: mir::BinOp) -> bool { } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { pub fn run(&mut self) -> EvalResult<'tcx> { while self.step()? {} Ok(()) } - /// Returns true as long as there are more things to do. + /// Returns `true` as long as there are more things to do. /// /// This is used by [priroda](https://github.com/oli-obk/priroda) pub fn step(&mut self) -> EvalResult<'tcx, bool> { @@ -81,7 +71,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> EvalResult<'tcx> { - debug!("{:?}", stmt); + info!("{:?}", stmt); use rustc::mir::StatementKind::*; @@ -119,13 +109,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> FakeRead(..) => {} // Stacked Borrows. - Retag { fn_entry, two_phase, ref place } => { + Retag(kind, ref place) => { let dest = self.eval_place(place)?; - M::retag(self, fn_entry, two_phase, dest)?; - } - EscapeToRaw(ref op) => { - let op = self.eval_operand(op, None)?; - M::escape_to_raw(self, op)?; + M::retag(self, kind, dest)?; } // Statements we do not track. @@ -190,7 +176,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> UnaryOp(un_op, ref operand) => { // The operand always has the same type as the result. 
let val = self.read_immediate(self.eval_operand(operand, Some(dest.layout))?)?; - let val = self.unary_op(un_op, val.to_scalar()?, dest.layout)?; + let val = self.unary_op(un_op, val)?; self.write_scalar(val, dest)?; } @@ -262,7 +248,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } NullaryOp(mir::NullOp::SizeOf, ty) => { - let ty = self.monomorphize(ty, self.substs()); + let ty = self.monomorphize(ty)?; let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "SizeOf nullary MIR operator called for unsized type"); @@ -274,14 +260,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } Cast(kind, ref operand, cast_ty) => { - debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest.layout.ty); + debug_assert_eq!(self.monomorphize(cast_ty)?, dest.layout.ty); let src = self.eval_operand(operand, None)?; self.cast(src, kind, dest)?; } Discriminant(ref place) => { - let place = self.eval_place(place)?; - let discr_val = self.read_discriminant(self.place_to_op(place)?)?.0; + let op = self.eval_place_to_op(place, None)?; + let discr_val = self.read_discriminant(op)?.0; let size = dest.layout.size; self.write_scalar(Scalar::from_uint(discr_val, size), dest)?; } @@ -293,7 +279,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> EvalResult<'tcx> { - debug!("{:?}", terminator.kind); + info!("{:?}", terminator.kind); self.tcx.span = terminator.source_info.span; self.memory.tcx.span = terminator.source_info.span; @@ -303,7 +289,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> if !self.stack.is_empty() { // This should change *something* debug_assert!(self.cur_frame() != old_stack || self.frame().block != old_bb); - debug!("// {:?}", self.frame().block); + info!("// {:?}", self.frame().block); } Ok(()) } diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index 4a672f195d240..ba48a28fc8315 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use std::borrow::Cow; use rustc::{mir, ty}; @@ -15,12 +5,12 @@ use rustc::ty::layout::{self, TyLayout, LayoutOf}; use syntax::source_map::Span; use rustc_target::spec::abi::Abi; -use rustc::mir::interpret::{EvalResult, PointerArithmetic, EvalErrorKind, Scalar}; +use rustc::mir::interpret::{EvalResult, PointerArithmetic, InterpError, Scalar}; use super::{ - EvalContext, Machine, Immediate, OpTy, PlaceTy, MPlaceTy, Operand, StackPopCleanup + InterpretCx, Machine, Immediate, OpTy, ImmTy, PlaceTy, MPlaceTy, StackPopCleanup }; -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { #[inline] pub fn goto_block(&mut self, target: Option) -> EvalResult<'tcx> { if let Some(target) = target { @@ -61,8 +51,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // Compare using binary_op, to also support pointer values let const_int = Scalar::from_uint(const_int, discr.layout.size); let (res, _) = self.binary_op(mir::BinOp::Eq, - discr.to_scalar()?, discr.layout, - const_int, discr.layout, + discr, + ImmTy::from_scalar(const_int, discr.layout), )?; if res.to_bool()? { target_block = targets[index]; @@ -122,7 +112,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> let ty = place.layout.ty; trace!("TerminatorKind::drop: {:?}, type {}", location, ty); - let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty); + let instance = crate::monomorphize::resolve_drop_in_place(*self.tcx, ty); self.drop_in_place( place, instance, @@ -144,7 +134,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> self.goto_block(Some(target))?; } else { // Compute error message - use rustc::mir::interpret::EvalErrorKind::*; + use rustc::mir::interpret::InterpError::*; return match *msg { BoundsCheck { ref len, ref index } => { let len = self.read_immediate(self.eval_operand(len, None)?) @@ -222,7 +212,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> return Ok(()); } let caller_arg = caller_arg.next() - .ok_or_else(|| EvalErrorKind::FunctionArgCountMismatch)?; + .ok_or_else(|| InterpError::FunctionArgCountMismatch)?; if rust_abi { debug_assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out"); } @@ -319,24 +309,25 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> mir.spread_arg, mir.args_iter() .map(|local| - (local, self.layout_of_local(self.frame(), local).unwrap().ty) + (local, self.layout_of_local(self.frame(), local, None).unwrap().ty) ) .collect::>() ); // Figure out how to pass which arguments. - // We have two iterators: Where the arguments come from, - // and where they go to. + // The Rust ABI is special: ZST get skipped. let rust_abi = match caller_abi { Abi::Rust | Abi::RustCall => true, _ => false }; + // We have two iterators: Where the arguments come from, + // and where they go to. // For where they come from: If the ABI is RustCall, we untuple the // last incoming argument. These two iterators do not have the same type, // so to keep the code paths uniform we accept an allocation // (for RustCall ABI only). 
- let caller_args : Cow<[OpTy<'tcx, M::PointerTag>]> = + let caller_args : Cow<'_, [OpTy<'tcx, M::PointerTag>]> = if caller_abi == Abi::RustCall && !args.is_empty() { // Untuple let (&untuple_arg, args) = args.split_last().unwrap(); @@ -345,7 +336,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> .chain((0..untuple_arg.layout.fields.count()).into_iter() .map(|i| self.operand_field(untuple_arg, i as u64)) ) - .collect::>>>()?) + .collect::>>>()?) } else { // Plain arg passing Cow::from(args) @@ -362,7 +353,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> // not advance `caller_iter` for ZSTs. let mut locals_iter = mir.args_iter(); while let Some(local) = locals_iter.next() { - let dest = self.eval_place(&mir::Place::Local(local))?; + let dest = self.eval_place( + &mir::Place::Base(mir::PlaceBase::Local(local)) + )?; if Some(local) == mir.spread_arg { // Must be a tuple for i in 0..dest.layout.fields.count() { @@ -376,12 +369,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } // Now we should have no more caller args if caller_iter.next().is_some() { - trace!("Caller has too many args over"); + trace!("Caller has passed too many args"); return err!(FunctionArgCountMismatch); } // Don't forget to check the return type! if let Some(caller_ret) = dest { - let callee_ret = self.eval_place(&mir::Place::Local(mir::RETURN_PLACE))?; + let callee_ret = self.eval_place( + &mir::Place::RETURN_PLACE + )?; if !Self::check_argument_compat( rust_abi, caller_ret.layout, @@ -393,7 +388,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } } else { let callee_layout = - self.layout_of_local(self.frame(), mir::RETURN_PLACE)?; + self.layout_of_local(self.frame(), mir::RETURN_PLACE, None)?; if !callee_layout.abi.is_uninhabited() { return err!(FunctionRetMismatch( self.tcx.types.never, callee_layout.ty @@ -412,9 +407,24 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> } // cannot use the shim here, because that will only result in infinite recursion ty::InstanceDef::Virtual(_, idx) => { + let mut args = args.to_vec(); let ptr_size = self.pointer_size(); - let ptr = self.deref_operand(args[0])?; - let vtable = ptr.vtable()?; + // We have to implement all "object safe receivers". Currently we + // support built-in pointers (&, &mut, Box) as well as unsized-self. We do + // not yet support custom self types. + // Also see librustc_codegen_llvm/abi.rs and librustc_codegen_llvm/mir/block.rs. + let receiver_place = match args[0].layout.ty.builtin_deref(true) { + Some(_) => { + // Built-in pointer. + self.deref_operand(args[0])? + } + None => { + // Unsized self. + args[0].to_mem_place() + } + }; + // Find and consult vtable + let vtable = receiver_place.vtable()?; self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?; let fn_ptr = self.memory.get(vtable.alloc_id)?.read_ptr_sized( self, @@ -422,14 +432,17 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> )?.to_ptr()?; let instance = self.memory.get_fn(fn_ptr)?; - // We have to patch the self argument, in particular get the layout - // expected by the actual function. Cannot just use "field 0" due to - // Box. 
-                let mut args = args.to_vec();
-                let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
-                let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
-                args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(self, 0)?;
-                args[0].op = Operand::Immediate(Immediate::Scalar(ptr.ptr.into())); // strip vtable
+                // `*mut receiver_place.layout.ty` is almost the layout that we
+                // want for args[0]: We have to project to field 0 because we want
+                // a thin pointer.
+                assert!(receiver_place.layout.is_unsized());
+                let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+                let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
+                // Adjust receiver argument.
+                args[0] = OpTy::from(ImmTy {
+                    layout: this_receiver_ptr,
+                    imm: Immediate::Scalar(receiver_place.ptr.into())
+                });
                 trace!("Patched self operand to {:#?}", args[0]);
                 // recurse with concrete function
                 self.eval_fn_call(instance, span, caller_abi, &args, dest, ret)
@@ -458,8 +471,8 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
             _ => (instance, place),
         };

-        let arg = OpTy {
-            op: Operand::Immediate(place.to_ref()),
+        let arg = ImmTy {
+            imm: place.to_ref(),
             layout: self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
         };
@@ -470,7 +483,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
             instance,
             span,
             Abi::Rust,
-            &[arg],
+            &[arg.into()],
             Some(dest.into()),
             Some(target),
         )
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index bda585b8eda34..a451f2afb4663 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -1,20 +1,10 @@
-// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
 use rustc::ty::{self, Ty};
 use rustc::ty::layout::{Size, Align, LayoutOf};
 use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};

-use super::{EvalContext, Machine, MemoryKind};
+use super::{InterpretCx, InterpError, Machine, MemoryKind};

-impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
+impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
     /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
     /// objects.
     ///
@@ -24,20 +14,28 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
     pub fn get_vtable(
         &mut self,
         ty: Ty<'tcx>,
-        poly_trait_ref: ty::PolyExistentialTraitRef<'tcx>,
+        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
     ) -> EvalResult<'tcx, Pointer> {
         trace!("get_vtable(trait_ref={:?})", poly_trait_ref);

         let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));

         if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
+            // This means we guarantee that there are no duplicate vtables, we will
+            // always use the same vtable for the same (Type, Trait) combination.
+            // That's not what happens in rustc, but emulating per-crate deduplication
+            // does not sound like it actually makes anything any better.
return Ok(Pointer::from(vtable).with_default_tag()); } - let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty); - let trait_ref = self.tcx.erase_regions(&trait_ref); + let methods = if let Some(poly_trait_ref) = poly_trait_ref { + let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty); + let trait_ref = self.tcx.erase_regions(&trait_ref); - let methods = self.tcx.vtable_methods(trait_ref); + self.tcx.vtable_methods(trait_ref) + } else { + &[] + }; let layout = self.layout_of(ty)?; assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); @@ -54,10 +52,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> ptr_size * (3 + methods.len() as u64), ptr_align, MemoryKind::Vtable, - )?.with_default_tag(); + ).with_default_tag(); let tcx = &*self.tcx; - let drop = ::monomorphize::resolve_drop_in_place(*tcx, ty); + let drop = crate::monomorphize::resolve_drop_in_place(*tcx, ty); let drop = self.memory.create_fn_alloc(drop).with_default_tag(); // no need to do any alignment checks on the memory accesses below, because we know the // allocation is correctly aligned as we created it above. Also we're only offsetting by @@ -77,7 +75,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> for (i, method) in methods.iter().enumerate() { if let Some((def_id, substs)) = *method { - let instance = self.resolve(def_id, substs)?; + // resolve for vtable: insert shims where needed + let substs = self.subst_and_normalize_erasing_regions(substs)?; + let instance = ty::Instance::resolve_for_vtable( + *self.tcx, + self.param_env, + def_id, + substs, + ).ok_or_else(|| InterpError::TooGeneric)?; let fn_ptr = self.memory.create_fn_alloc(instance).with_default_tag(); let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?; self.memory @@ -92,7 +97,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> Ok(vtable) } - /// Return the drop fn instance as well as the actual dynamic type + /// Returns the drop fn instance as well as the actual dynamic type pub fn read_drop_type_from_vtable( &self, vtable: Pointer, diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index c7bfea82a2d31..d4cf906619d89 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fmt::Write; use std::hash::Hash; use std::ops::RangeInclusive; @@ -17,11 +7,11 @@ use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx}; use rustc::ty; use rustc_data_structures::fx::FxHashSet; use rustc::mir::interpret::{ - Scalar, AllocKind, EvalResult, EvalErrorKind, + Scalar, AllocKind, EvalResult, InterpError, }; use super::{ - OpTy, Machine, EvalContext, ValueVisitor, + OpTy, Machine, InterpretCx, ValueVisitor, MPlaceTy, }; macro_rules! 
validation_failure {
@@ -84,13 +74,13 @@ pub enum PathElem {
 }

 /// State for tracking recursive validation of references
-pub struct RefTracking<'tcx, Tag> {
-    pub seen: FxHashSet<(OpTy<'tcx, Tag>)>,
-    pub todo: Vec<(OpTy<'tcx, Tag>, Vec<PathElem>)>,
+pub struct RefTracking<T> {
+    pub seen: FxHashSet<T>,
+    pub todo: Vec<(T, Vec<PathElem>)>,
 }

-impl<'tcx, Tag: Copy+Eq+Hash> RefTracking<'tcx, Tag> {
-    pub fn new(op: OpTy<'tcx, Tag>) -> Self {
+impl<'tcx, T: Copy + Eq + Hash> RefTracking<T> {
+    pub fn new(op: T) -> Self {
         let mut ref_tracking = RefTracking {
             seen: FxHashSet::default(),
             todo: vec![(op, Vec::new())],
@@ -161,9 +151,9 @@ struct ValidityVisitor<'rt, 'a: 'rt, 'mir: 'rt, 'tcx: 'a+'rt+'mir, M: Machine<'a
     /// starts must not be changed! `visit_fields` and `visit_array` rely on
     /// this stack discipline.
     path: Vec<PathElem>,
-    ref_tracking: Option<&'rt mut RefTracking<'tcx, M::PointerTag>>,
+    ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>>>,
     const_mode: bool,
-    ecx: &'rt EvalContext<'a, 'mir, 'tcx, M>,
+    ecx: &'rt InterpretCx<'a, 'mir, 'tcx, M>,
 }

 impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> ValidityVisitor<'rt, 'a, 'mir, 'tcx, M> {
@@ -234,7 +224,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
     type V = OpTy<'tcx, M::PointerTag>;

     #[inline(always)]
-    fn ecx(&self) -> &EvalContext<'a, 'mir, 'tcx, M> {
+    fn ecx(&self) -> &InterpretCx<'a, 'mir, 'tcx, M> {
         &self.ecx
     }

@@ -256,7 +246,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
         variant_id: VariantIdx,
         new_op: OpTy<'tcx, M::PointerTag>
     ) -> EvalResult<'tcx> {
-        let name = old_op.layout.ty.ty_adt_def().unwrap().variants[variant_id].name;
+        let name = old_op.layout.ty.ty_adt_def().unwrap().variants[variant_id].ident.name;
         self.visit_elem(new_op, PathElem::Variant(name))
     }

@@ -268,11 +258,11 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
         match self.walk_value(op) {
             Ok(()) => Ok(()),
             Err(err) => match err.kind {
-                EvalErrorKind::InvalidDiscriminant(val) =>
+                InterpError::InvalidDiscriminant(val) =>
                     validation_failure!(
                         val, self.path, "a valid enum discriminant"
                     ),
-                EvalErrorKind::ReadPointerAsBytes =>
+                InterpError::ReadPointerAsBytes =>
                     validation_failure!(
                         "a pointer", self.path, "plain (non-pointer) bytes"
                     ),
@@ -365,10 +355,12 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
                 Err(err) => {
                     error!("{:?} is not aligned to {:?}", ptr, align);
                     match err.kind {
-                        EvalErrorKind::InvalidNullPointerUsage =>
+                        InterpError::InvalidNullPointerUsage =>
                             return validation_failure!("NULL reference", self.path),
-                        EvalErrorKind::AlignmentCheckFailed { .. } =>
-                            return validation_failure!("unaligned reference", self.path),
+                        InterpError::AlignmentCheckFailed { required, has } =>
+                            return validation_failure!(format!("unaligned reference \
+                                (required {} byte alignment but found {})",
+                                required.bytes(), has.bytes()), self.path),
                         _ =>
                             return validation_failure!(
                                 "dangling (out-of-bounds) reference (might be NULL at \
@@ -409,16 +401,15 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
                 // before. Proceed recursively even for integer pointers, no
                 // reason to skip them! They are (recursively) valid for some ZST,
                 // but not for others (e.g., `!` is a ZST).
-                let op = place.into();
-                if ref_tracking.seen.insert(op) {
-                    trace!("Recursing below ptr {:#?}", *op);
+                if ref_tracking.seen.insert(place) {
+                    trace!("Recursing below ptr {:#?}", *place);
                     // We need to clone the path anyway, make sure it gets created
                     // with enough space for the additional `Deref`.
let mut new_path = Vec::with_capacity(self.path.len()+1); new_path.clone_from(&self.path); new_path.push(PathElem::Deref); // Remember to come back to this later. - ref_tracking.todo.push((op, new_path)); + ref_tracking.todo.push((place, new_path)); } } } @@ -459,8 +450,13 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> } // At least one value is excluded. Get the bits. let value = try_validation!(value.not_undef(), - value, self.path, - format!("something in the range {:?}", layout.valid_range)); + value, + self.path, + format!( + "something {}", + wrapping_range_format(&layout.valid_range, max_hi), + ) + ); let bits = match value { Scalar::Ptr(ptr) => { if lo == 1 && hi == max_hi { @@ -566,7 +562,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Err(err) => { // For some errors we might be able to provide extra information match err.kind { - EvalErrorKind::ReadUndefBytes(offset) => { + InterpError::ReadUndefBytes(offset) => { // Some byte was undefined, determine which // element that byte belongs to so we can // provide an index. @@ -591,8 +587,8 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> } } -impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> { - /// This function checks the data at `op`. `op` is assumed to cover valid memory if it +impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> { + /// This function checks the data at `op`. `op` is assumed to cover valid memory if it /// is an indirect operand. /// It will error if the bits at the destination do not match the ones described by the layout. /// @@ -603,7 +599,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> &self, op: OpTy<'tcx, M::PointerTag>, path: Vec, - ref_tracking: Option<&mut RefTracking<'tcx, M::PointerTag>>, + ref_tracking: Option<&mut RefTracking>>, const_mode: bool, ) -> EvalResult<'tcx> { trace!("validate_operand: {:?}, {:?}", *op, op.layout.ty); diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs index 4773f5627d716..05343ac66d966 100644 --- a/src/librustc_mir/interpret/visitor.rs +++ b/src/librustc_mir/interpret/visitor.rs @@ -8,7 +8,7 @@ use rustc::mir::interpret::{ }; use super::{ - Machine, EvalContext, MPlaceTy, OpTy, + Machine, InterpretCx, MPlaceTy, OpTy, }; // A thing that we can project into, and that has a layout. @@ -16,29 +16,29 @@ use super::{ // that's just more convenient to work with (avoids repeating all the `Machine` bounds). pub trait Value<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>: Copy { - /// Get this value's layout. + /// Gets this value's layout. fn layout(&self) -> TyLayout<'tcx>; - /// Make this into an `OpTy`. + /// Makes this into an `OpTy`. fn to_op( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>>; - /// Create this from an `MPlaceTy`. - fn from_mem_place(MPlaceTy<'tcx, M::PointerTag>) -> Self; + /// Creates this from an `MPlaceTy`. + fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self; - /// Project to the given enum variant. + /// Projects to the given enum variant. fn project_downcast( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, variant: VariantIdx, ) -> EvalResult<'tcx, Self>; - /// Project to the n-th field. + /// Projects to the n-th field. 
fn project_field( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, field: u64, ) -> EvalResult<'tcx, Self>; } @@ -56,7 +56,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn to_op( self, - _ecx: &EvalContext<'a, 'mir, 'tcx, M>, + _ecx: &InterpretCx<'a, 'mir, 'tcx, M>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { Ok(self) } @@ -69,7 +69,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn project_downcast( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, variant: VariantIdx, ) -> EvalResult<'tcx, Self> { ecx.operand_downcast(self, variant) @@ -78,7 +78,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn project_field( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, field: u64, ) -> EvalResult<'tcx, Self> { ecx.operand_field(self, field) @@ -95,7 +95,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn to_op( self, - _ecx: &EvalContext<'a, 'mir, 'tcx, M>, + _ecx: &InterpretCx<'a, 'mir, 'tcx, M>, ) -> EvalResult<'tcx, OpTy<'tcx, M::PointerTag>> { Ok(self.into()) } @@ -108,7 +108,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn project_downcast( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, variant: VariantIdx, ) -> EvalResult<'tcx, Self> { ecx.mplace_downcast(self, variant) @@ -117,7 +117,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> #[inline(always)] fn project_field( self, - ecx: &EvalContext<'a, 'mir, 'tcx, M>, + ecx: &InterpretCx<'a, 'mir, 'tcx, M>, field: u64, ) -> EvalResult<'tcx, Self> { ecx.mplace_field(self, field) @@ -125,29 +125,29 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Value<'a, 'mir, 'tcx, M> } macro_rules! make_value_visitor { - ($visitor_trait_name:ident, $($mutability:ident)*) => { + ($visitor_trait_name:ident, $($mutability:ident)?) => { // How to traverse a value and what to do when we are at the leaves. pub trait $visitor_trait_name<'a, 'mir, 'tcx: 'mir+'a, M: Machine<'a, 'mir, 'tcx>>: Sized { type V: Value<'a, 'mir, 'tcx, M>; - /// The visitor must have an `EvalContext` in it. - fn ecx(&$($mutability)* self) - -> &$($mutability)* EvalContext<'a, 'mir, 'tcx, M>; + /// The visitor must have an `InterpretCx` in it. + fn ecx(&$($mutability)? self) + -> &$($mutability)? InterpretCx<'a, 'mir, 'tcx, M>; // Recursive actions, ready to be overloaded. - /// Visit the given value, dispatching as appropriate to more specialized visitors. + /// Visits the given value, dispatching as appropriate to more specialized visitors. #[inline(always)] fn visit_value(&mut self, v: Self::V) -> EvalResult<'tcx> { self.walk_value(v) } - /// Visit the given value as a union. No automatic recursion can happen here. + /// Visits the given value as a union. No automatic recursion can happen here. #[inline(always)] fn visit_union(&mut self, _v: Self::V) -> EvalResult<'tcx> { Ok(()) } - /// Visit this vale as an aggregate, you are even getting an iterator yielding + /// Visits this vale as an aggregate, you are even getting an iterator yielding /// all the fields (still in an `EvalResult`, you have to do error handling yourself). /// Recurses into the fields. #[inline(always)] @@ -173,7 +173,7 @@ macro_rules! 
make_value_visitor { self.visit_value(new_val) } - /// Called for recursing into the field of a generator. These are not known to be + /// Called for recursing into the field of a generator. These are not known to be /// initialized, so we treat them like unions. #[inline(always)] fn visit_generator_field( @@ -215,8 +215,8 @@ macro_rules! make_value_visitor { fn visit_scalar(&mut self, _v: Self::V, _layout: &layout::Scalar) -> EvalResult<'tcx> { Ok(()) } - /// Called whenever we reach a value of primitive type. There can be no recursion - /// below such a value. This is the leaf function. + /// Called whenever we reach a value of primitive type. There can be no recursion + /// below such a value. This is the leaf function. /// We do *not* provide an `ImmTy` here because some implementations might want /// to write to the place this primitive lives in. #[inline(always)] @@ -241,8 +241,7 @@ macro_rules! make_value_visitor { // If this is a multi-variant layout, we have find the right one and proceed with // that. match v.layout().variants { - layout::Variants::NicheFilling { .. } | - layout::Variants::Tagged { .. } => { + layout::Variants::Multiple { .. } => { let op = v.to_op(self.ecx())?; let idx = self.ecx().read_discriminant(op)?.1; let inner = v.project_downcast(self.ecx(), idx)?; diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs index 983488da0031d..deeed9a0b9846 100644 --- a/src/librustc_mir/lib.rs +++ b/src/librustc_mir/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! Rust MIR: a lowered representation of Rust. Also: an experiment! @@ -17,7 +7,6 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(nll)] #![feature(in_band_lifetimes)] #![feature(slice_patterns)] -#![feature(slice_sort_by_cached_key)] #![feature(box_patterns)] #![feature(box_syntax)] #![feature(crate_visibility_modifier)] @@ -25,7 +14,6 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! #![feature(const_fn)] #![feature(decl_macro)] #![feature(exhaustive_patterns)] -#![feature(range_contains)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_attrs)] #![feature(never_type)] @@ -34,43 +22,23 @@ Rust MIR: a lowered representation of Rust. Also: an experiment! 
#![feature(unicode_internals)] #![feature(step_trait)] #![feature(slice_concat_ext)] -#![feature(if_while_or_patterns)] -#![feature(try_from)] #![feature(reverse_bits)] -#![cfg_attr(stage0, feature(underscore_imports))] +#![feature(try_blocks)] #![recursion_limit="256"] -extern crate arena; +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] +#![allow(explicit_outlives_requirements)] -#[macro_use] -extern crate bitflags; #[macro_use] extern crate log; -extern crate either; -extern crate graphviz as dot; -extern crate polonius_engine; #[macro_use] extern crate rustc; #[macro_use] extern crate rustc_data_structures; -extern crate serialize as rustc_serialize; -extern crate rustc_errors; +#[allow(unused_extern_crates)] +extern crate serialize as rustc_serialize; // used by deriving #[macro_use] extern crate syntax; -extern crate syntax_pos; -extern crate rustc_target; -extern crate log_settings; -extern crate rustc_apfloat; -extern crate byteorder; -extern crate core; -extern crate smallvec; - -// Once we can use edition 2018 in the compiler, -// replace this with real try blocks. -macro_rules! try_block { - ($($inside:tt)*) => ( - (||{ ::std::ops::Try::from_ok({ $($inside)* }) })() - ) -} mod diagnostics; @@ -86,10 +54,9 @@ pub mod interpret; pub mod monomorphize; pub mod const_eval; -pub use hair::pattern::check_crate as matchck_crate; use rustc::ty::query::Providers; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { borrow_check::provide(providers); shim::provide(providers); transform::provide(providers); diff --git a/src/librustc_mir/lints.rs b/src/librustc_mir/lints.rs index 775431e5cbd6b..572f7133cad84 100644 --- a/src/librustc_mir/lints.rs +++ b/src/librustc_mir/lints.rs @@ -1,34 +1,24 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc_data_structures::bit_set::BitSet; use rustc::hir::def_id::DefId; use rustc::hir::intravisit::FnKind; use rustc::hir::map::blocks::FnLikeNode; use rustc::lint::builtin::UNCONDITIONAL_RECURSION; use rustc::mir::{self, Mir, TerminatorKind}; -use rustc::ty::{AssociatedItem, AssociatedItemContainer, Instance, TyCtxt, TyKind}; -use rustc::ty::subst::Substs; +use rustc::ty::{self, AssociatedItem, AssociatedItemContainer, Instance, TyCtxt}; +use rustc::ty::subst::InternalSubsts; pub fn check(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, def_id: DefId) { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); - if let Some(fn_like_node) = FnLikeNode::from_node(tcx.hir().get(node_id)) { + if let Some(fn_like_node) = FnLikeNode::from_node(tcx.hir().get_by_hir_id(hir_id)) { check_fn_for_unconditional_recursion(tcx, fn_like_node.kind(), mir, def_id); } } fn check_fn_for_unconditional_recursion(tcx: TyCtxt<'a, 'tcx, 'tcx>, - fn_kind: FnKind, + fn_kind: FnKind<'_>, mir: &Mir<'tcx>, def_id: DefId) { if let FnKind::Closure(_) = fn_kind { @@ -79,7 +69,7 @@ fn check_fn_for_unconditional_recursion(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) => tcx.generics_of(trait_def_id).count(), _ => 0 }; - let caller_substs = &Substs::identity_for_item(tcx, def_id)[..trait_substs_count]; + let caller_substs = &InternalSubsts::identity_for_item(tcx, def_id)[..trait_substs_count]; while let Some(bb) = reachable_without_self_call_queue.pop() { if visited.contains(bb) { @@ -96,7 +86,7 @@ fn check_fn_for_unconditional_recursion(tcx: TyCtxt<'a, 'tcx, 'tcx>, TerminatorKind::Call { ref func, .. } => { let func_ty = func.ty(mir, tcx); - if let TyKind::FnDef(fn_def_id, substs) = func_ty.sty { + if let ty::FnDef(fn_def_id, substs) = func_ty.sty { let (call_fn_id, call_substs) = if let Some(instance) = Instance::resolve(tcx, param_env, @@ -139,12 +129,12 @@ fn check_fn_for_unconditional_recursion(tcx: TyCtxt<'a, 'tcx, 'tcx>, // no break */ }`) shouldn't be linted unless it actually // recurs. if !reached_exit_without_self_call && !self_call_locations.is_empty() { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let sp = tcx.sess.source_map().def_span(tcx.hir().span(node_id)); - let mut db = tcx.struct_span_lint_node(UNCONDITIONAL_RECURSION, - node_id, - sp, - "function cannot return without recursing"); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let sp = tcx.sess.source_map().def_span(tcx.hir().span_by_hir_id(hir_id)); + let mut db = tcx.struct_span_lint_hir(UNCONDITIONAL_RECURSION, + hir_id, + sp, + "function cannot return without recursing"); db.span_label(sp, "cannot return without recursing"); // offer some help to the programmer. for location in &self_call_locations { diff --git a/src/librustc_mir/monomorphize/collector.rs b/src/librustc_mir/monomorphize/collector.rs index a6239a8115a36..af875c2f9e8a1 100644 --- a/src/librustc_mir/monomorphize/collector.rs +++ b/src/librustc_mir/monomorphize/collector.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Mono Item Collection //! =========================== //! @@ -139,7 +129,7 @@ //! //! #### Boxes //! 
Since `Box` expression have special compiler support, no explicit calls to -//! `exchange_malloc()` and `exchange_free()` may show up in MIR, even if the +//! `exchange_malloc()` and `box_free()` may show up in MIR, even if the //! compiler will generate them. We have to observe `Rvalue::Box` expressions //! and Box-typed drop-statements for that purpose. //! @@ -187,27 +177,29 @@ use rustc::hir::{self, CodegenFnAttrFlags}; use rustc::hir::itemlikevisit::ItemLikeVisitor; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::interpret::{AllocId, ConstValue}; use rustc::middle::lang_items::{ExchangeMallocFnLangItem, StartFnLangItem}; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; use rustc::ty::{self, TypeFoldable, Ty, TyCtxt, GenericParamDefKind}; use rustc::ty::adjustment::CustomCoerceUnsized; -use rustc::session::config; -use rustc::mir::{self, Location, Promoted}; +use rustc::session::config::EntryFnType; +use rustc::mir::{self, Location, Place, PlaceBase, Promoted, Static, StaticKind}; use rustc::mir::visit::Visitor as MirVisitor; use rustc::mir::mono::MonoItem; use rustc::mir::interpret::{Scalar, GlobalId, AllocKind, ErrorHandled}; -use monomorphize::{self, Instance}; +use crate::monomorphize::{self, Instance}; use rustc::util::nodemap::{FxHashSet, FxHashMap, DefIdMap}; use rustc::util::common::time; -use monomorphize::item::{MonoItemExt, DefPathBasedNames, InstantiationMode}; +use crate::monomorphize::item::{MonoItemExt, DefPathBasedNames, InstantiationMode}; use rustc_data_structures::bit_set::GrowableBitSet; use rustc_data_structures::sync::{MTRef, MTLock, ParallelIterator, par_iter}; +use std::iter; + #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] pub enum MonoItemCollectionMode { Eager, @@ -331,9 +323,7 @@ fn collect_roots<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let mut roots = Vec::new(); { - let entry_fn = tcx.sess.entry_fn.borrow().map(|(node_id, _, _)| { - tcx.hir().local_def_id(node_id) - }); + let entry_fn = tcx.entry_fn(LOCAL_CRATE); debug!("collect_roots: entry_fn = {:?}", entry_fn); @@ -367,7 +357,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // We've been here already, no need to search again. 
return; } - debug!("BEGIN collect_items_rec({})", starting_point.to_string(tcx)); + debug!("BEGIN collect_items_rec({})", starting_point.to_string(tcx, true)); let mut neighbors = Vec::new(); let recursion_depth_reset; @@ -391,7 +381,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let param_env = ty::ParamEnv::reveal_all(); if let Ok(val) = tcx.const_eval(param_env.and(cid)) { - collect_const(tcx, val, instance.substs, &mut neighbors); + collect_const(tcx, val, InternalSubsts::empty(), &mut neighbors); } } MonoItem::Fn(instance) => { @@ -421,7 +411,7 @@ fn collect_items_rec<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, recursion_depths.insert(def_id, depth); } - debug!("END collect_items_rec({})", starting_point.to_string(tcx)); + debug!("END collect_items_rec({})", starting_point.to_string(tcx, true)); } fn record_accesses<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -462,8 +452,8 @@ fn check_recursion_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if recursion_depth > *tcx.sess.recursion_limit.get() { let error = format!("reached the recursion limit while instantiating `{}`", instance); - if let Some(node_id) = tcx.hir().as_local_node_id(def_id) { - tcx.sess.span_fatal(tcx.hir().span(node_id), &error); + if let Some(hir_id) = tcx.hir().as_local_hir_id(def_id) { + tcx.sess.span_fatal(tcx.hir().span_by_hir_id(hir_id), &error); } else { tcx.sess.fatal(&error); } @@ -478,7 +468,8 @@ fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: Instance<'tcx>) { let type_length = instance.substs.types().flat_map(|ty| ty.walk()).count(); - debug!(" => type length={}", type_length); + let const_length = instance.substs.consts().flat_map(|ct| ct.ty.walk()).count(); + debug!(" => type length={}, const length={}", type_length, const_length); // Rust code can easily create exponentially-long types using only a // polynomial recursion depth. Even with the default recursion @@ -487,22 +478,36 @@ fn check_type_length_limit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // // Bail out in these cases to avoid that bad user experience. let type_length_limit = *tcx.sess.type_length_limit.get(); - if type_length > type_length_limit { - // The instance name is already known to be too long for rustc. Use - // `{:.64}` to avoid blasting the user's terminal with thousands of - // lines of type-name. - let instance_name = instance.to_string(); - let msg = format!("reached the type-length limit while instantiating `{:.64}...`", - instance_name); - let mut diag = if let Some(node_id) = tcx.hir().as_local_node_id(instance.def_id()) { - tcx.sess.struct_span_fatal(tcx.hir().span(node_id), &msg) - } else { - tcx.sess.struct_fatal(&msg) - }; + // We include the const length in the type length, as it's better + // to be overly conservative. + if type_length + const_length > type_length_limit { + // The instance name is already known to be too long for rustc. + // Show only the first and last 32 characters to avoid blasting + // the user's terminal with thousands of lines of type-name. + let shrink = |s: String, before: usize, after: usize| { + // An iterator of all byte positions including the end of the string. + let positions = || s.char_indices().map(|(i, _)| i).chain(iter::once(s.len())); + + let shrunk = format!( + "{before}...{after}", + before = &s[..positions().nth(before).unwrap_or(s.len())], + after = &s[positions().rev().nth(after).unwrap_or(0)..], + ); + // Only use the shrunk version if it's really shorter. + // This also avoids the case where before and after slices overlap. 
+ if shrunk.len() < s.len() { + shrunk + } else { + s + } + }; + let msg = format!("reached the type-length limit while instantiating `{}`", + shrink(instance.to_string(), 32, 32)); + let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg); diag.note(&format!( "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate", - type_length_limit*2)); + type_length)); diag.emit(); tcx.sess.abort_if_errors(); } @@ -512,7 +517,7 @@ struct MirNeighborCollector<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a mir::Mir<'tcx>, output: &'a mut Vec>, - param_substs: &'tcx Substs<'tcx>, + param_substs: SubstsRef<'tcx>, } impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { @@ -558,7 +563,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { ); visit_fn_use(self.tcx, fn_ty, false, &mut self.output); } - mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer, ref operand, _) => { + mir::Rvalue::Cast(mir::CastKind::ClosureFnPointer(_), ref operand, _) => { let source_ty = operand.ty(self.mir, self.tcx); let source_ty = self.tcx.subst_and_normalize_erasing_regions( self.param_substs, @@ -596,7 +601,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { fn visit_const(&mut self, constant: &&'tcx ty::Const<'tcx>, location: Location) { debug!("visiting const {:?} @ {:?}", *constant, location); - collect_const(self.tcx, constant, self.param_substs, self.output); + collect_const(self.tcx, **constant, self.param_substs, self.output); self.super_const(constant); } @@ -620,8 +625,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { } mir::TerminatorKind::Drop { ref location, .. } | mir::TerminatorKind::DropAndReplace { ref location, .. } => { - let ty = location.ty(self.mir, self.tcx) - .to_ty(self.tcx); + let ty = location.ty(self.mir, self.tcx).ty; let ty = tcx.subst_and_normalize_erasing_regions( self.param_substs, ty::ParamEnv::reveal_all(), @@ -645,19 +649,26 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> { self.super_terminator_kind(block, kind, location); } - fn visit_static(&mut self, - static_: &mir::Static<'tcx>, + fn visit_place(&mut self, + place: &mir::Place<'tcx>, context: mir::visit::PlaceContext<'tcx>, location: Location) { - debug!("visiting static {:?} @ {:?}", static_.def_id, location); + match place { + Place::Base( + PlaceBase::Static(box Static{ kind:StaticKind::Static(def_id), .. 
}) + ) => { + debug!("visiting static {:?} @ {:?}", def_id, location); - let tcx = self.tcx; - let instance = Instance::mono(tcx, static_.def_id); - if should_monomorphize_locally(tcx, &instance) { - self.output.push(MonoItem::Static(static_.def_id)); + let tcx = self.tcx; + let instance = Instance::mono(tcx, *def_id); + if should_monomorphize_locally(tcx, &instance) { + self.output.push(MonoItem::Static(*def_id)); + } + } + _ => {} } - self.super_static(static_, context, location); + self.super_place(place, context, location); } } @@ -760,7 +771,7 @@ fn should_monomorphize_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: fn is_available_upstream_generic<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - substs: &'tcx Substs<'tcx>) + substs: SubstsRef<'tcx>) -> bool { debug_assert!(!def_id.is_local()); @@ -771,10 +782,10 @@ fn should_monomorphize_locally<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, instance: return false } - // If this instance has no type parameters, it cannot be a shared + // If this instance has non-erasable parameters, it cannot be a shared // monomorphization. Non-generic instances are already handled above // by `is_reachable_non_generic()` - if substs.types().next().is_none() { + if substs.non_erasable_generics().next().is_none() { return false } @@ -837,7 +848,7 @@ fn find_vtable_types_for_unsizing<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, match tail.sty { ty::Foreign(..) => false, ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, - _ => bug!("unexpected unsized tail: {:?}", tail.sty), + _ => bug!("unexpected unsized tail: {:?}", tail), } }; if type_has_metadata(inner_source) { @@ -904,20 +915,23 @@ fn create_mono_items_for_vtable_methods<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, !impl_ty.needs_subst() && !impl_ty.has_escaping_bound_vars()); if let ty::Dynamic(ref trait_ty, ..) 
= trait_ty.sty { - let poly_trait_ref = trait_ty.principal().with_self_ty(tcx, impl_ty); - assert!(!poly_trait_ref.has_escaping_bound_vars()); - - // Walk all methods of the trait, including those of its supertraits - let methods = tcx.vtable_methods(poly_trait_ref); - let methods = methods.iter().cloned().filter_map(|method| method) - .map(|(def_id, substs)| ty::Instance::resolve_for_vtable( + if let Some(principal) = trait_ty.principal() { + let poly_trait_ref = principal.with_self_ty(tcx, impl_ty); + assert!(!poly_trait_ref.has_escaping_bound_vars()); + + // Walk all methods of the trait, including those of its supertraits + let methods = tcx.vtable_methods(poly_trait_ref); + let methods = methods.iter().cloned().filter_map(|method| method) + .map(|(def_id, substs)| ty::Instance::resolve_for_vtable( tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap()) - .filter(|&instance| should_monomorphize_locally(tcx, &instance)) - .map(|instance| create_fn_mono_item(instance)); - output.extend(methods); + .filter(|&instance| should_monomorphize_locally(tcx, &instance)) + .map(|instance| create_fn_mono_item(instance)); + output.extend(methods); + } + // Also add the destructor visit_drop_use(tcx, impl_ty, false, output); } @@ -931,7 +945,7 @@ struct RootCollector<'b, 'a: 'b, 'tcx: 'a + 'b> { tcx: TyCtxt<'a, 'tcx, 'tcx>, mode: MonoItemCollectionMode, output: &'b mut Vec>, - entry_fn: Option, + entry_fn: Option<(DefId, EntryFnType)>, } impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { @@ -961,11 +975,11 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemKind::Union(_, ref generics) => { if generics.params.is_empty() { if self.mode == MonoItemCollectionMode::Eager { - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); debug!("RootCollector: ADT drop-glue for {}", def_id_to_string(self.tcx, def_id)); - let ty = Instance::new(def_id, Substs::empty()).ty(self.tcx); + let ty = Instance::new(def_id, InternalSubsts::empty()).ty(self.tcx); visit_drop_use(self.tcx, ty, true, self.output); } } @@ -973,11 +987,11 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { hir::ItemKind::GlobalAsm(..) => { debug!("RootCollector: ItemKind::GlobalAsm({})", def_id_to_string(self.tcx, - self.tcx.hir().local_def_id(item.id))); - self.output.push(MonoItem::GlobalAsm(item.id)); + self.tcx.hir().local_def_id_from_hir_id(item.hir_id))); + self.output.push(MonoItem::GlobalAsm(item.hir_id)); } hir::ItemKind::Static(..) => { - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); debug!("RootCollector: ItemKind::Static({})", def_id_to_string(self.tcx, def_id)); self.output.push(MonoItem::Static(def_id)); @@ -987,7 +1001,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { // actually used somewhere. Just declaring them is insufficient. 
// but even just declaring them must collect the items they refer to - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); let instance = Instance::mono(self.tcx, def_id); let cid = GlobalId { @@ -997,11 +1011,11 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { let param_env = ty::ParamEnv::reveal_all(); if let Ok(val) = self.tcx.const_eval(param_env.and(cid)) { - collect_const(self.tcx, val, instance.substs, &mut self.output); + collect_const(self.tcx, val, InternalSubsts::empty(), &mut self.output); } } hir::ItemKind::Fn(..) => { - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); self.push_if_root(def_id); } } @@ -1015,7 +1029,7 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { match ii.node { hir::ImplItemKind::Method(hir::MethodSig { .. }, _) => { - let def_id = self.tcx.hir().local_def_id(ii.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(ii.hir_id); self.push_if_root(def_id); } _ => { /* Nothing to do here */ } @@ -1025,12 +1039,12 @@ impl<'b, 'a, 'v> ItemLikeVisitor<'v> for RootCollector<'b, 'a, 'v> { impl<'b, 'a, 'v> RootCollector<'b, 'a, 'v> { fn is_root(&self, def_id: DefId) -> bool { - !item_has_type_parameters(self.tcx, def_id) && match self.mode { + !item_requires_monomorphization(self.tcx, def_id) && match self.mode { MonoItemCollectionMode::Eager => { true } MonoItemCollectionMode::Lazy => { - self.entry_fn == Some(def_id) || + self.entry_fn.map(|(id, _)| id) == Some(def_id) || self.tcx.is_reachable_non_generic(def_id) || self.tcx.codegen_fn_attrs(def_id).flags.contains( CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) @@ -1055,14 +1069,9 @@ impl<'b, 'a, 'v> RootCollector<'b, 'a, 'v> { /// the return type of `main`. This is not needed when /// the user writes their own `start` manually. fn push_extra_entry_roots(&mut self) { - if self.tcx.sess.entry_fn.get().map(|e| e.2) != Some(config::EntryFnType::Main) { - return - } - - let main_def_id = if let Some(def_id) = self.entry_fn { - def_id - } else { - return + let main_def_id = match self.entry_fn { + Some((def_id, EntryFnType::Main)) => def_id, + _ => return, }; let start_def_id = match self.tcx.lang_items().require(StartFnLangItem) { @@ -1091,7 +1100,7 @@ impl<'b, 'a, 'v> RootCollector<'b, 'a, 'v> { } } -fn item_has_type_parameters<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { +fn item_requires_monomorphization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { let generics = tcx.generics_of(def_id); generics.requires_monomorphization(tcx) } @@ -1104,11 +1113,14 @@ fn create_mono_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for param in &generics.params { match param.kind { hir::GenericParamKind::Lifetime { .. } => {} - hir::GenericParamKind::Type { .. } => return, + hir::GenericParamKind::Type { .. } | + hir::GenericParamKind::Const { .. 
} => { + return + } } } - let impl_def_id = tcx.hir().local_def_id(item.id); + let impl_def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id); debug!("create_mono_items_for_default_impls(item={})", def_id_to_string(tcx, impl_def_id)); @@ -1123,14 +1135,16 @@ fn create_mono_items_for_default_impls<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, continue; } - if tcx.generics_of(method.def_id).own_counts().types != 0 { + let counts = tcx.generics_of(method.def_id).own_counts(); + if counts.types + counts.consts != 0 { continue; } - let substs = Substs::for_item(tcx, method.def_id, |param, _| { + let substs = InternalSubsts::for_item(tcx, method.def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => tcx.types.re_erased.into(), - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } | + GenericParamDefKind::Const => { trait_ref.substs[param.index as usize] } } @@ -1228,14 +1242,22 @@ fn def_id_to_string<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn collect_const<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, - constant: &ty::Const<'tcx>, - param_substs: &'tcx Substs<'tcx>, + constant: ty::Const<'tcx>, + param_substs: SubstsRef<'tcx>, output: &mut Vec>, ) { - debug!("visiting const {:?}", *constant); + debug!("visiting const {:?}", constant); - let val = match constant.val { - ConstValue::Unevaluated(def_id, substs) => { + match constant.val { + ConstValue::Slice(Scalar::Ptr(ptr), _) | + ConstValue::Scalar(Scalar::Ptr(ptr)) => + collect_miri(tcx, ptr.alloc_id, output), + ConstValue::ByRef(_ptr, alloc) => { + for &((), id) in alloc.relocations.values() { + collect_miri(tcx, id, output); + } + } + ConstValue::Unevaluated(did, substs) => { let param_env = ty::ParamEnv::reveal_all(); let substs = tcx.subst_and_normalize_erasing_regions( param_substs, @@ -1244,7 +1266,7 @@ fn collect_const<'a, 'tcx>( ); let instance = ty::Instance::resolve(tcx, param_env, - def_id, + did, substs).unwrap(); let cid = GlobalId { @@ -1252,29 +1274,12 @@ fn collect_const<'a, 'tcx>( promoted: None, }; match tcx.const_eval(param_env.and(cid)) { - Ok(val) => val.val, - Err(ErrorHandled::Reported) => return, + Ok(val) => collect_const(tcx, val, param_substs, output), + Err(ErrorHandled::Reported) => {}, Err(ErrorHandled::TooGeneric) => span_bug!( - tcx.def_span(def_id), "collection encountered polymorphic constant", + tcx.def_span(did), "collection encountered polymorphic constant", ), } - }, - _ => constant.val, - }; - match val { - ConstValue::Unevaluated(..) => bug!("const eval yielded unevaluated const"), - ConstValue::ScalarPair(Scalar::Ptr(a), Scalar::Ptr(b)) => { - collect_miri(tcx, a.alloc_id, output); - collect_miri(tcx, b.alloc_id, output); - } - ConstValue::ScalarPair(_, Scalar::Ptr(ptr)) | - ConstValue::ScalarPair(Scalar::Ptr(ptr), _) | - ConstValue::Scalar(Scalar::Ptr(ptr)) => - collect_miri(tcx, ptr.alloc_id, output), - ConstValue::ByRef(_id, alloc, _offset) => { - for &((), id) in alloc.relocations.values() { - collect_miri(tcx, id, output); - } } _ => {}, } diff --git a/src/librustc_mir/monomorphize/item.rs b/src/librustc_mir/monomorphize/item.rs index 2bc6e93c8c1a9..51ba690d3a14b 100644 --- a/src/librustc_mir/monomorphize/item.rs +++ b/src/librustc_mir/monomorphize/item.rs @@ -1,19 +1,10 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use monomorphize::Instance; +use crate::monomorphize::Instance; use rustc::hir; -use rustc::hir::def_id::DefId; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::mir::interpret::ConstValue; use rustc::session::config::OptLevel; -use rustc::ty::{self, Ty, TyCtxt, ClosureSubsts, GeneratorSubsts}; -use rustc::ty::subst::Substs; +use rustc::ty::{self, Ty, TyCtxt, Const, ClosureSubsts, GeneratorSubsts, ParamConst}; +use rustc::ty::subst::{SubstsRef, InternalSubsts}; use syntax::ast; use syntax::attr::InlineAttr; use std::fmt::{self, Write}; @@ -54,7 +45,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { fn is_generic_fn(&self) -> bool { match *self.as_mono_item() { MonoItem::Fn(ref instance) => { - instance.substs.types().next().is_some() + instance.substs.non_erasable_generics().next().is_some() } MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false, @@ -67,8 +58,8 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { MonoItem::Static(def_id) => { tcx.symbol_name(Instance::mono(tcx, def_id)) } - MonoItem::GlobalAsm(node_id) => { - let def_id = tcx.hir().local_def_id(node_id); + MonoItem::GlobalAsm(hir_id) => { + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); ty::SymbolName { name: Symbol::intern(&format!("global_asm_{:?}", def_id)).as_interned_str() } @@ -85,8 +76,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { match *self.as_mono_item() { MonoItem::Fn(ref instance) => { - let entry_def_id = - tcx.sess.entry_fn.borrow().map(|(id, _, _)| tcx.hir().local_def_id(id)); + let entry_def_id = tcx.entry_fn(LOCAL_CRATE).map(|(id, _)| id); // If this function isn't inlined or otherwise has explicit // linkage, then we'll be creating a globally shared version. if self.explicit_linkage(tcx).is_some() || @@ -133,7 +123,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { codegen_fn_attrs.linkage } - /// Returns whether this instance is instantiable - whether it has no unsatisfied + /// Returns `true` if this instance is instantiable - whether it has no unsatisfied /// predicates. /// /// In order to codegen an item, all of its predicates must hold, because @@ -162,7 +152,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { debug!("is_instantiable({:?})", self); let (def_id, substs) = match *self.as_mono_item() { MonoItem::Fn(ref instance) => (instance.def_id(), instance.substs), - MonoItem::Static(def_id) => (def_id, Substs::empty()), + MonoItem::Static(def_id) => (def_id, InternalSubsts::empty()), // global asm never has predicates MonoItem::GlobalAsm(..) => return true }; @@ -170,14 +160,14 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { tcx.substitute_normalize_and_test_predicates((def_id, &substs)) } - fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> String { + fn to_string(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, debug: bool) -> String { return match *self.as_mono_item() { MonoItem::Fn(instance) => { - to_string_internal(tcx, "fn ", instance) + to_string_internal(tcx, "fn ", instance, debug) }, MonoItem::Static(def_id) => { let instance = Instance::new(def_id, tcx.intern_substs(&[])); - to_string_internal(tcx, "static ", instance) + to_string_internal(tcx, "static ", instance, debug) }, MonoItem::GlobalAsm(..) 
=> { "global_asm".to_string() @@ -186,12 +176,13 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { fn to_string_internal<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, prefix: &str, - instance: Instance<'tcx>) + instance: Instance<'tcx>, + debug: bool) -> String { let mut result = String::with_capacity(32); result.push_str(prefix); let printer = DefPathBasedNames::new(tcx, false, false); - printer.push_instance_as_string(instance, &mut result); + printer.push_instance_as_string(instance, &mut result, debug); result } } @@ -199,15 +190,15 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug { fn local_span(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Option { match *self.as_mono_item() { MonoItem::Fn(Instance { def, .. }) => { - tcx.hir().as_local_node_id(def.def_id()) + tcx.hir().as_local_hir_id(def.def_id()) } MonoItem::Static(def_id) => { - tcx.hir().as_local_node_id(def_id) + tcx.hir().as_local_hir_id(def_id) } - MonoItem::GlobalAsm(node_id) => { - Some(node_id) + MonoItem::GlobalAsm(hir_id) => { + Some(hir_id) } - }.map(|node_id| tcx.hir().span(node_id)) + }.map(|hir_id| tcx.hir().span_by_hir_id(hir_id)) } } @@ -225,9 +216,8 @@ impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> { // These keys are used by the handwritten auto-tests, so they need to be // predictable and human-readable. // -// Note: A lot of this could looks very similar to what's already in the -// ppaux module. It would be good to refactor things so we only have one -// parameterizable implementation for printing types. +// Note: A lot of this could looks very similar to what's already in `ty::print`. +// FIXME(eddyb) implement a custom `PrettyPrinter` for this. /// Same as `unique_type_name()` but with the result pushed onto the given /// `output` parameter. @@ -249,7 +239,13 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { } } - pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String) { + // Pushes the type name of the specified type to the provided string. + // If 'debug' is true, printing normally unprintable types is allowed + // (e.g. ty::GeneratorWitness). This parameter should only be set when + // this method is being used for logging purposes (e.g. with debug! or info!) 
+ // When being used for codegen purposes, 'debug' should be set to 'false' + // in order to catch unexpected types that should never end up in a type name + pub fn push_type_name(&self, t: Ty<'tcx>, output: &mut String, debug: bool) { match t.sty { ty::Bool => output.push_str("bool"), ty::Char => output.push_str("char"), @@ -271,12 +267,12 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { ty::Float(ast::FloatTy::F64) => output.push_str("f64"), ty::Adt(adt_def, substs) => { self.push_def_path(adt_def.did, output); - self.push_type_params(substs, iter::empty(), output); + self.push_generic_params(substs, iter::empty(), output, debug); }, ty::Tuple(component_types) => { output.push('('); for &component_type in component_types { - self.push_type_name(component_type, output); + self.push_type_name(component_type, output, debug); output.push_str(", "); } if !component_types.is_empty() { @@ -292,7 +288,7 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { hir::MutMutable => output.push_str("mut "), } - self.push_type_name(inner_type, output); + self.push_type_name(inner_type, output, debug); }, ty::Ref(_, inner_type, mutbl) => { output.push('&'); @@ -300,27 +296,31 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { output.push_str("mut "); } - self.push_type_name(inner_type, output); + self.push_type_name(inner_type, output, debug); }, ty::Array(inner_type, len) => { output.push('['); - self.push_type_name(inner_type, output); + self.push_type_name(inner_type, output, debug); write!(output, "; {}", len.unwrap_usize(self.tcx)).unwrap(); output.push(']'); }, ty::Slice(inner_type) => { output.push('['); - self.push_type_name(inner_type, output); + self.push_type_name(inner_type, output, debug); output.push(']'); }, ty::Dynamic(ref trait_data, ..) => { - let principal = trait_data.principal(); - self.push_def_path(principal.def_id(), output); - self.push_type_params( - principal.skip_binder().substs, - trait_data.projection_bounds(), - output, - ); + if let Some(principal) = trait_data.principal() { + self.push_def_path(principal.def_id(), output); + self.push_generic_params( + principal.skip_binder().substs, + trait_data.projection_bounds(), + output, + debug + ); + } else { + output.push_str("dyn '_"); + } }, ty::Foreign(did) => self.push_def_path(did, output), ty::FnDef(..) | @@ -346,14 +346,14 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { if !sig.inputs().is_empty() { for ¶meter_type in sig.inputs() { - self.push_type_name(parameter_type, output); + self.push_type_name(parameter_type, output, debug); output.push_str(", "); } output.pop(); output.pop(); } - if sig.variadic { + if sig.c_variadic { if !sig.inputs().is_empty() { output.push_str(", ..."); } else { @@ -365,7 +365,7 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { if !sig.output().is_unit() { output.push_str(" -> "); - self.push_type_name(sig.output(), output); + self.push_type_name(sig.output(), output, debug); } }, ty::Generator(def_id, GeneratorSubsts { ref substs }, _) | @@ -373,7 +373,7 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { self.push_def_path(def_id, output); let generics = self.tcx.generics_of(self.tcx.closure_base_def_id(def_id)); let substs = substs.truncate_to(self.tcx, generics); - self.push_type_params(substs, iter::empty(), output); + self.push_generic_params(substs, iter::empty(), output, debug); } ty::Error | ty::Bound(..) | @@ -384,10 +384,28 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { ty::Param(_) | ty::GeneratorWitness(_) | ty::Opaque(..) 
=> { - bug!("DefPathBasedNames: Trying to create type name for \ + if debug { + output.push_str(&format!("`{:?}`", t)); + } else { + bug!("DefPathBasedNames: Trying to create type name for \ unexpected type: {:?}", t); + } + } + } + } + + // FIXME(const_generics): handle debug printing. + pub fn push_const_name(&self, c: &Const<'tcx>, output: &mut String, debug: bool) { + match c.val { + ConstValue::Infer(..) => output.push_str("_"), + ConstValue::Param(ParamConst { name, .. }) => { + write!(output, "{}", name).unwrap(); } + ConstValue::Unevaluated(..) => output.push_str("_: _"), + _ => write!(output, "{:?}", c).unwrap(), } + output.push_str(": "); + self.push_type_name(c.ty, output, debug); } pub fn push_def_path(&self, @@ -417,21 +435,22 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { output.pop(); } - fn push_type_params(&self, - substs: &Substs<'tcx>, - projections: I, - output: &mut String) - where I: Iterator> - { + fn push_generic_params( + &self, + substs: SubstsRef<'tcx>, + projections: I, + output: &mut String, + debug: bool, + ) where I: Iterator> { let mut projections = projections.peekable(); - if substs.types().next().is_none() && projections.peek().is_none() { + if substs.non_erasable_generics().next().is_none() && projections.peek().is_none() { return; } output.push('<'); for type_parameter in substs.types() { - self.push_type_name(type_parameter, output); + self.push_type_name(type_parameter, output, debug); output.push_str(", "); } @@ -440,7 +459,12 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { let name = &self.tcx.associated_item(projection.item_def_id).ident.as_str(); output.push_str(name); output.push_str("="); - self.push_type_name(projection.ty, output); + self.push_type_name(projection.ty, output, debug); + output.push_str(", "); + } + + for const_parameter in substs.consts() { + self.push_const_name(const_parameter, output, debug); output.push_str(", "); } @@ -452,8 +476,9 @@ impl<'a, 'tcx> DefPathBasedNames<'a, 'tcx> { pub fn push_instance_as_string(&self, instance: Instance<'tcx>, - output: &mut String) { + output: &mut String, + debug: bool) { self.push_def_path(instance.def_id(), output); - self.push_type_params(instance.substs, iter::empty(), output); + self.push_generic_params(instance.substs, iter::empty(), output, debug); } } diff --git a/src/librustc_mir/monomorphize/mod.rs b/src/librustc_mir/monomorphize/mod.rs index 30556759bd394..7fa904d32cbb4 100644 --- a/src/librustc_mir/monomorphize/mod.rs +++ b/src/librustc_mir/monomorphize/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::def_id::DefId; use rustc::middle::lang_items::DropInPlaceFnLangItem; use rustc::traits; diff --git a/src/librustc_mir/monomorphize/partitioning.rs b/src/librustc_mir/monomorphize/partitioning.rs index 00974d4a5b250..4a2c05b201328 100644 --- a/src/librustc_mir/monomorphize/partitioning.rs +++ b/src/librustc_mir/monomorphize/partitioning.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! Partitioning Codegen Units for Incremental Compilation //! ====================================================== //! @@ -106,29 +96,28 @@ use std::collections::hash_map::Entry; use std::cmp; use std::sync::Arc; -use syntax::ast::NodeId; use syntax::symbol::InternedString; use rustc::dep_graph::{WorkProductId, WorkProduct, DepNode, DepConstructor}; -use rustc::hir::CodegenFnAttrFlags; +use rustc::hir::{CodegenFnAttrFlags, HirId}; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE, CRATE_DEF_INDEX}; use rustc::hir::map::DefPathData; use rustc::mir::mono::{Linkage, Visibility, CodegenUnitNameBuilder}; use rustc::middle::exported_symbols::SymbolExportLevel; use rustc::ty::{self, TyCtxt, InstanceDef}; -use rustc::ty::item_path::characteristic_def_id_of_type; +use rustc::ty::print::characteristic_def_id_of_type; use rustc::ty::query::Providers; use rustc::util::common::time; use rustc::util::nodemap::{DefIdSet, FxHashMap, FxHashSet}; use rustc::mir::mono::MonoItem; -use monomorphize::collector::InliningMap; -use monomorphize::collector::{self, MonoItemCollectionMode}; -use monomorphize::item::{MonoItemExt, InstantiationMode}; +use crate::monomorphize::collector::InliningMap; +use crate::monomorphize::collector::{self, MonoItemCollectionMode}; +use crate::monomorphize::item::{MonoItemExt, InstantiationMode}; pub use rustc::mir::mono::CodegenUnit; pub enum PartitioningStrategy { - /// Generate one codegen unit per source-level module. + /// Generates one codegen unit per source-level module. PerModule, /// Partition the whole crate into a fixed number of codegen units. @@ -156,7 +145,7 @@ pub trait CodegenUnitExt<'tcx> { WorkProductId::from_cgu_name(&self.name().as_str()) } - fn work_product(&self, tcx: TyCtxt) -> WorkProduct { + fn work_product(&self, tcx: TyCtxt<'_, '_, '_>) -> WorkProduct { let work_product_id = self.work_product_id(); tcx.dep_graph .previous_work_product(&work_product_id) @@ -172,19 +161,19 @@ pub trait CodegenUnitExt<'tcx> { // The codegen tests rely on items being process in the same order as // they appear in the file, so for local items, we sort by node_id first #[derive(PartialEq, Eq, PartialOrd, Ord)] - pub struct ItemSortKey(Option, ty::SymbolName); + pub struct ItemSortKey(Option, ty::SymbolName); fn item_sort_key<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: MonoItem<'tcx>) -> ItemSortKey { ItemSortKey(match item { MonoItem::Fn(ref instance) => { match instance.def { - // We only want to take NodeIds of user-defined + // We only want to take HirIds of user-defined // instances into account. The others don't matter for // the codegen tests and can even make item order // unstable. InstanceDef::Item(def_id) => { - tcx.hir().as_local_node_id(def_id) + tcx.hir().as_local_hir_id(def_id) } InstanceDef::VtableShim(..) | InstanceDef::Intrinsic(..) | @@ -198,10 +187,10 @@ pub trait CodegenUnitExt<'tcx> { } } MonoItem::Static(def_id) => { - tcx.hir().as_local_node_id(def_id) + tcx.hir().as_local_hir_id(def_id) } - MonoItem::GlobalAsm(node_id) => { - Some(node_id) + MonoItem::GlobalAsm(hir_id) => { + Some(hir_id) } }, item.symbol_name(tcx)) } @@ -223,7 +212,7 @@ impl<'tcx> CodegenUnitExt<'tcx> for CodegenUnit<'tcx> { } // Anything we can't find a proper codegen unit for goes into this. 
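// A minimal, source-level sketch of the distinction the `non_erasable_generics()`
// calls introduced throughout this patch rely on (function names here are
// illustrative only): lifetime parameters are erased before codegen, so an item
// generic only over lifetimes still yields a single mono item, while type
// parameters (and, with this patch, const parameters) force one instantiation
// per distinct argument.
fn lifetimes_only<'a>(x: &'a u32) -> &'a u32 {
    // erased generics: exactly one symbol is emitted for this function
    x
}

fn type_generic<T: std::fmt::Debug>(x: T) {
    // non-erasable generics: one mono item per concrete `T` that gets used
    println!("{:?}", x);
}

fn main() {
    let n = 1u32;
    println!("{}", lifetimes_only(&n));
    type_generic(42u8);      // instantiates type_generic::<u8>
    type_generic("hello");   // instantiates type_generic::<&str>
}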
-fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder) -> InternedString { +fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>) -> InternedString { name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu")) } @@ -414,8 +403,8 @@ fn mono_item_visibility( Visibility::Hidden }; } - MonoItem::GlobalAsm(node_id) => { - let def_id = tcx.hir().local_def_id(*node_id); + MonoItem::GlobalAsm(hir_id) => { + let def_id = tcx.hir().local_def_id_from_hir_id(*hir_id); return if tcx.is_reachable_non_generic(def_id) { *can_be_internalized = false; default_visibility(tcx, def_id, false) @@ -458,7 +447,7 @@ fn mono_item_visibility( return Visibility::Hidden } - let is_generic = instance.substs.types().next().is_some(); + let is_generic = instance.substs.non_erasable_generics().next().is_some(); // Upstream `DefId` instances get different handling than local ones if !def_id.is_local() { @@ -546,7 +535,7 @@ fn mono_item_visibility( } } -fn default_visibility(tcx: TyCtxt, id: DefId, is_generic: bool) -> Visibility { +fn default_visibility(tcx: TyCtxt<'_, '_, '_>, id: DefId, is_generic: bool) -> Visibility { if !tcx.sess.target.target.options.default_hidden_visibility { return Visibility::Default } @@ -799,14 +788,14 @@ fn characteristic_def_id_of_mono_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Some(def_id) } MonoItem::Static(def_id) => Some(def_id), - MonoItem::GlobalAsm(node_id) => Some(tcx.hir().local_def_id(node_id)), + MonoItem::GlobalAsm(hir_id) => Some(tcx.hir().local_def_id_from_hir_id(hir_id)), } } type CguNameCache = FxHashMap<(DefId, bool), InternedString>; -fn compute_codegen_unit_name(tcx: TyCtxt, - name_builder: &mut CodegenUnitNameBuilder, +fn compute_codegen_unit_name(tcx: TyCtxt<'_, '_, '_>, + name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>, def_id: DefId, volatile: bool, cache: &mut CguNameCache) @@ -865,7 +854,7 @@ fn compute_codegen_unit_name(tcx: TyCtxt, }).clone() } -fn numbered_codegen_unit_name(name_builder: &mut CodegenUnitNameBuilder, +fn numbered_codegen_unit_name(name_builder: &mut CodegenUnitNameBuilder<'_, '_, '_>, index: usize) -> InternedString { name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index)) @@ -889,7 +878,7 @@ fn debug_dump<'a, 'b, 'tcx, I>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .unwrap_or(""); debug!(" - {} [{:?}] [{}]", - mono_item.to_string(tcx), + mono_item.to_string(tcx, true), linkage, symbol_hash); } @@ -939,7 +928,7 @@ fn collect_and_partition_mono_items<'a, 'tcx>( tcx.sess.abort_if_errors(); - ::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); + crate::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); let strategy = if tcx.sess.opts.incremental.is_some() { PartitioningStrategy::PerModule @@ -981,7 +970,7 @@ fn collect_and_partition_mono_items<'a, 'tcx>( let mut item_keys: Vec<_> = items .iter() .map(|i| { - let mut output = i.to_string(tcx); + let mut output = i.to_string(tcx, false); output.push_str(" @@"); let mut empty = Vec::new(); let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); @@ -1023,7 +1012,7 @@ fn collect_and_partition_mono_items<'a, 'tcx>( (Arc::new(mono_items), Arc::new(codegen_units)) } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { providers.collect_and_partition_mono_items = collect_and_partition_mono_items; diff --git a/src/librustc_mir/shim.rs b/src/librustc_mir/shim.rs index 114162946051e..cb25db73cd2be 100644 --- a/src/librustc_mir/shim.rs +++ b/src/librustc_mir/shim.rs @@ -1,37 +1,28 @@ -// Copyright 
2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir; use rustc::hir::def_id::DefId; use rustc::infer; use rustc::mir::*; -use rustc::ty::{self, Ty, TyCtxt, GenericParamDefKind}; +use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::VariantIdx; -use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::subst::{Subst, InternalSubsts}; use rustc::ty::query::Providers; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; use rustc_target::spec::abi::Abi; -use syntax::ast; use syntax_pos::Span; use std::fmt; use std::iter; -use transform::{add_moves_for_packed_drops, add_call_guards}; -use transform::{remove_noop_landing_pads, no_landing_pads, simplify}; -use util::elaborate_drops::{self, DropElaborator, DropStyle, DropFlagMode}; -use util::patch::MirPatch; +use crate::transform::{ + add_moves_for_packed_drops, add_call_guards, + remove_noop_landing_pads, no_landing_pads, simplify, run_passes +}; +use crate::util::elaborate_drops::{self, DropElaborator, DropStyle, DropFlagMode}; +use crate::util::patch::MirPatch; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { providers.mir_shims = make_shim; } @@ -123,12 +114,15 @@ fn make_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } }; debug!("make_shim({:?}) = untransformed {:?}", instance, result); - add_moves_for_packed_drops::add_moves_for_packed_drops( - tcx, &mut result, instance.def_id()); - no_landing_pads::no_landing_pads(tcx, &mut result); - remove_noop_landing_pads::remove_noop_landing_pads(tcx, &mut result); - simplify::simplify_cfg(&mut result); - add_call_guards::CriticalCallEdges.add_call_guards(&mut result); + + run_passes(tcx, &mut result, instance, MirPhase::Const, &[ + &add_moves_for_packed_drops::AddMovesForPackedDrops, + &no_landing_pads::NoLandingPads, + &remove_noop_landing_pads::RemoveNoopLandingPads, + &simplify::SimplifyCfg::new("make_shim"), + &add_call_guards::CriticalCallEdges, + ]); + debug!("make_shim({:?}) = {:?}", instance, result); tcx.alloc_mir(result) @@ -148,7 +142,7 @@ enum CallKind { Direct(DefId), } -fn temp_decl(mutability: Mutability, ty: Ty, span: Span) -> LocalDecl { +fn temp_decl(mutability: Mutability, ty: Ty<'_>, span: Span) -> LocalDecl<'_> { let source_info = SourceInfo { scope: OUTERMOST_SOURCE_SCOPE, span }; LocalDecl { mutability, @@ -188,7 +182,7 @@ fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let substs = if let Some(ty) = ty { tcx.intern_substs(&[ty.into()]) } else { - Substs::identity_for_item(tcx, def_id) + InternalSubsts::identity_for_item(tcx, def_id) }; let sig = tcx.fn_sig(def_id).subst(tcx, substs); let sig = tcx.erase_late_bound_regions(&sig); @@ -217,29 +211,22 @@ fn build_drop_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, IndexVec::new(), None, local_decls_for_sig(&sig, span), + IndexVec::new(), sig.inputs().len(), vec![], - span + span, + vec![], ); if let Some(..) = ty { // The first argument (index 0), but add 1 for the return value. - let dropee_ptr = Place::Local(Local::new(1+0)); + let dropee_ptr = Place::Base(PlaceBase::Local(Local::new(1+0))); if tcx.sess.opts.debugging_opts.mir_emit_retag { - // Function arguments should be retagged + // Function arguments should be retagged, and we make this one raw. 
mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement { source_info, - kind: StatementKind::Retag { - fn_entry: true, - two_phase: false, - place: dropee_ptr.clone(), - }, + kind: StatementKind::Retag(RetagKind::Raw, dropee_ptr.clone()), }); - // We use raw ptr operations, better prepare the alias tracking for that - mir.basic_blocks_mut()[START_BLOCK].statements.insert(1, Statement { - source_info, - kind: StatementKind::EscapeToRaw(Operand::Copy(dropee_ptr.clone())), - }) } let patch = { let param_env = tcx.param_env(def_id).with_reveal_all(); @@ -276,7 +263,7 @@ pub struct DropShimElaborator<'a, 'tcx: 'a> { } impl<'a, 'tcx> fmt::Debug for DropShimElaborator<'a, 'tcx> { - fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { Ok(()) } } @@ -318,7 +305,7 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> { } } -/// Build a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`. +/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`. fn build_clone_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, self_ty: Ty<'tcx>) @@ -327,10 +314,10 @@ fn build_clone_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("build_clone_shim(def_id={:?})", def_id); let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty); - let is_copy = !self_ty.moves_by_default(tcx, tcx.param_env(def_id), builder.span); + let is_copy = self_ty.is_copy_modulo_regions(tcx, tcx.param_env(def_id), builder.span); - let dest = Place::Local(RETURN_PLACE); - let src = Place::Local(Local::new(1+0)).deref(); + let dest = Place::RETURN_PLACE; + let src = Place::Base(PlaceBase::Local(Local::new(1+0))).deref(); match self_ty.sty { _ if is_copy => builder.copy_shim(), @@ -394,9 +381,11 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { IndexVec::new(), None, self.local_decls, + IndexVec::new(), self.sig.inputs().len(), vec![], - self.span + self.span, + vec![], ) } @@ -434,10 +423,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { } fn copy_shim(&mut self) { - let rcvr = Place::Local(Local::new(1+0)).deref(); + let rcvr = Place::Base(PlaceBase::Local(Local::new(1+0))).deref(); let ret_statement = self.make_statement( StatementKind::Assign( - Place::Local(RETURN_PLACE), + Place::RETURN_PLACE, box Rvalue::Use(Operand::Copy(rcvr)) ) ); @@ -446,9 +435,9 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> { let span = self.span; - Place::Local( + Place::Base(PlaceBase::Local( self.local_decls.push(temp_decl(mutability, ty, span)) - ) + )) } fn make_clone_call( @@ -461,12 +450,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { ) { let tcx = self.tcx; - let substs = Substs::for_item(tcx, self.def_id, |param, _| { - match param.kind { - GenericParamDefKind::Lifetime => tcx.types.re_erased.into(), - GenericParamDefKind::Type {..} => ty.into(), - } - }); + let substs = tcx.mk_substs_trait(ty, &[]); // `func == Clone::clone(&ty) -> ty` let func_ty = tcx.mk_fn_def(self.def_id, substs); @@ -474,7 +458,9 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { span: self.span, ty: func_ty, user_ty: None, - literal: ty::Const::zero_sized(self.tcx, func_ty), + literal: tcx.mk_const( + ty::Const::zero_sized(func_ty), + ), }); let ref_loc = self.make_place( @@ -534,7 +520,9 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { span: self.span, ty: self.tcx.types.usize, user_ty: None, - literal: ty::Const::from_usize(self.tcx, value), + 
literal: self.tcx.mk_const( + ty::Const::from_usize(self.tcx, value), + ), } } @@ -552,7 +540,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { let inits = vec![ self.make_statement( StatementKind::Assign( - Place::Local(beg), + Place::Base(PlaceBase::Local(beg)), box Rvalue::Use(Operand::Constant(self.make_usize(0))) ) ), @@ -570,7 +558,11 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // BB #3; // } // BB #4; - self.loop_header(Place::Local(beg), end, BasicBlock::new(2), BasicBlock::new(4), false); + self.loop_header(Place::Base(PlaceBase::Local(beg)), + end, + BasicBlock::new(2), + BasicBlock::new(4), + false); // BB #2 // `dest[i] = Clone::clone(src[beg])`; @@ -586,10 +578,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { let statements = vec![ self.make_statement( StatementKind::Assign( - Place::Local(beg), + Place::Base(PlaceBase::Local(beg)), box Rvalue::BinaryOp( BinOp::Add, - Operand::Copy(Place::Local(beg)), + Operand::Copy(Place::Base(PlaceBase::Local(beg))), Operand::Constant(self.make_usize(1)) ) ) @@ -609,7 +601,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { let beg = self.local_decls.push(temp_decl(Mutability::Mut, tcx.types.usize, span)); let init = self.make_statement( StatementKind::Assign( - Place::Local(beg), + Place::Base(PlaceBase::Local(beg)), box Rvalue::Use(Operand::Constant(self.make_usize(0))) ) ); @@ -620,7 +612,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // BB #8; // } // BB #9; - self.loop_header(Place::Local(beg), Place::Local(end), + self.loop_header(Place::Base(PlaceBase::Local(beg)), Place::Base(PlaceBase::Local(end)), BasicBlock::new(7), BasicBlock::new(9), true); // BB #7 (cleanup) @@ -636,10 +628,10 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { // `goto #6;` let statement = self.make_statement( StatementKind::Assign( - Place::Local(beg), + Place::Base(PlaceBase::Local(beg)), box Rvalue::BinaryOp( BinOp::Add, - Operand::Copy(Place::Local(beg)), + Operand::Copy(Place::Base(PlaceBase::Local(beg))), Operand::Constant(self.make_usize(1)) ) ) @@ -697,7 +689,7 @@ impl<'a, 'tcx> CloneShimBuilder<'a, 'tcx> { } } -/// Build a "call" shim for `def_id`. The shim calls the +/// Builds a "call" shim for `def_id`. The shim calls the /// function specified by `call_kind`, first adjusting its first /// argument according to `rcvr_adjustment`. 
/// @@ -724,7 +716,7 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE }; let rcvr_arg = Local::new(1+0); - let rcvr_l = Place::Local(rcvr_arg); + let rcvr_l = Place::Base(PlaceBase::Local(rcvr_arg)); let mut statements = vec![]; let rcvr = match rcvr_adjustment { @@ -754,11 +746,11 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, statements.push(Statement { source_info, kind: StatementKind::Assign( - Place::Local(ref_rcvr), + Place::Base(PlaceBase::Local(ref_rcvr)), box Rvalue::Ref(tcx.types.re_erased, borrow_kind, rcvr_l) ) }); - Operand::Move(Place::Local(ref_rcvr)) + Operand::Move(Place::Base(PlaceBase::Local(ref_rcvr))) } }; @@ -770,7 +762,9 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span, ty, user_ty: None, - literal: ty::Const::zero_sized(tcx, ty), + literal: tcx.mk_const( + ty::Const::zero_sized(ty) + ), }), vec![rcvr]) } @@ -778,12 +772,12 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if let Some(untuple_args) = untuple_args { args.extend(untuple_args.iter().enumerate().map(|(i, ity)| { - let arg_place = Place::Local(Local::new(1+1)); + let arg_place = Place::Base(PlaceBase::Local(Local::new(1+1))); Operand::Move(arg_place.field(Field::new(i), *ity)) })); } else { args.extend((1..sig.inputs().len()).map(|i| { - Operand::Move(Place::Local(Local::new(1+i))) + Operand::Move(Place::Base(PlaceBase::Local(Local::new(1+i)))) })); } @@ -801,7 +795,7 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, block(&mut blocks, statements, TerminatorKind::Call { func: callee, args, - destination: Some((Place::Local(RETURN_PLACE), + destination: Some((Place::RETURN_PLACE, BasicBlock::new(1))), cleanup: if let Adjustment::RefMut = rcvr_adjustment { Some(BasicBlock::new(3)) @@ -814,7 +808,7 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if let Adjustment::RefMut = rcvr_adjustment { // BB #1 - drop for Self block(&mut blocks, vec![], TerminatorKind::Drop { - location: Place::Local(rcvr_arg), + location: Place::Base(PlaceBase::Local(rcvr_arg)), target: BasicBlock::new(2), unwind: None }, false); @@ -824,7 +818,7 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if let Adjustment::RefMut = rcvr_adjustment { // BB #3 - drop if closure panics block(&mut blocks, vec![], TerminatorKind::Drop { - location: Place::Local(rcvr_arg), + location: Place::Base(PlaceBase::Local(rcvr_arg)), target: BasicBlock::new(4), unwind: None }, true); @@ -842,9 +836,11 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, IndexVec::new(), None, local_decls, + IndexVec::new(), sig.inputs().len(), vec![], - span + span, + vec![], ); if let Abi::RustCall = sig.abi { mir.spread_arg = Some(Local::new(sig.inputs().len())); @@ -853,14 +849,14 @@ fn build_call_shim<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, - ctor_id: ast::NodeId, + ctor_id: hir::HirId, fields: &[hir::StructField], span: Span) -> Mir<'tcx> { let tcx = infcx.tcx; let gcx = tcx.global_tcx(); - let def_id = tcx.hir().local_def_id(ctor_id); + let def_id = tcx.hir().local_def_id_from_hir_id(ctor_id); let param_env = gcx.param_env(def_id); // Normalize the sig. 
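// A rough source-level picture of the "call" shim assembled above (the real
// output is MIR, not surface Rust, and the names in this sketch are made up
// for illustration). With `Adjustment::RefMut` the shim receives the callee's
// receiver by value, lends it out as `&mut`, and then drops it, mirroring the
// extra drop blocks built for the return and unwind paths:
fn call_shim(mut rcvr: Box<dyn FnMut(i32) -> i32>, arg: i32) -> i32 {
    let ret = rcvr(arg); // call through `&mut rcvr`, like `Operand::Move(ref_rcvr)`
    drop(rcvr);          // afterwards, drop the by-value receiver
    ret
}

fn main() {
    let f: Box<dyn FnMut(i32) -> i32> = Box::new(|x| x + 1);
    println!("{}", call_shim(f, 41));
}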
@@ -884,7 +880,7 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, }; let variant_no = if adt_def.is_enum() { - adt_def.variant_index_with_id(def_id) + adt_def.variant_index_with_ctor_id(def_id) } else { VariantIdx::new(0) }; @@ -894,11 +890,11 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, statements: vec![Statement { source_info, kind: StatementKind::Assign( - Place::Local(RETURN_PLACE), + Place::RETURN_PLACE, box Rvalue::Aggregate( box AggregateKind::Adt(adt_def, variant_no, substs, None, None), (1..sig.inputs().len()+1).map(|i| { - Operand::Move(Place::Local(Local::new(i))) + Operand::Move(Place::Base(PlaceBase::Local(Local::new(i)))) }).collect() ) ) @@ -919,8 +915,10 @@ pub fn build_adt_ctor<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, IndexVec::new(), None, local_decls, + IndexVec::new(), sig.inputs().len(), vec![], - span + span, + vec![], ) } diff --git a/src/librustc_mir/transform/add_call_guards.rs b/src/librustc_mir/transform/add_call_guards.rs index 2692790444000..88042d64e96b7 100644 --- a/src/librustc_mir/transform/add_call_guards.rs +++ b/src/librustc_mir/transform/add_call_guards.rs @@ -1,17 +1,7 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::TyCtxt; use rustc::mir::*; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; #[derive(PartialEq)] pub enum AddCallGuards { @@ -43,14 +33,14 @@ pub use self::AddCallGuards::*; impl MirPass for AddCallGuards { fn run_pass<'a, 'tcx>(&self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { self.add_call_guards(mir); } } impl AddCallGuards { - pub fn add_call_guards(&self, mir: &mut Mir) { + pub fn add_call_guards(&self, mir: &mut Mir<'_>) { let pred_count: IndexVec<_, _> = mir.predecessors().iter().map(|ps| ps.len()).collect(); diff --git a/src/librustc_mir/transform/add_moves_for_packed_drops.rs b/src/librustc_mir/transform/add_moves_for_packed_drops.rs index 203669c61badd..b6436ec70eef2 100644 --- a/src/librustc_mir/transform/add_moves_for_packed_drops.rs +++ b/src/librustc_mir/transform/add_moves_for_packed_drops.rs @@ -1,20 +1,10 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use rustc::hir::def_id::DefId; use rustc::mir::*; use rustc::ty::TyCtxt; -use transform::{MirPass, MirSource}; -use util::patch::MirPatch; -use util; +use crate::transform::{MirPass, MirSource}; +use crate::util::patch::MirPatch; +use crate::util; // This pass moves values being dropped that are within a packed // struct to a separate local before dropping them, to ensure that @@ -52,11 +42,11 @@ pub struct AddMovesForPackedDrops; impl MirPass for AddMovesForPackedDrops { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, + src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { debug!("add_moves_for_packed_drops({:?} @ {:?})", src, mir.span); - add_moves_for_packed_drops(tcx, mir, src.def_id); + add_moves_for_packed_drops(tcx, mir, src.def_id()); } } @@ -116,7 +106,7 @@ fn add_move_for_packed_drop<'a, 'tcx>( }; let source_info = terminator.source_info; - let ty = location.ty(mir, tcx).to_ty(tcx); + let ty = location.ty(mir, tcx).ty; let temp = patch.new_temp(ty, terminator.source_info.span); let storage_dead_block = patch.new_block(BasicBlockData { @@ -131,10 +121,10 @@ fn add_move_for_packed_drop<'a, 'tcx>( patch.add_statement( loc, StatementKind::StorageLive(temp)); - patch.add_assign(loc, Place::Local(temp), + patch.add_assign(loc, Place::Base(PlaceBase::Local(temp)), Rvalue::Use(Operand::Move(location.clone()))); patch.patch_terminator(loc.block, TerminatorKind::Drop { - location: Place::Local(temp), + location: Place::Base(PlaceBase::Local(temp)), target: storage_dead_block, unwind }); diff --git a/src/librustc_mir/transform/add_retag.rs b/src/librustc_mir/transform/add_retag.rs index 811b85446cb23..a393847fd4922 100644 --- a/src/librustc_mir/transform/add_retag.rs +++ b/src/librustc_mir/transform/add_retag.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate. //! It has to be run really early, before transformations like inlining, because //! introducing these calls *adds* UB -- so, conceptually, this pass is actually part @@ -16,7 +6,7 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::mir::*; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct AddRetag; @@ -31,9 +21,8 @@ fn is_stable<'tcx>( match *place { // Locals and statics have stable addresses, for sure - Local { .. } | - Promoted { .. } | - Static { .. } => + Base(PlaceBase::Local { .. }) | + Base(PlaceBase::Static { .. }) => true, // Recurse for projections Projection(ref proj) => { @@ -87,7 +76,7 @@ fn may_have_reference<'a, 'gcx, 'tcx>(ty: Ty<'tcx>, tcx: TyCtxt<'a, 'gcx, 'tcx>) impl MirPass for AddRetag { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { if !tcx.sess.opts.debugging_opts.mir_emit_retag { @@ -98,7 +87,7 @@ impl MirPass for AddRetag { let needs_retag = |place: &Place<'tcx>| { // FIXME: Instead of giving up for unstable places, we should introduce // a temporary and retag on that. 
- is_stable(place) && may_have_reference(place.ty(&*local_decls, tcx).to_ty(tcx), tcx) + is_stable(place) && may_have_reference(place.ty(&*local_decls, tcx).ty, tcx) }; // PART 1 @@ -111,14 +100,14 @@ impl MirPass for AddRetag { }; // Gather all arguments, skip return value. let places = local_decls.iter_enumerated().skip(1).take(arg_count) - .map(|(local, _)| Place::Local(local)) + .map(|(local, _)| Place::Base(PlaceBase::Local(local))) .filter(needs_retag) .collect::>(); // Emit their retags. basic_blocks[START_BLOCK].statements.splice(0..0, places.into_iter().map(|place| Statement { source_info, - kind: StatementKind::Retag { fn_entry: true, two_phase: false, place }, + kind: StatementKind::Retag(RetagKind::FnEntry, place), }) ); } @@ -154,7 +143,7 @@ impl MirPass for AddRetag { for (source_info, dest_place, dest_block) in returns { basic_blocks[dest_block].statements.insert(0, Statement { source_info, - kind: StatementKind::Retag { fn_entry: false, two_phase: false, place: dest_place }, + kind: StatementKind::Retag(RetagKind::Default, dest_place), }); } @@ -164,9 +153,9 @@ impl MirPass for AddRetag { // We want to insert statements as we iterate. To this end, we // iterate backwards using indices. for i in (0..block_data.statements.len()).rev() { - match block_data.statements[i].kind { - // If we are casting *from* a reference, we may have to escape-to-raw. - StatementKind::Assign(_, box Rvalue::Cast( + let (retag_kind, place) = match block_data.statements[i].kind { + // If we are casting *from* a reference, we may have to retag-as-raw. + StatementKind::Assign(ref place, box Rvalue::Cast( CastKind::Misc, ref src, dest_ty, @@ -175,42 +164,35 @@ impl MirPass for AddRetag { if src_ty.is_region_ptr() { // The only `Misc` casts on references are those creating raw pointers. assert!(dest_ty.is_unsafe_ptr()); - // Insert escape-to-raw before the cast. We are not concerned - // with stability here: Our EscapeToRaw will not change the value - // that the cast will then use. - // `src` might be a "move", but we rely on this not actually moving - // but just doing a memcpy. It is crucial that we do EscapeToRaw - // on the src because we need it with its original type. - let source_info = block_data.statements[i].source_info; - block_data.statements.insert(i, Statement { - source_info, - kind: StatementKind::EscapeToRaw(src.clone()), - }); + (RetagKind::Raw, place.clone()) + } else { + // Some other cast, no retag + continue } } // Assignments of reference or ptr type are the ones where we may have // to update tags. This includes `x = &[mut] ...` and hence // we also retag after taking a reference! StatementKind::Assign(ref place, box ref rvalue) if needs_retag(place) => { - let two_phase = match rvalue { - Rvalue::Ref(_, borrow_kind, _) => - borrow_kind.allows_two_phase_borrow(), - _ => false + let kind = match rvalue { + Rvalue::Ref(_, borrow_kind, _) + if borrow_kind.allows_two_phase_borrow() + => + RetagKind::TwoPhase, + _ => + RetagKind::Default, }; - // Insert a retag after the assignment. - let source_info = block_data.statements[i].source_info; - block_data.statements.insert(i+1, Statement { - source_info, - kind: StatementKind::Retag { - fn_entry: false, - two_phase, - place: place.clone(), - }, - }); + (kind, place.clone()) } // Do nothing for the rest - _ => {}, + _ => continue, }; + // Insert a retag after the statement. 
+ let source_info = block_data.statements[i].source_info; + block_data.statements.insert(i+1, Statement { + source_info, + kind: StatementKind::Retag(retag_kind, place), + }); } } } diff --git a/src/librustc_mir/transform/check_unsafety.rs b/src/librustc_mir/transform/check_unsafety.rs index 3607869384077..93f3afe1aea6b 100644 --- a/src/librustc_mir/transform/check_unsafety.rs +++ b/src/librustc_mir/transform/check_unsafety.rs @@ -1,19 +1,10 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::indexed_vec::IndexVec; use rustc_data_structures::sync::Lrc; use rustc::ty::query::Providers; use rustc::ty::{self, TyCtxt}; +use rustc::ty::cast::CastTy; use rustc::hir; use rustc::hir::Node; use rustc::hir::def_id::DefId; @@ -21,37 +12,42 @@ use rustc::lint::builtin::{SAFE_EXTERN_STATICS, SAFE_PACKED_BORROWS, UNUSED_UNSA use rustc::mir::*; use rustc::mir::visit::{PlaceContext, Visitor, MutatingUseContext}; -use syntax::ast; use syntax::symbol::Symbol; -use syntax::feature_gate::{emit_feature_err, GateIssue}; use std::ops::Bound; -use util; +use crate::util; pub struct UnsafetyChecker<'a, 'tcx: 'a> { mir: &'a Mir<'tcx>, + const_context: bool, min_const_fn: bool, source_scope_local_data: &'a IndexVec, violations: Vec, source_info: SourceInfo, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - /// mark an `unsafe` block as used, so we don't lint it - used_unsafe: FxHashSet, - inherited_blocks: Vec<(ast::NodeId, bool)>, + /// Mark an `unsafe` block as used, so we don't lint it. + used_unsafe: FxHashSet, + inherited_blocks: Vec<(hir::HirId, bool)>, } impl<'a, 'gcx, 'tcx> UnsafetyChecker<'a, 'tcx> { fn new( + const_context: bool, min_const_fn: bool, mir: &'a Mir<'tcx>, source_scope_local_data: &'a IndexVec, tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Self { + // sanity check + if min_const_fn { + assert!(const_context); + } Self { mir, + const_context, min_const_fn, source_scope_local_data, violations: vec![], @@ -97,7 +93,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { if let hir::Unsafety::Unsafe = sig.unsafety() { self.require_unsafe("call to unsafe function", "consult the function's documentation for information on how to avoid \ - undefined behavior", UnsafetyViolationKind::GatedConstFnCall) + undefined behavior", UnsafetyViolationKind::GeneralAndConstFn) } } } @@ -117,7 +113,6 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { StatementKind::StorageLive(..) | StatementKind::StorageDead(..) | StatementKind::Retag { .. } | - StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) | StatementKind::Nop => { // safe (at least as emitted during MIR construction) @@ -136,29 +131,70 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { rvalue: &Rvalue<'tcx>, location: Location) { - if let &Rvalue::Aggregate(box ref aggregate, _) = rvalue { - match aggregate { - &AggregateKind::Array(..) | - &AggregateKind::Tuple => {} - &AggregateKind::Adt(ref def, ..) 
=> { - match self.tcx.layout_scalar_valid_range(def.did) { - (Bound::Unbounded, Bound::Unbounded) => {}, - _ => self.require_unsafe( - "initializing type with `rustc_layout_scalar_valid_range` attr", - "initializing a layout restricted type's field with a value outside \ - the valid range is undefined behavior", - UnsafetyViolationKind::GeneralAndConstFn, - ), + match rvalue { + Rvalue::Aggregate(box ref aggregate, _) => { + match aggregate { + &AggregateKind::Array(..) | + &AggregateKind::Tuple => {} + &AggregateKind::Adt(ref def, ..) => { + match self.tcx.layout_scalar_valid_range(def.did) { + (Bound::Unbounded, Bound::Unbounded) => {}, + _ => self.require_unsafe( + "initializing type with `rustc_layout_scalar_valid_range` attr", + "initializing a layout restricted type's field with a value \ + outside the valid range is undefined behavior", + UnsafetyViolationKind::GeneralAndConstFn, + ), + } + } + &AggregateKind::Closure(def_id, _) | + &AggregateKind::Generator(def_id, _, _) => { + let UnsafetyCheckResult { + violations, unsafe_blocks + } = self.tcx.unsafety_check_result(def_id); + self.register_violations(&violations, &unsafe_blocks); } } - &AggregateKind::Closure(def_id, _) | - &AggregateKind::Generator(def_id, _, _) => { - let UnsafetyCheckResult { - violations, unsafe_blocks - } = self.tcx.unsafety_check_result(def_id); - self.register_violations(&violations, &unsafe_blocks); + }, + // casting pointers to ints is unsafe in const fn because the const evaluator cannot + // possibly know what the result of various operations like `address / 2` would be + // pointers during const evaluation have no integral address, only an abstract one + Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) + if self.const_context && self.tcx.features().const_raw_ptr_to_usize_cast => { + let operand_ty = operand.ty(self.mir, self.tcx); + let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast"); + let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + match (cast_in, cast_out) { + (CastTy::Ptr(_), CastTy::Int(_)) | + (CastTy::FnPtr, CastTy::Int(_)) => { + self.register_violations(&[UnsafetyViolation { + source_info: self.source_info, + description: Symbol::intern("cast of pointer to int").as_interned_str(), + details: Symbol::intern("casting pointers to integers in constants") + .as_interned_str(), + kind: UnsafetyViolationKind::General, + }], &[]); + }, + _ => {}, + } + } + // raw pointer and fn pointer operations are unsafe as it is not clear whether one + // pointer would be "less" or "equal" to another, because we cannot know where llvm + // or the linker will place various statics in memory. Without this information the + // result of a comparison of addresses would differ between runtime and compile-time. + Rvalue::BinaryOp(_, ref lhs, _) + if self.const_context && self.tcx.features().const_compare_raw_pointers => { + if let ty::RawPtr(_) | ty::FnPtr(..) 
= lhs.ty(self.mir, self.tcx).sty { + self.register_violations(&[UnsafetyViolation { + source_info: self.source_info, + description: Symbol::intern("pointer operation").as_interned_str(), + details: Symbol::intern("operations on pointers in constants") + .as_interned_str(), + kind: UnsafetyViolationKind::General, + }], &[]); } } + _ => {}, } self.super_rvalue(rvalue, location); } @@ -191,7 +227,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { } let is_borrow_of_interior_mut = context.is_borrow() && !base .ty(self.mir, self.tcx) - .to_ty(self.tcx) + .ty .is_freeze(self.tcx, self.param_env, self.source_info.span); // prevent // * `&mut x.field` @@ -205,7 +241,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { ); } let old_source_info = self.source_info; - if let &Place::Local(local) = base { + if let &Place::Base(PlaceBase::Local(local)) = base { if self.mir.local_decls[local].internal { // Internal locals are used in the `move_val_init` desugaring. // We want to check unsafety against the source info of the @@ -213,7 +249,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { self.source_info = self.mir.local_decls[local].source_info; } } - let base_ty = base.ty(self.mir, self.tcx).to_ty(self.tcx); + let base_ty = base.ty(self.mir, self.tcx).ty; match base_ty.sty { ty::RawPtr(..) => { self.require_unsafe("dereference of raw pointer", @@ -236,8 +272,11 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { "non-field projection {:?} from union?", place) }; - if elem_ty.moves_by_default(self.tcx, self.param_env, - self.source_info.span) { + if !elem_ty.is_copy_modulo_regions( + self.tcx, + self.param_env, + self.source_info.span, + ) { self.require_unsafe( "assignment to non-`Copy` union field", "the previous content of the field will be dropped, which \ @@ -258,13 +297,15 @@ impl<'a, 'tcx> Visitor<'tcx> for UnsafetyChecker<'a, 'tcx> { } self.source_info = old_source_info; } - &Place::Local(..) => { + &Place::Base(PlaceBase::Local(..)) => { // locals are safe } - &Place::Promoted(_) => { + &Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. })) => { bug!("unsafety checking should happen before promotion") } - &Place::Static(box Static { def_id, ty: _ }) => { + &Place::Base( + PlaceBase::Static(box Static { kind: StaticKind::Static(def_id), .. 
}) + ) => { if self.tcx.is_static(def_id) == Some(hir::Mutability::MutMutable) { self.require_unsafe("use of mutable static", "mutable statics can be mutated by multiple threads: aliasing violations \ @@ -309,7 +350,7 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { fn register_violations(&mut self, violations: &[UnsafetyViolation], - unsafe_blocks: &[(ast::NodeId, bool)]) { + unsafe_blocks: &[(hir::HirId, bool)]) { let safety = self.source_scope_local_data[self.source_info.scope].safety; let within_unsafe = match safety { // `unsafe` blocks are required in safe code @@ -326,11 +367,6 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { // compat lint violation.kind = UnsafetyViolationKind::General; }, - UnsafetyViolationKind::GatedConstFnCall => { - // safe code can't call unsafe const fns, this `UnsafetyViolationKind` - // is only relevant for `Safety::ExplicitUnsafe` in `unsafe const fn`s - violation.kind = UnsafetyViolationKind::General; - } } if !self.violations.contains(&violation) { self.violations.push(violation) @@ -340,26 +376,15 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { } // `unsafe` function bodies allow unsafe without additional unsafe blocks Safety::BuiltinUnsafe | Safety::FnUnsafe => true, - Safety::ExplicitUnsafe(node_id) => { + Safety::ExplicitUnsafe(hir_id) => { // mark unsafe block as used if there are any unsafe operations inside if !violations.is_empty() { - self.used_unsafe.insert(node_id); + self.used_unsafe.insert(hir_id); } // only some unsafety is allowed in const fn if self.min_const_fn { - let min_const_unsafe_fn = self.tcx.features().min_const_unsafe_fn; for violation in violations { match violation.kind { - UnsafetyViolationKind::GatedConstFnCall if min_const_unsafe_fn => { - // these function calls to unsafe functions are allowed - // if `#![feature(min_const_unsafe_fn)]` is active - }, - UnsafetyViolationKind::GatedConstFnCall => { - // without the feature gate, we report errors - if !self.violations.contains(&violation) { - self.violations.push(violation.clone()) - } - } // these unsafe things are stable in const fn UnsafetyViolationKind::GeneralAndConstFn => {}, // these things are forbidden in const fns @@ -381,8 +406,8 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { true } }; - self.inherited_blocks.extend(unsafe_blocks.iter().map(|&(node_id, is_used)| { - (node_id, is_used && !within_unsafe) + self.inherited_blocks.extend(unsafe_blocks.iter().map(|&(hir_id, is_used)| { + (hir_id, is_used && !within_unsafe) })); } fn check_mut_borrowing_layout_constrained_field( @@ -395,7 +420,7 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { }) = place { match *elem { ProjectionElem::Field(..) 
=> { - let ty = base.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx); + let ty = base.ty(&self.mir.local_decls, self.tcx).ty; match ty.sty { ty::Adt(def, _) => match self.tcx.layout_scalar_valid_range(def.did) { (Bound::Unbounded, Bound::Unbounded) => {}, @@ -434,7 +459,7 @@ impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> { } } -pub(crate) fn provide(providers: &mut Providers) { +pub(crate) fn provide(providers: &mut Providers<'_>) { *providers = Providers { unsafety_check_result, unsafe_derive_on_repr_packed, @@ -443,8 +468,8 @@ pub(crate) fn provide(providers: &mut Providers) { } struct UnusedUnsafeVisitor<'a> { - used_unsafe: &'a FxHashSet, - unsafe_blocks: &'a mut Vec<(ast::NodeId, bool)>, + used_unsafe: &'a FxHashSet, + unsafe_blocks: &'a mut Vec<(hir::HirId, bool)>, } impl<'a, 'tcx> hir::intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'a> { @@ -458,19 +483,19 @@ impl<'a, 'tcx> hir::intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'a> { hir::intravisit::walk_block(self, block); if let hir::UnsafeBlock(hir::UserProvided) = block.rules { - self.unsafe_blocks.push((block.id, self.used_unsafe.contains(&block.id))); + self.unsafe_blocks.push((block.hir_id, self.used_unsafe.contains(&block.hir_id))); } } } fn check_unused_unsafe<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - used_unsafe: &FxHashSet, - unsafe_blocks: &'a mut Vec<(ast::NodeId, bool)>) + used_unsafe: &FxHashSet, + unsafe_blocks: &'a mut Vec<(hir::HirId, bool)>) { let body_id = - tcx.hir().as_local_node_id(def_id).and_then(|node_id| { - tcx.hir().maybe_body_owned_by(node_id) + tcx.hir().as_local_hir_id(def_id).and_then(|hir_id| { + tcx.hir().maybe_body_owned_by_by_hir_id(hir_id) }); let body_id = match body_id { @@ -509,8 +534,16 @@ fn unsafety_check_result<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) }; let param_env = tcx.param_env(def_id); + + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let (const_context, min_const_fn) = match tcx.hir().body_owner_kind_by_hir_id(id) { + hir::BodyOwnerKind::Closure => (false, false), + hir::BodyOwnerKind::Fn => (tcx.is_const_fn(def_id), tcx.is_min_const_fn(def_id)), + hir::BodyOwnerKind::Const | + hir::BodyOwnerKind::Static(_) => (true, false), + }; let mut checker = UnsafetyChecker::new( - tcx.is_min_const_fn(def_id), + const_context, min_const_fn, mir, source_scope_local_data, tcx, param_env); checker.visit_mir(mir); @@ -522,38 +555,37 @@ fn unsafety_check_result<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) } fn unsafe_derive_on_repr_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { - let lint_node_id = match tcx.hir().as_local_node_id(def_id) { - Some(node_id) => node_id, - None => bug!("checking unsafety for non-local def id {:?}", def_id) - }; + let lint_hir_id = tcx.hir().as_local_hir_id(def_id).unwrap_or_else(|| + bug!("checking unsafety for non-local def id {:?}", def_id)); // FIXME: when we make this a hard error, this should have its // own error code. 
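// (Rough background for the two messages below: a derived impl takes a
// reference to each field, and fields of a `#[repr(packed)]` struct may be
// under-aligned, so those references are undefined behavior; a `Copy` struct
// can have its fields copied out instead, which is why that case is called
// out separately.)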
- let message = if tcx.generics_of(def_id).own_counts().types != 0 { + let counts = tcx.generics_of(def_id).own_counts(); + let message = if counts.types + counts.consts != 0 { "#[derive] can't be used on a #[repr(packed)] struct with \ - type parameters (error E0133)".to_string() + type or const parameters (error E0133)".to_string() } else { "#[derive] can't be used on a #[repr(packed)] struct that \ does not derive Copy (error E0133)".to_string() }; - tcx.lint_node(SAFE_PACKED_BORROWS, - lint_node_id, - tcx.def_span(def_id), - &message); + tcx.lint_hir(SAFE_PACKED_BORROWS, + lint_hir_id, + tcx.def_span(def_id), + &message); } -/// Return the NodeId for an enclosing scope that is also `unsafe` -fn is_enclosed(tcx: TyCtxt, - used_unsafe: &FxHashSet, - id: ast::NodeId) -> Option<(String, ast::NodeId)> { - let parent_id = tcx.hir().get_parent_node(id); +/// Returns the `HirId` for an enclosing scope that is also `unsafe`. +fn is_enclosed(tcx: TyCtxt<'_, '_, '_>, + used_unsafe: &FxHashSet, + id: hir::HirId) -> Option<(String, hir::HirId)> { + let parent_id = tcx.hir().get_parent_node_by_hir_id(id); if parent_id != id { if used_unsafe.contains(&parent_id) { Some(("block".to_string(), parent_id)) } else if let Some(Node::Item(&hir::Item { node: hir::ItemKind::Fn(_, header, _, _), .. - })) = tcx.hir().find(parent_id) { + })) = tcx.hir().find_by_hir_id(parent_id) { match header.unsafety { hir::Unsafety::Unsafe => Some(("fn".to_string(), parent_id)), hir::Unsafety::Normal => None, @@ -566,13 +598,15 @@ fn is_enclosed(tcx: TyCtxt, } } -fn report_unused_unsafe(tcx: TyCtxt, used_unsafe: &FxHashSet, id: ast::NodeId) { - let span = tcx.sess.source_map().def_span(tcx.hir().span(id)); +fn report_unused_unsafe(tcx: TyCtxt<'_, '_, '_>, + used_unsafe: &FxHashSet, + id: hir::HirId) { + let span = tcx.sess.source_map().def_span(tcx.hir().span_by_hir_id(id)); let msg = "unnecessary `unsafe` block"; - let mut db = tcx.struct_span_lint_node(UNUSED_UNSAFE, id, span, msg); + let mut db = tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, msg); db.span_label(span, msg); if let Some((kind, id)) = is_enclosed(tcx, used_unsafe, id) { - db.span_label(tcx.sess.source_map().def_span(tcx.hir().span(id)), + db.span_label(tcx.sess.source_map().def_span(tcx.hir().span_by_hir_id(id)), format!("because it's nested under this `unsafe` {}", kind)); } db.emit(); @@ -621,30 +655,20 @@ pub fn check_unsafety<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { .note(&details.as_str()[..]) .emit(); } - UnsafetyViolationKind::GatedConstFnCall => { - emit_feature_err( - &tcx.sess.parse_sess, - "min_const_unsafe_fn", - source_info.span, - GateIssue::Language, - "calls to `const unsafe fn` in const fns are unstable", - ); - - } - UnsafetyViolationKind::ExternStatic(lint_node_id) => { + UnsafetyViolationKind::ExternStatic(lint_hir_id) => { tcx.lint_node_note(SAFE_EXTERN_STATICS, - lint_node_id, + lint_hir_id, source_info.span, &format!("{} is unsafe and requires unsafe function or block \ (error E0133)", &description.as_str()[..]), &details.as_str()[..]); } - UnsafetyViolationKind::BorrowPacked(lint_node_id) => { + UnsafetyViolationKind::BorrowPacked(lint_hir_id) => { if let Some(impl_def_id) = builtin_derive_def_id(tcx, def_id) { tcx.unsafe_derive_on_repr_packed(impl_def_id); } else { tcx.lint_node_note(SAFE_PACKED_BORROWS, - lint_node_id, + lint_hir_id, source_info.span, &format!("{} is unsafe and requires unsafe function or block \ (error E0133)", &description.as_str()[..]), @@ -655,7 +679,7 @@ pub fn check_unsafety<'a, 'tcx>(tcx: 
TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { } let mut unsafe_blocks: Vec<_> = unsafe_blocks.into_iter().collect(); - unsafe_blocks.sort(); + unsafe_blocks.sort_by_cached_key(|(hir_id, _)| tcx.hir().hir_to_node_id(*hir_id)); let used_unsafe: FxHashSet<_> = unsafe_blocks.iter() .flat_map(|&&(id, used)| if used { Some(id) } else { None }) .collect(); diff --git a/src/librustc_mir/transform/cleanup_post_borrowck.rs b/src/librustc_mir/transform/cleanup_post_borrowck.rs index 6d7fc404edbb1..349b27523a0a1 100644 --- a/src/librustc_mir/transform/cleanup_post_borrowck.rs +++ b/src/librustc_mir/transform/cleanup_post_borrowck.rs @@ -1,19 +1,9 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This module provides two passes: +//! This module provides a pass to replacing the following statements with +//! [`Nop`]s //! -//! - [`CleanAscribeUserType`], that replaces all [`AscribeUserType`] -//! statements with [`Nop`]. -//! - [`CleanFakeReadsAndBorrows`], that replaces all [`FakeRead`] statements -//! and borrows that are read by [`ForMatchGuard`] fake reads with [`Nop`]. +//! - [`AscribeUserType`] +//! - [`FakeRead`] +//! - [`Assign`] statements with a [`Shallow`] borrow //! //! The `CleanFakeReadsAndBorrows` "pass" is actually implemented as two //! traversals (aka visits) of the input MIR. The first traversal, @@ -21,102 +11,41 @@ //! temporaries read by [`ForMatchGuard`] reads, and [`DeleteFakeBorrows`] //! deletes the initialization of those temporaries. //! -//! [`CleanAscribeUserType`]: cleanup_post_borrowck::CleanAscribeUserType -//! [`CleanFakeReadsAndBorrows`]: cleanup_post_borrowck::CleanFakeReadsAndBorrows -//! [`DeleteAndRecordFakeReads`]: cleanup_post_borrowck::DeleteAndRecordFakeReads -//! [`DeleteFakeBorrows`]: cleanup_post_borrowck::DeleteFakeBorrows //! [`AscribeUserType`]: rustc::mir::StatementKind::AscribeUserType -//! [`Nop`]: rustc::mir::StatementKind::Nop +//! [`Shallow`]: rustc::mir::BorrowKind::Shallow //! [`FakeRead`]: rustc::mir::StatementKind::FakeRead -//! [`ForMatchGuard`]: rustc::mir::FakeReadCause::ForMatchGuard - -use rustc_data_structures::fx::FxHashSet; +//! [`Nop`]: rustc::mir::StatementKind::Nop -use rustc::mir::{BasicBlock, FakeReadCause, Local, Location, Mir, Place}; +use rustc::mir::{BasicBlock, BorrowKind, Rvalue, Location, Mir}; use rustc::mir::{Statement, StatementKind}; use rustc::mir::visit::MutVisitor; use rustc::ty::TyCtxt; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; -pub struct CleanAscribeUserType; +pub struct CleanupNonCodegenStatements; -pub struct DeleteAscribeUserType; +pub struct DeleteNonCodegenStatements; -impl MirPass for CleanAscribeUserType { +impl MirPass for CleanupNonCodegenStatements { fn run_pass<'a, 'tcx>(&self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, + _source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { - let mut delete = DeleteAscribeUserType; + let mut delete = DeleteNonCodegenStatements; delete.visit_mir(mir); } } -impl<'tcx> MutVisitor<'tcx> for DeleteAscribeUserType { - fn visit_statement(&mut self, - block: BasicBlock, - statement: &mut Statement<'tcx>, - location: Location) { - if let StatementKind::AscribeUserType(..) 
= statement.kind { - statement.make_nop(); - } - self.super_statement(block, statement, location); - } -} - -pub struct CleanFakeReadsAndBorrows; - -#[derive(Default)] -pub struct DeleteAndRecordFakeReads { - fake_borrow_temporaries: FxHashSet, -} - -pub struct DeleteFakeBorrows { - fake_borrow_temporaries: FxHashSet, -} - -// Removes any FakeReads from the MIR -impl MirPass for CleanFakeReadsAndBorrows { - fn run_pass<'a, 'tcx>(&self, - _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, - mir: &mut Mir<'tcx>) { - let mut delete_reads = DeleteAndRecordFakeReads::default(); - delete_reads.visit_mir(mir); - let mut delete_borrows = DeleteFakeBorrows { - fake_borrow_temporaries: delete_reads.fake_borrow_temporaries, - }; - delete_borrows.visit_mir(mir); - } -} - -impl<'tcx> MutVisitor<'tcx> for DeleteAndRecordFakeReads { - fn visit_statement(&mut self, - block: BasicBlock, - statement: &mut Statement<'tcx>, - location: Location) { - if let StatementKind::FakeRead(cause, ref place) = statement.kind { - if let FakeReadCause::ForMatchGuard = cause { - match *place { - Place::Local(local) => self.fake_borrow_temporaries.insert(local), - _ => bug!("Fake match guard read of non-local: {:?}", place), - }; - } - statement.make_nop(); - } - self.super_statement(block, statement, location); - } -} - -impl<'tcx> MutVisitor<'tcx> for DeleteFakeBorrows { +impl<'tcx> MutVisitor<'tcx> for DeleteNonCodegenStatements { fn visit_statement(&mut self, block: BasicBlock, statement: &mut Statement<'tcx>, location: Location) { - if let StatementKind::Assign(Place::Local(local), _) = statement.kind { - if self.fake_borrow_temporaries.contains(&local) { - statement.make_nop(); - } + match statement.kind { + StatementKind::AscribeUserType(..) + | StatementKind::Assign(_, box Rvalue::Ref(_, BorrowKind::Shallow, _)) + | StatementKind::FakeRead(..) => statement.make_nop(), + _ => (), } self.super_statement(block, statement, location); } diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index acae03f7f94f5..e0ff71cbe52f8 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -1,26 +1,16 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Propagates constants for early reporting of statically known //! 
assertion failures use rustc::hir::def::Def; -use rustc::mir::{Constant, Location, Place, Mir, Operand, Rvalue, Local}; -use rustc::mir::{NullOp, UnOp, StatementKind, Statement, BasicBlock, LocalKind}; +use rustc::mir::{Constant, Location, Place, PlaceBase, Mir, Operand, Rvalue, Local}; +use rustc::mir::{NullOp, UnOp, StatementKind, Statement, BasicBlock, LocalKind, Static, StaticKind}; use rustc::mir::{TerminatorKind, ClearCrossCrate, SourceInfo, BinOp, ProjectionElem}; use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext}; -use rustc::mir::interpret::{EvalErrorKind, Scalar, GlobalId, EvalResult}; +use rustc::mir::interpret::{InterpError, Scalar, GlobalId, EvalResult}; use rustc::ty::{TyCtxt, self, Instance}; use syntax::source_map::{Span, DUMMY_SP}; -use rustc::ty::subst::Substs; +use rustc::ty::subst::InternalSubsts; use rustc_data_structures::indexed_vec::IndexVec; use rustc::ty::ParamEnv; use rustc::ty::layout::{ @@ -28,18 +18,18 @@ use rustc::ty::layout::{ HasTyCtxt, TargetDataLayout, HasDataLayout, }; -use interpret::{self, EvalContext, ScalarMaybeUndef, Immediate, OpTy, MemoryKind}; -use const_eval::{ - CompileTimeInterpreter, const_to_op, error_to_const_error, eval_promoted, mk_borrowck_eval_cx +use crate::interpret::{InterpretCx, ScalarMaybeUndef, Immediate, OpTy, ImmTy, MemoryKind}; +use crate::const_eval::{ + CompileTimeInterpreter, error_to_const_error, eval_promoted, mk_eval_cx, }; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct ConstProp; impl MirPass for ConstProp { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { // will be evaluated by miri and produce its errors there if source.promoted.is_some() { @@ -47,11 +37,11 @@ impl MirPass for ConstProp { } use rustc::hir::map::blocks::FnLikeNode; - let node_id = tcx.hir().as_local_node_id(source.def_id) - .expect("Non-local call to local provider is_const_fn"); + let hir_id = tcx.hir().as_local_hir_id(source.def_id()) + .expect("Non-local call to local provider is_const_fn"); - let is_fn_like = FnLikeNode::from_node(tcx.hir().get(node_id)).is_some(); - let is_assoc_const = match tcx.describe_def(source.def_id) { + let is_fn_like = FnLikeNode::from_node(tcx.hir().get_by_hir_id(hir_id)).is_some(); + let is_assoc_const = match tcx.describe_def(source.def_id()) { Some(Def::AssociatedConst(_)) => true, _ => false, }; @@ -59,11 +49,11 @@ impl MirPass for ConstProp { // Only run const prop on functions, methods, closures and associated constants if !is_fn_like && !is_assoc_const { // skip anon_const/statics/consts because they'll be evaluated by miri anyway - trace!("ConstProp skipped for {:?}", source.def_id); + trace!("ConstProp skipped for {:?}", source.def_id()); return } - trace!("ConstProp starting for {:?}", source.def_id); + trace!("ConstProp starting for {:?}", source.def_id()); // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold // constants, instead of just checking for const-folding succeeding. @@ -72,7 +62,7 @@ impl MirPass for ConstProp { let mut optimization_finder = ConstPropagator::new(mir, tcx, source); optimization_finder.visit_mir(mir); - trace!("ConstProp done for {:?}", source.def_id); + trace!("ConstProp done for {:?}", source.def_id()); } } @@ -80,10 +70,10 @@ type Const<'tcx> = (OpTy<'tcx>, Span); /// Finds optimization opportunities on the MIR. 
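/// As a rough illustration, code like `let xs = [0u8; 4]; let _ = xs[7];` is
/// caught here: the index, the array length, and hence the failing bounds
/// check are all known at compile time, so the panic can be linted without
/// ever running the program.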
struct ConstPropagator<'a, 'mir, 'tcx:'a+'mir> { - ecx: EvalContext<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>, + ecx: InterpretCx<'a, 'mir, 'tcx, CompileTimeInterpreter<'a, 'mir, 'tcx>>, mir: &'mir Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, places: IndexVec>>, can_const_prop: IndexVec, param_env: ParamEnv<'tcx>, @@ -116,12 +106,10 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { fn new( mir: &'mir Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, ) -> ConstPropagator<'a, 'mir, 'tcx> { - let param_env = tcx.param_env(source.def_id); - let substs = Substs::identity_for_item(tcx, source.def_id); - let instance = Instance::new(source.def_id, substs); - let ecx = mk_borrowck_eval_cx(tcx, instance, mir, DUMMY_SP).unwrap(); + let param_env = tcx.param_env(source.def_id()); + let ecx = mk_eval_cx(tcx, tcx.def_span(source.def_id()), param_env); ConstPropagator { ecx, mir, @@ -156,7 +144,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { Ok(val) => Some(val), Err(error) => { let diagnostic = error_to_const_error(&self.ecx, error); - use rustc::mir::interpret::EvalErrorKind::*; + use rustc::mir::interpret::InterpError::*; match diagnostic.error { // don't report these, they make no sense in a const prop context | MachineError(_) @@ -249,6 +237,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { self.ecx.tcx, "this expression will panic at runtime", lint_root, + None, ); } } @@ -265,7 +254,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { source_info: SourceInfo, ) -> Option> { self.ecx.tcx.span = source_info.span; - match const_to_op(&self.ecx, c.literal) { + match self.ecx.eval_const_to_op(*c.literal, None) { Ok(op) => { Some((op, c.span)) }, @@ -279,7 +268,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { fn eval_place(&mut self, place: &Place<'tcx>, source_info: SourceInfo) -> Option> { match *place { - Place::Local(loc) => self.places[loc].clone(), + Place::Base(PlaceBase::Local(loc)) => self.places[loc].clone(), Place::Projection(ref proj) => match proj.elem { ProjectionElem::Field(field, _) => { trace!("field proj on {:?}", proj.base); @@ -294,17 +283,19 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { // an `Index` projection would throw us off-track. 
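// (A `Field` offset is a compile-time constant, whereas `place[i]` depends on
// the runtime value of `i`, which this pass does not track.)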
_ => None, }, - Place::Promoted(ref promoted) => { - let generics = self.tcx.generics_of(self.source.def_id); + Place::Base( + PlaceBase::Static(box Static {kind: StaticKind::Promoted(promoted), ..}) + ) => { + let generics = self.tcx.generics_of(self.source.def_id()); if generics.requires_monomorphization(self.tcx) { // FIXME: can't handle code with generics return None; } - let substs = Substs::identity_for_item(self.tcx, self.source.def_id); - let instance = Instance::new(self.source.def_id, substs); + let substs = InternalSubsts::identity_for_item(self.tcx, self.source.def_id()); + let instance = Instance::new(self.source.def_id(), substs); let cid = GlobalId { instance, - promoted: Some(promoted.0), + promoted: Some(promoted), }; // cannot use `const_eval` here, because that would require having the MIR // for the current function available, but we're producing said MIR right now @@ -346,7 +337,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { Rvalue::Cast(kind, ref operand, _) => { let (op, span) = self.eval_operand(operand, source_info)?; self.use_ecx(source_info, |this| { - let dest = this.ecx.allocate(place_layout, MemoryKind::Stack)?; + let dest = this.ecx.allocate(place_layout, MemoryKind::Stack); this.ecx.cast(op, kind, dest.into())?; Ok((dest.into(), span)) }) @@ -356,23 +347,23 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { Rvalue::Len(_) => None, Rvalue::NullaryOp(NullOp::SizeOf, ty) => { type_size_of(self.tcx, self.param_env, ty).and_then(|n| Some(( - OpTy { - op: interpret::Operand::Immediate(Immediate::Scalar( + ImmTy { + imm: Immediate::Scalar( Scalar::Bits { bits: n as u128, size: self.tcx.data_layout.pointer_size.bytes() as u8, }.into() - )), + ), layout: self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).ok()?, - }, + }.into(), span, ))) } Rvalue::UnaryOp(op, ref arg) => { - let def_id = if self.tcx.is_closure(self.source.def_id) { - self.tcx.closure_base_def_id(self.source.def_id) + let def_id = if self.tcx.is_closure(self.source.def_id()) { + self.tcx.closure_base_def_id(self.source.def_id()) } else { - self.source.def_id + self.source.def_id() }; let generics = self.tcx.generics_of(def_id); if generics.requires_monomorphization(self.tcx) { @@ -382,13 +373,12 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { let (arg, _) = self.eval_operand(arg, source_info)?; let val = self.use_ecx(source_info, |this| { - let prim = this.ecx.read_scalar(arg)?.not_undef()?; + let prim = this.ecx.read_immediate(arg)?; match op { UnOp::Neg => { // Need to do overflow check here: For actual CTFE, MIR // generation emits code that does this before calling the op. - let size = arg.layout.size; - if prim.to_bits(size)? == (1 << (size.bits() - 1)) { + if prim.to_bits()? == (1 << (prim.layout.size.bits() - 1)) { return err!(OverflowNeg); } } @@ -397,22 +387,22 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { } } // Now run the actual operation. 
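// (Worked instance of the check above: for an `i8` operand the layout size is
// 8 bits, so the comparison is against `1 << 7` = 0x80, the bit pattern of
// `i8::MIN`; negating `i8::MIN` is the one `-x` on `i8` that overflows.)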
- this.ecx.unary_op(op, prim, arg.layout) + this.ecx.unary_op(op, prim) })?; - let res = OpTy { - op: interpret::Operand::Immediate(Immediate::Scalar(val.into())), + let res = ImmTy { + imm: Immediate::Scalar(val.into()), layout: place_layout, }; - Some((res, span)) + Some((res.into(), span)) } Rvalue::CheckedBinaryOp(op, ref left, ref right) | Rvalue::BinaryOp(op, ref left, ref right) => { trace!("rvalue binop {:?} for {:?} and {:?}", op, left, right); let right = self.eval_operand(right, source_info)?; - let def_id = if self.tcx.is_closure(self.source.def_id) { - self.tcx.closure_base_def_id(self.source.def_id) + let def_id = if self.tcx.is_closure(self.source.def_id()) { + self.tcx.closure_base_def_id(self.source.def_id()) } else { - self.source.def_id + self.source.def_id() }; let generics = self.tcx.generics_of(def_id); if generics.requires_monomorphization(self.tcx) { @@ -443,10 +433,10 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { } else { "left" }; - let node_id = source_scope_local_data[source_info.scope].lint_root; - self.tcx.lint_node( + let hir_id = source_scope_local_data[source_info.scope].lint_root; + self.tcx.lint_hir( ::rustc::lint::builtin::EXCEEDING_BITSHIFTS, - node_id, + hir_id, span, &format!("attempt to shift {} with overflow", dir)); return None; @@ -458,7 +448,7 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { })?; trace!("const evaluating {:?} for {:?} and {:?}", op, left, right); let (val, overflow) = self.use_ecx(source_info, |this| { - this.ecx.binary_op_imm(op, l, r) + this.ecx.binary_op(op, l, r) })?; let val = if let Rvalue::CheckedBinaryOp(..) = *rvalue { Immediate::ScalarPair( @@ -467,17 +457,17 @@ impl<'a, 'mir, 'tcx> ConstPropagator<'a, 'mir, 'tcx> { ) } else { if overflow { - let err = EvalErrorKind::Overflow(op).into(); + let err = InterpError::Overflow(op).into(); let _: Option<()> = self.use_ecx(source_info, |_| Err(err)); return None; } Immediate::Scalar(val.into()) }; - let res = OpTy { - op: interpret::Operand::Immediate(val), + let res = ImmTy { + imm: val, layout: place_layout, }; - Some((res, span)) + Some((res.into(), span)) }, } } @@ -497,7 +487,7 @@ struct CanConstProp { impl CanConstProp { /// returns true if `local` can be propagated - fn check(mir: &Mir) -> IndexVec { + fn check(mir: &Mir<'_>) -> IndexVec { let mut cpv = CanConstProp { can_const_prop: IndexVec::from_elem(true, &mir.local_decls), found_assignment: IndexVec::from_elem(false, &mir.local_decls), @@ -566,10 +556,10 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { if let StatementKind::Assign(ref place, ref rval) = statement.kind { let place_ty: ty::Ty<'tcx> = place .ty(&self.mir.local_decls, self.tcx) - .to_ty(self.tcx); + .ty; if let Ok(place_layout) = self.tcx.layout_of(self.param_env.and(place_ty)) { if let Some(value) = self.const_prop(rval, place_layout, statement.source_info) { - if let Place::Local(local) = *place { + if let Place::Base(PlaceBase::Local(local)) = *place { trace!("checking whether {:?} can be stored to {:?}", value, local); if self.can_const_prop[local] { trace!("storing {:?} to {:?}", value, local); @@ -604,7 +594,7 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { while let Place::Projection(ref proj) = *place { place = &proj.base; } - if let Place::Local(local) = *place { + if let Place::Base(PlaceBase::Local(local)) = *place { self.places[local] = None; } }, @@ -616,12 +606,12 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { .unwrap() .source_info .span; - let 
node_id = self + let hir_id = self .tcx .hir() - .as_local_node_id(self.source.def_id) + .as_local_hir_id(self.source.def_id()) .expect("some part of a failing const eval must be local"); - use rustc::mir::interpret::EvalErrorKind::*; + use rustc::mir::interpret::InterpError::*; let msg = match msg { Overflow(_) | OverflowNeg | @@ -656,9 +646,9 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for ConstPropagator<'b, 'a, 'tcx> { // Need proper const propagator for these _ => return, }; - self.tcx.lint_node( + self.tcx.lint_hir( ::rustc::lint::builtin::CONST_ERR, - node_id, + hir_id, span, &msg, ); diff --git a/src/librustc_mir/transform/copy_prop.rs b/src/librustc_mir/transform/copy_prop.rs index 6d0b25b2c6940..817a2f31c0736 100644 --- a/src/librustc_mir/transform/copy_prop.rs +++ b/src/librustc_mir/transform/copy_prop.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Trivial copy propagation pass. //! //! This uses def-use analysis to remove values that have exactly one def and one use, which must @@ -29,18 +19,20 @@ //! (non-mutating) use of `SRC`. These restrictions are conservative and may be relaxed in the //! future. -use rustc::mir::{Constant, Local, LocalKind, Location, Place, Mir, Operand, Rvalue, StatementKind}; +use rustc::mir::{ + Constant, Local, LocalKind, Location, Place, PlaceBase, Mir, Operand, Rvalue, StatementKind +}; use rustc::mir::visit::MutVisitor; use rustc::ty::TyCtxt; -use transform::{MirPass, MirSource}; -use util::def_use::DefUseAnalysis; +use crate::transform::{MirPass, MirSource}; +use crate::util::def_use::DefUseAnalysis; pub struct CopyPropagation; impl MirPass for CopyPropagation { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, + _source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { // We only run when the MIR optimization level is > 1. // This avoids a slow pass, and messing up debug info. @@ -104,8 +96,10 @@ impl MirPass for CopyPropagation { // That use of the source must be an assignment. 
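// (At the source level this is the pure forwarding pattern, roughly:
//     let tmp = src;   // the single definition of `tmp`
//     consume(tmp);    // becomes `consume(src)`, and `tmp` disappears
// provided `src` is not mutated in between.)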
match statement.kind { - StatementKind::Assign(Place::Local(local), box Rvalue::Use(ref operand)) if - local == dest_local => { + StatementKind::Assign( + Place::Base(PlaceBase::Local(local)), + box Rvalue::Use(ref operand) + ) if local == dest_local => { let maybe_action = match *operand { Operand::Copy(ref src_place) | Operand::Move(ref src_place) => { @@ -154,12 +148,12 @@ fn eliminate_self_assignments<'tcx>( if let Some(stmt) = mir[location.block].statements.get(location.statement_index) { match stmt.kind { StatementKind::Assign( - Place::Local(local), - box Rvalue::Use(Operand::Copy(Place::Local(src_local))), + Place::Base(PlaceBase::Local(local)), + box Rvalue::Use(Operand::Copy(Place::Base(PlaceBase::Local(src_local)))), ) | StatementKind::Assign( - Place::Local(local), - box Rvalue::Use(Operand::Move(Place::Local(src_local))), + Place::Base(PlaceBase::Local(local)), + box Rvalue::Use(Operand::Move(Place::Base(PlaceBase::Local(src_local)))), ) if local == dest_local && dest_local == src_local => {} _ => { continue; @@ -183,10 +177,10 @@ enum Action<'tcx> { } impl<'tcx> Action<'tcx> { - fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis, src_place: &Place<'tcx>) + fn local_copy(mir: &Mir<'tcx>, def_use_analysis: &DefUseAnalysis<'_>, src_place: &Place<'tcx>) -> Option> { // The source must be a local. - let src_local = if let Place::Local(local) = *src_place { + let src_local = if let Place::Base(PlaceBase::Local(local)) = *src_place { local } else { debug!(" Can't copy-propagate local: source is not a local"); @@ -340,8 +334,8 @@ impl<'tcx> MutVisitor<'tcx> for ConstantPropagationVisitor<'tcx> { self.super_operand(operand, location); match *operand { - Operand::Copy(Place::Local(local)) | - Operand::Move(Place::Local(local)) if local == self.dest_local => {} + Operand::Copy(Place::Base(PlaceBase::Local(local))) | + Operand::Move(Place::Base(PlaceBase::Local(local))) if local == self.dest_local => {} _ => return, } diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs index 8a14890f92e1e..9061dfff76fe8 100644 --- a/src/librustc_mir/transform/deaggregator.rs +++ b/src/librustc_mir/transform/deaggregator.rs @@ -1,24 +1,14 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::TyCtxt; use rustc::mir::*; use rustc_data_structures::indexed_vec::Idx; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct Deaggregator; impl MirPass for Deaggregator { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, + _source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let (basic_blocks, local_decls) = mir.basic_blocks_and_local_decls_mut(); let local_decls = &*local_decls; diff --git a/src/librustc_mir/transform/dump_mir.rs b/src/librustc_mir/transform/dump_mir.rs index a16ef2adea9be..81e48fe2dbe3b 100644 --- a/src/librustc_mir/transform/dump_mir.rs +++ b/src/librustc_mir/transform/dump_mir.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass just dumps MIR at a specified point. use std::borrow::Cow; @@ -18,8 +8,8 @@ use std::io; use rustc::mir::Mir; use rustc::session::config::{OutputFilenames, OutputType}; use rustc::ty::TyCtxt; -use transform::{MirPass, MirSource}; -use util as mir_util; +use crate::transform::{MirPass, MirSource}; +use crate::util as mir_util; pub struct Marker(pub &'static str); @@ -30,7 +20,7 @@ impl MirPass for Marker { fn run_pass<'a, 'tcx>(&self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _source: MirSource, + _source: MirSource<'tcx>, _mir: &mut Mir<'tcx>) { } @@ -41,7 +31,7 @@ pub struct Disambiguator { } impl fmt::Display for Disambiguator { - fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { let title = if self.is_after { "after" } else { "before" }; write!(formatter, "{}", title) } @@ -51,7 +41,7 @@ impl fmt::Display for Disambiguator { pub fn on_mir_pass<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, pass_num: &dyn fmt::Display, pass_name: &str, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, is_after: bool) { if mir_util::dump_enabled(tcx, pass_name, source) { diff --git a/src/librustc_mir/transform/elaborate_drops.rs b/src/librustc_mir/transform/elaborate_drops.rs index c8056ea3e1f94..023a61588c42e 100644 --- a/src/librustc_mir/transform/elaborate_drops.rs +++ b/src/librustc_mir/transform/elaborate_drops.rs @@ -1,45 +1,35 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex, LookupResult}; -use dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces}; -use dataflow::{DataflowResults}; -use dataflow::{on_all_children_bits, on_all_drop_children_bits}; -use dataflow::{drop_flag_effects_for_location, on_lookup_result_bits}; -use dataflow::MoveDataParamEnv; -use dataflow::{self, do_dataflow, DebugFormatted}; +use crate::dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex, LookupResult}; +use crate::dataflow::{MaybeInitializedPlaces, MaybeUninitializedPlaces}; +use crate::dataflow::{DataflowResults}; +use crate::dataflow::{on_all_children_bits, on_all_drop_children_bits}; +use crate::dataflow::{drop_flag_effects_for_location, on_lookup_result_bits}; +use crate::dataflow::MoveDataParamEnv; +use crate::dataflow::{self, do_dataflow, DebugFormatted}; +use crate::transform::{MirPass, MirSource}; +use crate::util::patch::MirPatch; +use crate::util::elaborate_drops::{DropFlagState, Unwind, elaborate_drop}; +use crate::util::elaborate_drops::{DropElaborator, DropStyle, DropFlagMode}; use rustc::ty::{self, TyCtxt}; use rustc::ty::layout::VariantIdx; +use rustc::hir; use rustc::mir::*; use rustc::util::nodemap::FxHashMap; use rustc_data_structures::bit_set::BitSet; use std::fmt; -use syntax::ast; use syntax_pos::Span; -use transform::{MirPass, MirSource}; -use util::patch::MirPatch; -use util::elaborate_drops::{DropFlagState, Unwind, elaborate_drop}; -use util::elaborate_drops::{DropElaborator, DropStyle, DropFlagMode}; pub struct ElaborateDrops; impl MirPass for ElaborateDrops { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, + src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { debug!("elaborate_drops({:?} @ {:?})", src, mir.span); - let id = tcx.hir().as_local_node_id(src.def_id).unwrap(); - let param_env = tcx.param_env(src.def_id).with_reveal_all(); + let def_id = src.def_id(); + let param_env = tcx.param_env(src.def_id()).with_reveal_all(); let move_data = match MoveData::gather_moves(mir, tcx) { Ok(move_data) => move_data, Err((move_data, _move_errors)) => { @@ -60,13 +50,13 @@ impl MirPass for ElaborateDrops { move_data, param_env, }; - let dead_unwinds = find_dead_unwinds(tcx, mir, id, &env); + let dead_unwinds = find_dead_unwinds(tcx, mir, def_id, &env); let flow_inits = - do_dataflow(tcx, mir, id, &[], &dead_unwinds, + do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, MaybeInitializedPlaces::new(tcx, mir, &env), |bd, p| DebugFormatted::new(&bd.move_data().move_paths[p])); let flow_uninits = - do_dataflow(tcx, mir, id, &[], &dead_unwinds, + do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, MaybeUninitializedPlaces::new(tcx, mir, &env), |bd, p| DebugFormatted::new(&bd.move_data().move_paths[p])); @@ -84,13 +74,13 @@ impl MirPass for ElaborateDrops { } } -/// Return the set of basic blocks whose unwind edges are known +/// Returns the set of basic blocks whose unwind edges are known /// to not be reachable, because they are `drop` terminators /// that can't drop anything. fn find_dead_unwinds<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, - id: ast::NodeId, + def_id: hir::def_id::DefId, env: &MoveDataParamEnv<'tcx, 'tcx>) -> BitSet { @@ -99,7 +89,7 @@ fn find_dead_unwinds<'a, 'tcx>( // reach cleanup blocks, which can't have unwind edges themselves. 
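// In other words, we are looking for `Drop` terminators whose place cannot
// hold anything that still needs dropping at that point; such a drop is a
// no-op, so its unwind edge can never be taken.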
let mut dead_unwinds = BitSet::new_empty(mir.basic_blocks().len()); let flow_inits = - do_dataflow(tcx, mir, id, &[], &dead_unwinds, + do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, MaybeInitializedPlaces::new(tcx, mir, &env), |bd, p| DebugFormatted::new(&bd.move_data().move_paths[p])); for (bb, bb_data) in mir.basic_blocks().iter_enumerated() { @@ -184,7 +174,7 @@ struct Elaborator<'a, 'b: 'a, 'tcx: 'b> { } impl<'a, 'b, 'tcx> fmt::Debug for Elaborator<'a, 'b, 'tcx> { - fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result { Ok(()) } } @@ -303,8 +293,8 @@ struct ElaborateDropsCtxt<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>, env: &'a MoveDataParamEnv<'tcx, 'tcx>, - flow_inits: DataflowResults>, - flow_uninits: DataflowResults>, + flow_inits: DataflowResults<'tcx, MaybeInitializedPlaces<'a, 'tcx, 'tcx>>, + flow_uninits: DataflowResults<'tcx, MaybeUninitializedPlaces<'a, 'tcx, 'tcx>>, drop_flags: FxHashMap, patch: MirPatch<'tcx>, } @@ -340,7 +330,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { } fn drop_flag(&mut self, index: MovePathIndex) -> Option> { - self.drop_flags.get(&index).map(|t| Place::Local(*t)) + self.drop_flags.get(&index).map(|t| Place::Base(PlaceBase::Local(*t))) } /// create a patch that elaborates all drops in the input @@ -543,7 +533,9 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { span, ty: self.tcx.types.bool, user_ty: None, - literal: ty::Const::from_bool(self.tcx, val), + literal: self.tcx.mk_const( + ty::Const::from_bool(self.tcx, val), + ), }))) } @@ -551,7 +543,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { if let Some(&flag) = self.drop_flags.get(&path) { let span = self.patch.source_info_for_location(self.mir, loc).span; let val = self.constant_bool(span, val.value()); - self.patch.add_assign(loc, Place::Local(flag), val); + self.patch.add_assign(loc, Place::Base(PlaceBase::Local(flag)), val); } } @@ -560,7 +552,7 @@ impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> { let span = self.patch.source_info_for_location(self.mir, loc).span; let false_ = self.constant_bool(span, false); for flag in self.drop_flags.values() { - self.patch.add_assign(loc, Place::Local(*flag), false_.clone()); + self.patch.add_assign(loc, Place::Base(PlaceBase::Local(*flag)), false_.clone()); } } diff --git a/src/librustc_mir/transform/erase_regions.rs b/src/librustc_mir/transform/erase_regions.rs index a5b5a7e86d21b..a853f8d92beae 100644 --- a/src/librustc_mir/transform/erase_regions.rs +++ b/src/librustc_mir/transform/erase_regions.rs @@ -1,24 +1,14 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass erases all early-bound regions from the types occurring in the MIR. //! We want to do this once just before codegen, so codegen does not have to take //! care erasing regions all over the place. -//! NOTE: We do NOT erase regions of statements that are relevant for -//! "types-as-contracts"-validation, namely, AcquireValid, ReleaseValid +//! N.B., we do _not_ erase regions of statements that are relevant for +//! "types-as-contracts"-validation, namely, `AcquireValid` and `ReleaseValid`. 
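//! As a rough example, a MIR type such as `&'a mut Vec<&'b str>` leaves this
//! pass with both lifetimes replaced by the erased region; codegen never needs
//! to distinguish them.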
-use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::{self, Ty, TyCtxt}; use rustc::mir::*; use rustc::mir::visit::{MutVisitor, TyContext}; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; struct EraseRegionsVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -46,7 +36,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> { *constant = self.tcx.erase_regions(constant); } - fn visit_substs(&mut self, substs: &mut &'tcx Substs<'tcx>, _: Location) { + fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, _: Location) { *substs = self.tcx.erase_regions(substs); } @@ -63,7 +53,7 @@ pub struct EraseRegions; impl MirPass for EraseRegions { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _: MirSource, + _: MirSource<'tcx>, mir: &mut Mir<'tcx>) { EraseRegionsVisitor::new(tcx).visit_mir(mir); } diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs index f870e4a2a4227..e63c1899fe583 100644 --- a/src/librustc_mir/transform/generator.rs +++ b/src/librustc_mir/transform/generator.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This is the implementation of the pass which transforms generators into state machines. //! //! MIR generation for generators creates a function which has a self argument which @@ -36,7 +26,7 @@ //! } //! //! This pass computes the meaning of the state field and the MIR locals which are live -//! across a suspension point. There are however two hardcoded generator states: +//! across a suspension point. There are however three hardcoded generator states: //! 0 - Generator have not been resumed yet //! 1 - Generator has returned / is completed //! 
2 - Generator has been poisoned @@ -65,20 +55,20 @@ use rustc::mir::*; use rustc::mir::visit::{PlaceContext, Visitor, MutVisitor}; use rustc::ty::{self, TyCtxt, AdtDef, Ty}; use rustc::ty::layout::VariantIdx; -use rustc::ty::subst::Substs; -use util::dump_mir; -use util::liveness::{self, IdentityMap}; +use rustc::ty::subst::SubstsRef; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::bit_set::BitSet; use std::borrow::Cow; use std::iter::once; use std::mem; -use transform::{MirPass, MirSource}; -use transform::simplify; -use transform::no_landing_pads::no_landing_pads; -use dataflow::{do_dataflow, DebugFormatted, state_for_location}; -use dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals}; +use crate::transform::{MirPass, MirSource}; +use crate::transform::simplify; +use crate::transform::no_landing_pads::no_landing_pads; +use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location}; +use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals}; +use crate::util::dump_mir; +use crate::util::liveness; pub struct StateTransform; @@ -112,7 +102,7 @@ impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor { place: &mut Place<'tcx>, context: PlaceContext<'tcx>, location: Location) { - if *place == Place::Local(self_arg()) { + if *place == Place::Base(PlaceBase::Local(self_arg())) { *place = Place::Projection(Box::new(Projection { base: place.clone(), elem: ProjectionElem::Deref, @@ -123,21 +113,55 @@ impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor { } } +struct PinArgVisitor<'tcx> { + ref_gen_ty: Ty<'tcx>, +} + +impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> { + fn visit_local(&mut self, + local: &mut Local, + _: PlaceContext<'tcx>, + _: Location) { + assert_ne!(*local, self_arg()); + } + + fn visit_place(&mut self, + place: &mut Place<'tcx>, + context: PlaceContext<'tcx>, + location: Location) { + if *place == Place::Base(PlaceBase::Local(self_arg())) { + *place = Place::Projection(Box::new(Projection { + base: place.clone(), + elem: ProjectionElem::Field(Field::new(0), self.ref_gen_ty), + })); + } else { + self.super_place(place, context, location); + } + } +} + fn self_arg() -> Local { Local::new(1) } +/// Generator have not been resumed yet +const UNRESUMED: u32 = 0; +/// Generator has returned / is completed +const RETURNED: u32 = 1; +/// Generator has been poisoned +const POISONED: u32 = 2; + struct SuspensionPoint { state: u32, resume: BasicBlock, drop: Option, - storage_liveness: liveness::LiveVarSet, + storage_liveness: liveness::LiveVarSet, } struct TransformVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, state_adt_ref: &'tcx AdtDef, - state_substs: &'tcx Substs<'tcx>, + state_substs: SubstsRef<'tcx>, // The index of the generator state in the generator struct state_field: usize, @@ -148,7 +172,7 @@ struct TransformVisitor<'a, 'tcx: 'a> { // A map from a suspension point in a block to the locals which have live storage at that point // FIXME(eddyb) This should use `IndexVec>`. 
- storage_liveness: FxHashMap>, + storage_liveness: FxHashMap, // A list of suspension points, generated during the transform suspension_points: Vec, @@ -166,7 +190,7 @@ impl<'a, 'tcx> TransformVisitor<'a, 'tcx> { // Create a Place referencing a generator struct field fn make_field(&self, idx: usize, ty: Ty<'tcx>) -> Place<'tcx> { - let base = Place::Local(self_arg()); + let base = Place::Base(PlaceBase::Local(self_arg())); let field = Projection { base: base, elem: ProjectionElem::Field(Field::new(idx), ty), @@ -181,11 +205,11 @@ impl<'a, 'tcx> TransformVisitor<'a, 'tcx> { span: source_info.span, ty: self.tcx.types.u32, user_ty: None, - literal: ty::Const::from_bits( + literal: self.tcx.mk_const(ty::Const::from_bits( self.tcx, state_disc.into(), ty::ParamEnv::empty().and(self.tcx.types.u32) - ), + )), }); Statement { source_info, @@ -206,7 +230,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { place: &mut Place<'tcx>, context: PlaceContext<'tcx>, location: Location) { - if let Place::Local(l) = *place { + if let Place::Base(PlaceBase::Local(l)) = *place { // Replace an Local in the remap with a generator struct access if let Some(&(ty, idx)) = self.remap.get(&l) { *place = self.make_field(idx, ty); @@ -232,7 +256,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { let ret_val = match data.terminator().kind { TerminatorKind::Return => Some((VariantIdx::new(1), None, - Operand::Move(Place::Local(self.new_ret_local)), + Operand::Move(Place::Base(PlaceBase::Local(self.new_ret_local))), None)), TerminatorKind::Yield { ref value, resume, drop } => Some((VariantIdx::new(0), Some(resume), @@ -246,7 +270,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { // We must assign the value first in case it gets declared dead below data.statements.push(Statement { source_info, - kind: StatementKind::Assign(Place::Local(RETURN_PLACE), + kind: StatementKind::Assign(Place::RETURN_PLACE, box self.make_state(state_idx, v)), }); let state = if let Some(resume) = resume { // Yield @@ -261,7 +285,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for TransformVisitor<'a, 'tcx> { state } else { // Return - 1 // state for returned + RETURNED // state for returned }; data.statements.push(self.set_state(state, source_info)); data.terminator.as_mut().unwrap().kind = TerminatorKind::Return; @@ -296,6 +320,23 @@ fn make_generator_state_argument_indirect<'a, 'tcx>( DerefArgVisitor.visit_mir(mir); } +fn make_generator_state_argument_pinned<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + mir: &mut Mir<'tcx>) { + let ref_gen_ty = mir.local_decls.raw[1].ty; + + let pin_did = tcx.lang_items().pin_type().unwrap(); + let pin_adt_ref = tcx.adt_def(pin_did); + let substs = tcx.intern_substs(&[ref_gen_ty.into()]); + let pin_ref_gen_ty = tcx.mk_adt(pin_adt_ref, substs); + + // Replace the by ref generator argument + mir.local_decls.raw[1].ty = pin_ref_gen_ty; + + // Add the Pin field access to accesses of the generator state + PinArgVisitor { ref_gen_ty }.visit_mir(mir); +} + fn replace_result_variable<'tcx>( ret_ty: Ty<'tcx>, mir: &mut Mir<'tcx>, @@ -324,7 +365,7 @@ fn replace_result_variable<'tcx>( new_ret_local } -struct StorageIgnored(liveness::LiveVarSet); +struct StorageIgnored(liveness::LiveVarSet); impl<'tcx> Visitor<'tcx> for StorageIgnored { fn visit_statement(&mut self, @@ -339,53 +380,23 @@ impl<'tcx> Visitor<'tcx> for StorageIgnored { } } -struct BorrowedLocals(liveness::LiveVarSet); - -fn mark_as_borrowed<'tcx>(place: &Place<'tcx>, locals: &mut BorrowedLocals) { - match *place { - 
Place::Local(l) => { locals.0.insert(l); }, - Place::Promoted(_) | - Place::Static(..) => (), - Place::Projection(ref proj) => { - match proj.elem { - // For derefs we don't look any further. - // If it pointed to a Local, it would already be borrowed elsewhere - ProjectionElem::Deref => (), - _ => mark_as_borrowed(&proj.base, locals) - } - } - } -} - -impl<'tcx> Visitor<'tcx> for BorrowedLocals { - fn visit_rvalue(&mut self, - rvalue: &Rvalue<'tcx>, - location: Location) { - if let Rvalue::Ref(_, _, ref place) = *rvalue { - mark_as_borrowed(place, self); - } - - self.super_rvalue(rvalue, location) - } -} - fn locals_live_across_suspend_points( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, - source: MirSource, + source: MirSource<'tcx>, movable: bool, ) -> ( - liveness::LiveVarSet, - FxHashMap>, + liveness::LiveVarSet, + FxHashMap, ) { let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len()); - let node_id = tcx.hir().as_local_node_id(source.def_id).unwrap(); + let def_id = source.def_id(); // Calculate when MIR locals have live storage. This gives us an upper bound of their // lifetimes. let storage_live_analysis = MaybeStorageLive::new(mir); let storage_live = - do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, storage_live_analysis, + do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, storage_live_analysis, |bd, p| DebugFormatted::new(&bd.mir().local_decls[p])); // Find the MIR locals which do not use StorageLive/StorageDead statements. @@ -399,7 +410,7 @@ fn locals_live_across_suspend_points( let borrowed_locals = if !movable { let analysis = HaveBeenBorrowedLocals::new(mir); let result = - do_dataflow(tcx, mir, node_id, &[], &dead_unwinds, analysis, + do_dataflow(tcx, mir, def_id, &[], &dead_unwinds, analysis, |bd, p| DebugFormatted::new(&bd.mir().local_decls[p])); Some((analysis, result)) } else { @@ -410,14 +421,12 @@ fn locals_live_across_suspend_points( let mut set = liveness::LiveVarSet::new_empty(mir.local_decls.len()); let mut liveness = liveness::liveness_of_locals( mir, - &IdentityMap::new(mir), ); liveness::dump_mir( tcx, "generator_liveness", source, mir, - &IdentityMap::new(mir), &liveness, ); @@ -480,14 +489,14 @@ fn locals_live_across_suspend_points( } fn compute_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, upvars: Vec>, interior: Ty<'tcx>, movable: bool, mir: &mut Mir<'tcx>) -> (FxHashMap, usize)>, GeneratorLayout<'tcx>, - FxHashMap>) + FxHashMap) { // Use a liveness analysis to compute locals which are live across a suspension point let (live_locals, storage_liveness) = locals_live_across_suspend_points(tcx, @@ -577,9 +586,9 @@ fn insert_switch<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn elaborate_generator_drops<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, mir: &mut Mir<'tcx>) { - use util::elaborate_drops::{elaborate_drop, Unwind}; - use util::patch::MirPatch; - use shim::DropShimElaborator; + use crate::util::elaborate_drops::{elaborate_drop, Unwind}; + use crate::util::patch::MirPatch; + use crate::shim::DropShimElaborator; // Note that `elaborate_drops` only drops the upvars of a generator, and // this is ok because `open_drop` can only be reached within that own @@ -588,50 +597,48 @@ fn elaborate_generator_drops<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let param_env = tcx.param_env(def_id); let gen = self_arg(); - for block in mir.basic_blocks().indices() { - let (target, unwind, source_info) = match mir.basic_blocks()[block].terminator() { + let mut elaborator = DropShimElaborator { + mir: mir, + patch: 
MirPatch::new(mir), + tcx, + param_env + }; + + for (block, block_data) in mir.basic_blocks().iter_enumerated() { + let (target, unwind, source_info) = match block_data.terminator() { &Terminator { source_info, kind: TerminatorKind::Drop { - location: Place::Local(local), + location: Place::Base(PlaceBase::Local(local)), target, unwind } } if local == gen => (target, unwind, source_info), _ => continue, }; - let unwind = if let Some(unwind) = unwind { - Unwind::To(unwind) - } else { + let unwind = if block_data.is_cleanup { Unwind::InCleanup + } else { + Unwind::To(unwind.unwrap_or_else(|| elaborator.patch.resume_block())) }; - let patch = { - let mut elaborator = DropShimElaborator { - mir: &mir, - patch: MirPatch::new(mir), - tcx, - param_env - }; - elaborate_drop( - &mut elaborator, - source_info, - &Place::Local(gen), - (), - target, - unwind, - block - ); - elaborator.patch - }; - patch.apply(mir); + elaborate_drop( + &mut elaborator, + source_info, + &Place::Base(PlaceBase::Local(gen)), + (), + target, + unwind, + block, + ); } + elaborator.patch.apply(mir); } fn create_generator_drop_shim<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, transform: &TransformVisitor<'a, 'tcx>, def_id: DefId, - source: MirSource, + source: MirSource<'tcx>, gen_ty: Ty<'tcx>, mir: &Mir<'tcx>, drop_clean: BasicBlock) -> Mir<'tcx> { @@ -641,10 +648,10 @@ fn create_generator_drop_shim<'a, 'tcx>( let mut cases = create_cases(&mut mir, transform, |point| point.drop); - cases.insert(0, (0, drop_clean)); + cases.insert(0, (UNRESUMED, drop_clean)); - // The returned state (1) and the poisoned state (2) falls through to - // the default case which is just to return + // The returned state and the poisoned state fall through to the default + // case which is just to return insert_switch(tcx, &mut mir, cases, &transform, TerminatorKind::Return); @@ -689,7 +696,7 @@ fn create_generator_drop_shim<'a, 'tcx>( // Alias tracking must know we changed the type mir.basic_blocks_mut()[START_BLOCK].statements.insert(0, Statement { source_info, - kind: StatementKind::EscapeToRaw(Operand::Copy(Place::Local(self_arg()))), + kind: StatementKind::Retag(RetagKind::Raw, Place::Base(PlaceBase::Local(self_arg()))), }) } @@ -727,7 +734,9 @@ fn insert_panic_block<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: mir.span, ty: tcx.types.bool, user_ty: None, - literal: ty::Const::from_bool(tcx, false), + literal: tcx.mk_const( + ty::Const::from_bool(tcx, false), + ), }), expected: true, msg: message, @@ -752,33 +761,34 @@ fn create_generator_resume_function<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, transform: TransformVisitor<'a, 'tcx>, def_id: DefId, - source: MirSource, + source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { // Poison the generator when it unwinds for block in mir.basic_blocks_mut() { let source_info = block.terminator().source_info; if let &TerminatorKind::Resume = &block.terminator().kind { - block.statements.push(transform.set_state(1, source_info)); + block.statements.push(transform.set_state(POISONED, source_info)); } } let mut cases = create_cases(mir, &transform, |point| Some(point.resume)); - use rustc::mir::interpret::EvalErrorKind::{ + use rustc::mir::interpret::InterpError::{ GeneratorResumedAfterPanic, GeneratorResumedAfterReturn, }; - // Jump to the entry point on the 0 state - cases.insert(0, (0, BasicBlock::new(0))); - // Panic when resumed on the returned (1) state - cases.insert(1, (1, insert_panic_block(tcx, mir, GeneratorResumedAfterReturn))); - // Panic when resumed on the poisoned (2) state - cases.insert(2, (2, 
insert_panic_block(tcx, mir, GeneratorResumedAfterPanic))); + // Jump to the entry point on the unresumed + cases.insert(0, (UNRESUMED, BasicBlock::new(0))); + // Panic when resumed on the returned state + cases.insert(1, (RETURNED, insert_panic_block(tcx, mir, GeneratorResumedAfterReturn))); + // Panic when resumed on the poisoned state + cases.insert(2, (POISONED, insert_panic_block(tcx, mir, GeneratorResumedAfterPanic))); insert_switch(tcx, mir, cases, &transform, TerminatorKind::Unreachable); make_generator_state_argument_indirect(tcx, def_id, mir); + make_generator_state_argument_pinned(tcx, mir); no_landing_pads(tcx, mir); @@ -802,7 +812,7 @@ fn insert_clean_drop<'a, 'tcx>(mir: &mut Mir<'tcx>) -> BasicBlock { // Create a block to destroy an unresumed generators. This can only destroy upvars. let drop_clean = BasicBlock::new(mir.basic_blocks().len()); let term = TerminatorKind::Drop { - location: Place::Local(self_arg()), + location: Place::Base(PlaceBase::Local(self_arg())), target: return_block, unwind: None, }; @@ -862,7 +872,7 @@ fn create_cases<'a, 'tcx, F>(mir: &mut Mir<'tcx>, impl MirPass for StateTransform { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let yield_ty = if let Some(yield_ty) = mir.yield_ty { yield_ty @@ -873,7 +883,7 @@ impl MirPass for StateTransform { assert!(mir.generator_drop.is_none()); - let def_id = source.def_id; + let def_id = source.def_id(); // The first argument is the generator type passed by value let gen_ty = mir.local_decls.raw[1].ty; @@ -937,7 +947,7 @@ impl MirPass for StateTransform { mir.generator_layout = Some(layout); // Insert `drop(generator_struct)` which is used to drop upvars for generators in - // the unresumed (0) state. + // the unresumed state. // This is expanded to a drop ladder in `elaborate_generator_drops`. let drop_clean = insert_clean_drop(mir); diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs index afe0066df1f28..027ae70b06a17 100644 --- a/src/librustc_mir/transform/inline.rs +++ b/src/librustc_mir/transform/inline.rs @@ -1,16 +1,5 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
Inlining pass for MIR functions -use rustc::hir; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::DefId; @@ -20,14 +9,14 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::*; use rustc::mir::visit::*; use rustc::ty::{self, Instance, InstanceDef, ParamEnv, Ty, TyCtxt}; -use rustc::ty::subst::{Subst,Substs}; +use rustc::ty::subst::{Subst, SubstsRef}; use std::collections::VecDeque; use std::iter; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; use super::simplify::{remove_dead_blocks, CfgSimplifier}; -use syntax::{attr}; +use syntax::attr; use rustc_target::spec::abi::Abi; const DEFAULT_THRESHOLD: usize = 50; @@ -43,7 +32,7 @@ pub struct Inline; #[derive(Copy, Clone, Debug)] struct CallSite<'tcx> { callee: DefId, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, bb: BasicBlock, location: SourceInfo, } @@ -51,7 +40,7 @@ struct CallSite<'tcx> { impl MirPass for Inline { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, mir: &mut Mir<'tcx>) { if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 { Inliner { tcx, source }.run_pass(mir); @@ -61,7 +50,7 @@ impl MirPass for Inline { struct Inliner<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, } impl<'a, 'tcx> Inliner<'a, 'tcx> { @@ -80,19 +69,18 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let mut callsites = VecDeque::new(); - let param_env = self.tcx.param_env(self.source.def_id); + let param_env = self.tcx.param_env(self.source.def_id()); // Only do inlining into fn bodies. - let id = self.tcx.hir().as_local_node_id(self.source.def_id).unwrap(); - let body_owner_kind = self.tcx.hir().body_owner_kind(id); - - if let (hir::BodyOwnerKind::Fn, None) = (body_owner_kind, self.source.promoted) { - + let id = self.tcx.hir().as_local_hir_id(self.source.def_id()).unwrap(); + if self.tcx.hir().body_owner_kind_by_hir_id(id).is_fn_or_closure() + && self.source.promoted.is_none() + { for (bb, bb_data) in caller_mir.basic_blocks().iter_enumerated() { if let Some(callsite) = self.get_valid_function_call(bb, - bb_data, - caller_mir, - param_env) { + bb_data, + caller_mir, + param_env) { callsites.push_back(callsite); } } @@ -112,22 +100,34 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { continue; } - let callee_mir = match self.tcx.try_optimized_mir(callsite.location.span, - callsite.callee) { - Ok(callee_mir) if self.consider_optimizing(callsite, callee_mir) => { - self.tcx.subst_and_normalize_erasing_regions( - &callsite.substs, - param_env, - callee_mir, - ) + let self_node_id = self.tcx.hir().as_local_node_id(self.source.def_id()).unwrap(); + let callee_node_id = self.tcx.hir().as_local_node_id(callsite.callee); + + let callee_mir = if let Some(callee_node_id) = callee_node_id { + // Avoid a cycle here by only using `optimized_mir` only if we have + // a lower node id than the callee. This ensures that the callee will + // not inline us. This trick only works without incremental compilation. + // So don't do it if that is enabled. + if !self.tcx.dep_graph.is_fully_enabled() + && self_node_id.as_u32() < callee_node_id.as_u32() { + self.tcx.optimized_mir(callsite.callee) + } else { + continue; } - Ok(_) => continue, + } else { + // This cannot result in a cycle since the callee MIR is from another crate + // and is already optimized. 
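                // [Editor's note, not part of this patch] A rough sketch of the
                // id-ordering rule used in the local-crate branch above: if `a`
                // calls `b` and `b` calls `a`, only the caller with the smaller
                // node id asks for the callee's `optimized_mir`, so the query
                // graph stays acyclic:
                //
                //     fn a() { b() }   // lower id: may inline `b`
                //     fn b() { a() }   // higher id: skips inlining `a`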
+ self.tcx.optimized_mir(callsite.callee) + }; - Err(mut bug) => { - // FIXME(#43542) shouldn't have to cancel an error - bug.cancel(); - continue - } + let callee_mir = if self.consider_optimizing(callsite, callee_mir) { + self.tcx.subst_and_normalize_erasing_regions( + &callsite.substs, + param_env, + callee_mir, + ) + } else { + continue; }; let start = caller_mir.basic_blocks().len(); @@ -261,7 +261,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // inlining. This is to ensure that the final crate doesn't have MIR that // reference unexported symbols if callsite.callee.is_local() { - if callsite.substs.types().count() == 0 && !hinted { + if callsite.substs.non_erasable_generics().count() == 0 && !hinted { debug!(" callee is an exported function - not inlining"); return false; } @@ -288,7 +288,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // FIXME: Give a bonus to functions with only a single caller - let param_env = tcx.param_env(self.source.def_id); + let param_env = tcx.param_env(self.source.def_id()); let mut first_block = true; let mut cost = 0; @@ -319,8 +319,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { work_list.push(target); // If the location doesn't actually need dropping, treat it like // a regular goto. - let ty = location.ty(callee_mir, tcx).subst(tcx, callsite.substs); - let ty = ty.to_ty(tcx); + let ty = location.ty(callee_mir, tcx).subst(tcx, callsite.substs).ty; if ty.needs_drop(tcx, param_env) { cost += CALL_PENALTY; if let Some(unwind) = unwind { @@ -440,7 +439,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // Place could result in two different locations if `f` // writes to `i`. To prevent this we need to create a temporary // borrow of the place and pass the destination as `*temp` instead. - fn dest_needs_borrow(place: &Place) -> bool { + fn dest_needs_borrow(place: &Place<'_>) -> bool { match *place { Place::Projection(ref p) => { match p.elem { @@ -451,7 +450,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { } // Static variables need a borrow because the callee // might modify the same static. - Place::Static(_) => true, + Place::Base(PlaceBase::Static(_)) => true, _ => false } } @@ -468,7 +467,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let temp = LocalDecl::new_temp(ty, callsite.location.span); let tmp = caller_mir.local_decls.push(temp); - let tmp = Place::Local(tmp); + let tmp = Place::Base(PlaceBase::Local(tmp)); let stmt = Statement { source_info: callsite.location, @@ -562,8 +561,8 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_mir); assert!(args.next().is_none()); - let tuple = Place::Local(tuple); - let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_mir, tcx).to_ty(tcx).sty { + let tuple = Place::Base(PlaceBase::Local(tuple)); + let tuple_tys = if let ty::Tuple(s) = tuple.ty(caller_mir, tcx).ty.sty { s } else { bug!("Closure arguments are not passed as a tuple"); @@ -601,7 +600,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { // FIXME: Analysis of the usage of the arguments to avoid // unnecessary temporaries. 
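        // [Editor's note, not part of this patch] A sketch of the rule checked
        // just below: an argument that is already a `Move` out of a caller
        // temporary is forwarded unchanged, anything else is spilled into a
        // fresh temp first, roughly:
        //
        //     f(move _2)   // `_2` is a caller temp: passed as-is
        //     f(move _1)   // `_1` is a user variable: `_tmp = move _1; f(move _tmp)`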
- if let Operand::Move(Place::Local(local)) = arg { + if let Operand::Move(Place::Base(PlaceBase::Local(local))) = arg { if caller_mir.local_kind(local) == LocalKind::Temp { // Reuse the operand if it's a temporary already return local; @@ -619,7 +618,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> { let stmt = Statement { source_info: callsite.location, - kind: StatementKind::Assign(Place::Local(arg_tmp), box arg), + kind: StatementKind::Assign(Place::Base(PlaceBase::Local(arg_tmp)), box arg), }; caller_mir[callsite.bb].statements.push(stmt); arg_tmp @@ -667,7 +666,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { _location: Location) { if *local == RETURN_PLACE { match self.destination { - Place::Local(l) => { + Place::Base(PlaceBase::Local(l)) => { *local = l; return; }, @@ -688,16 +687,18 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { _location: Location) { match place { - Place::Local(RETURN_PLACE) => { + Place::Base(PlaceBase::Local(RETURN_PLACE)) => { // Return pointer; update the place itself *place = self.destination.clone(); }, - Place::Promoted(ref mut promoted) => { - if let Some(p) = self.promoted_map.get(promoted.0).cloned() { - promoted.0 = p; + Place::Base( + PlaceBase::Static(box Static { kind: StaticKind::Promoted(promoted), .. }) + ) => { + if let Some(p) = self.promoted_map.get(*promoted).cloned() { + *promoted = p; } }, - _ => self.super_place(place, _ctxt, _location), + _ => self.super_place(place, _ctxt, _location) } } @@ -709,16 +710,17 @@ impl<'a, 'tcx> MutVisitor<'tcx> for Integrator<'a, 'tcx> { fn visit_retag( &mut self, - fn_entry: &mut bool, - two_phase: &mut bool, + kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location, ) { - self.super_retag(fn_entry, two_phase, place, loc); + self.super_retag(kind, place, loc); // We have to patch all inlined retags to be aware that they are no longer // happening on function entry. - *fn_entry = false; + if *kind == RetagKind::FnEntry { + *kind = RetagKind::Default; + } } fn visit_terminator_kind(&mut self, block: BasicBlock, diff --git a/src/librustc_mir/transform/instcombine.rs b/src/librustc_mir/transform/instcombine.rs index 12780ef8be945..8187a81f0edab 100644 --- a/src/librustc_mir/transform/instcombine.rs +++ b/src/librustc_mir/transform/instcombine.rs @@ -1,29 +1,19 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Performs various peephole optimizations. -use rustc::mir::{Constant, Location, Place, Mir, Operand, ProjectionElem, Rvalue, Local}; +use rustc::mir::{Constant, Location, Place, PlaceBase, Mir, Operand, ProjectionElem, Rvalue, Local}; use rustc::mir::visit::{MutVisitor, Visitor}; -use rustc::ty::{TyCtxt, TyKind}; +use rustc::ty::{self, TyCtxt}; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use rustc_data_structures::indexed_vec::Idx; use std::mem; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct InstCombine; impl MirPass for InstCombine { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _: MirSource, + _: MirSource<'tcx>, mir: &mut Mir<'tcx>) { // We only run when optimizing MIR (at any level). 
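        // [Editor's note, not part of this patch] "any level" here means any
        // non-zero setting of the unstable debugging flag, roughly:
        //
        //     rustc -Z mir-opt-level=1 foo.rs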
if tcx.sess.opts.debugging_opts.mir_opt_level == 0 { @@ -55,7 +45,7 @@ impl<'tcx> MutVisitor<'tcx> for InstCombineVisitor<'tcx> { let new_place = match *rvalue { Rvalue::Ref(_, _, Place::Projection(ref mut projection)) => { // Replace with dummy - mem::replace(&mut projection.base, Place::Local(Local::new(0))) + mem::replace(&mut projection.base, Place::Base(PlaceBase::Local(Local::new(0)))) } _ => bug!("Detected `&*` but didn't find `&*`!"), }; @@ -92,15 +82,15 @@ impl<'b, 'a, 'tcx> Visitor<'tcx> for OptimizationFinder<'b, 'a, 'tcx> { fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { if let Rvalue::Ref(_, _, Place::Projection(ref projection)) = *rvalue { if let ProjectionElem::Deref = projection.elem { - if projection.base.ty(self.mir, self.tcx).to_ty(self.tcx).is_region_ptr() { + if projection.base.ty(self.mir, self.tcx).ty.is_region_ptr() { self.optimizations.and_stars.insert(location); } } } if let Rvalue::Len(ref place) = *rvalue { - let place_ty = place.ty(&self.mir.local_decls, self.tcx).to_ty(self.tcx); - if let TyKind::Array(_, len) = place_ty.sty { + let place_ty = place.ty(&self.mir.local_decls, self.tcx).ty; + if let ty::Array(_, len) = place_ty.sty { let span = self.mir.source_info(location).span; let ty = self.tcx.types.usize; let constant = Constant { span, ty, literal: len, user_ty: None }; diff --git a/src/librustc_mir/transform/lower_128bit.rs b/src/librustc_mir/transform/lower_128bit.rs index 80072153167f3..fd9d6bb5760b1 100644 --- a/src/librustc_mir/transform/lower_128bit.rs +++ b/src/librustc_mir/transform/lower_128bit.rs @@ -1,29 +1,18 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! 
Replaces 128-bit operators with lang item calls use rustc::hir::def_id::DefId; use rustc::middle::lang_items::LangItem; use rustc::mir::*; -use rustc::ty::{List, Ty, TyCtxt, TyKind}; +use rustc::ty::{self, List, Ty, TyCtxt}; use rustc_data_structures::indexed_vec::{Idx}; -use transform::{MirPass, MirSource}; -use syntax; +use crate::transform::{MirPass, MirSource}; pub struct Lower128Bit; impl MirPass for Lower128Bit { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let debugging_override = tcx.sess.opts.debugging_opts.lower_128bit_ops; let target_default = tcx.sess.host.options.i128_lowering; @@ -97,13 +86,13 @@ impl Lower128Bit { block.statements.push(Statement { source_info: source_info, kind: StatementKind::Assign( - Place::Local(local), + Place::Base(PlaceBase::Local(local)), box Rvalue::Cast( CastKind::Misc, rhs, rhs_override_ty.unwrap())), }); - rhs = Operand::Move(Place::Local(local)); + rhs = Operand::Move(Place::Base(PlaceBase::Local(local))); } let call_did = check_lang_item_type( @@ -146,10 +135,10 @@ fn check_lang_item_type<'a, 'tcx, D>( let sig = poly_sig.no_bound_vars().unwrap(); let lhs_ty = lhs.ty(local_decls, tcx); let rhs_ty = rhs.ty(local_decls, tcx); - let place_ty = place.ty(local_decls, tcx).to_ty(tcx); + let place_ty = place.ty(local_decls, tcx).ty; let expected = [lhs_ty, rhs_ty, place_ty]; assert_eq!(sig.inputs_and_output[..], expected, - "lang item {}", tcx.def_symbol_name(did)); + "lang item `{}`", tcx.def_path_str(did)); did } @@ -192,10 +181,10 @@ impl RhsKind { } } -fn sign_of_128bit(ty: Ty) -> Option { +fn sign_of_128bit(ty: Ty<'_>) -> Option { match ty.sty { - TyKind::Int(syntax::ast::IntTy::I128) => Some(true), - TyKind::Uint(syntax::ast::UintTy::U128) => Some(false), + ty::Int(syntax::ast::IntTy::I128) => Some(true), + ty::Uint(syntax::ast::UintTy::U128) => Some(false), _ => None, } } diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs index 005c1a08affd5..27cb87f5dcaa0 100644 --- a/src/librustc_mir/transform/mod.rs +++ b/src/librustc_mir/transform/mod.rs @@ -1,24 +1,13 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use borrow_check::nll::type_check; -use build; +use crate::borrow_check::nll::type_check; +use crate::build; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::mir::{Mir, MirPhase, Promoted}; -use rustc::ty::TyCtxt; +use rustc::ty::{TyCtxt, InstanceDef}; use rustc::ty::query::Providers; use rustc::ty::steal::Steal; use rustc::hir; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::util::nodemap::DefIdSet; -use rustc_data_structures::sync::Lrc; use std::borrow::Cow; use syntax::ast; use syntax_pos::Span; @@ -36,7 +25,7 @@ pub mod elaborate_drops; pub mod add_call_guards; pub mod promote_consts; pub mod qualify_consts; -mod qualify_min_const_fn; +pub mod qualify_min_const_fn; pub mod remove_noop_landing_pads; pub mod dump_mir; pub mod deaggregator; @@ -48,7 +37,7 @@ pub mod inline; pub mod lower_128bit; pub mod uniform_array_move_out; -pub(crate) fn provide(providers: &mut Providers) { +pub(crate) fn provide(providers: &mut Providers<'_>) { self::qualify_consts::provide(providers); self::check_unsafety::provide(providers); *providers = Providers { @@ -66,10 +55,10 @@ fn is_mir_available<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> boo tcx.mir_keys(def_id.krate).contains(&def_id) } -/// Finds the full set of def-ids within the current crate that have +/// Finds the full set of `DefId`s within the current crate that have /// MIR associated with them. fn mir_keys<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: CrateNum) - -> Lrc { + -> &'tcx DefIdSet { assert_eq!(krate, LOCAL_CRATE); let mut set = DefIdSet::default(); @@ -88,10 +77,10 @@ fn mir_keys<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: CrateNum) v: &'tcx hir::VariantData, _: ast::Name, _: &'tcx hir::Generics, - _: ast::NodeId, + _: hir::HirId, _: Span) { - if let hir::VariantData::Tuple(_, node_id) = *v { - self.set.insert(self.tcx.hir().local_def_id(node_id)); + if let hir::VariantData::Tuple(_, hir_id) = *v { + self.set.insert(self.tcx.hir().local_def_id_from_hir_id(hir_id)); } intravisit::walk_struct_def(self, v) } @@ -104,7 +93,7 @@ fn mir_keys<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, krate: CrateNum) set: &mut set, }.as_deep_visitor()); - Lrc::new(set) + tcx.arena.alloc(set) } fn mir_built<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal> { @@ -114,20 +103,25 @@ fn mir_built<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Stea /// Where a specific Mir comes from. #[derive(Debug, Copy, Clone)] -pub struct MirSource { - pub def_id: DefId, +pub struct MirSource<'tcx> { + pub instance: InstanceDef<'tcx>, /// If `Some`, this is a promoted rvalue within the parent function. 
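    /// [Editor's note, not part of this patch] For example, the `&(1 + 2)`
    /// promoted out of `fn f() -> &'static i32 { &(1 + 2) }` gets its own MIR
    /// body, and a `MirSource` for that body carries the `Promoted` index here.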
pub promoted: Option, } -impl MirSource { +impl<'tcx> MirSource<'tcx> { pub fn item(def_id: DefId) -> Self { MirSource { - def_id, + instance: InstanceDef::Item(def_id), promoted: None } } + + #[inline] + pub fn def_id(&self) -> DefId { + self.instance.def_id() + } } /// Generates a default name for the pass based on the name of the @@ -151,14 +145,14 @@ pub trait MirPass { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - source: MirSource, + source: MirSource<'tcx>, mir: &mut Mir<'tcx>); } pub fn run_passes( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &mut Mir<'tcx>, - def_id: DefId, + instance: InstanceDef<'tcx>, mir_phase: MirPhase, passes: &[&dyn MirPass], ) { @@ -170,7 +164,7 @@ pub fn run_passes( } let source = MirSource { - def_id, + instance, promoted, }; let mut index = 0; @@ -208,7 +202,7 @@ fn mir_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Stea let _ = tcx.unsafety_check_result(def_id); let mut mir = tcx.mir_built(def_id).steal(); - run_passes(tcx, &mut mir, def_id, MirPhase::Const, &[ + run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Const, &[ // What we need to do constant evaluation. &simplify::SimplifyCfg::new("initial"), &type_check::TypeckMir, @@ -219,15 +213,15 @@ fn mir_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Stea } fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Steal> { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - if let hir::BodyOwnerKind::Const = tcx.hir().body_owner_kind(node_id) { + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + if let hir::BodyOwnerKind::Const = tcx.hir().body_owner_kind_by_hir_id(hir_id) { // Ensure that we compute the `mir_const_qualif` for constants at // this point, before we steal the mir-const result. let _ = tcx.mir_const_qualif(def_id); } let mut mir = tcx.mir_const(def_id).steal(); - run_passes(tcx, &mut mir, def_id, MirPhase::Validated, &[ + run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Validated, &[ // What we need to run borrowck etc. &qualify_consts::QualifyAndPromoteConstants, &simplify::SimplifyCfg::new("qualify-consts"), @@ -238,23 +232,19 @@ fn mir_validated<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx Mir<'tcx> { // (Mir-)Borrowck uses `mir_validated`, so we have to force it to // execute before we can steal. - let _ = tcx.mir_borrowck(def_id); + tcx.ensure().mir_borrowck(def_id); if tcx.use_ast_borrowck() { - let _ = tcx.borrowck(def_id); + tcx.ensure().borrowck(def_id); } let mut mir = tcx.mir_validated(def_id).steal(); - run_passes(tcx, &mut mir, def_id, MirPhase::Optimized, &[ - // Remove all things not needed by analysis + run_passes(tcx, &mut mir, InstanceDef::Item(def_id), MirPhase::Optimized, &[ + // Remove all things only needed by analysis &no_landing_pads::NoLandingPads, &simplify_branches::SimplifyBranches::new("initial"), &remove_noop_landing_pads::RemoveNoopLandingPads, - // Remove all `AscribeUserType` statements. 
- &cleanup_post_borrowck::CleanAscribeUserType, - // Remove all `FakeRead` statements and the borrows that are only - // used for checking matches - &cleanup_post_borrowck::CleanFakeReadsAndBorrows, + &cleanup_post_borrowck::CleanupNonCodegenStatements, &simplify::SimplifyCfg::new("early-opt"), @@ -294,6 +284,7 @@ fn optimized_mir<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx &simplify_branches::SimplifyBranches::new("after-const-prop"), &deaggregator::Deaggregator, ©_prop::CopyPropagation, + &simplify_branches::SimplifyBranches::new("after-copy-prop"), &remove_noop_landing_pads::RemoveNoopLandingPads, &simplify::SimplifyCfg::new("final"), &simplify::SimplifyLocals, diff --git a/src/librustc_mir/transform/no_landing_pads.rs b/src/librustc_mir/transform/no_landing_pads.rs index c8f171d4160c6..089d9b9b54454 100644 --- a/src/librustc_mir/transform/no_landing_pads.rs +++ b/src/librustc_mir/transform/no_landing_pads.rs @@ -1,27 +1,17 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass removes the unwind branch of all the terminators when the no-landing-pads option is //! specified. use rustc::ty::TyCtxt; use rustc::mir::*; use rustc::mir::visit::MutVisitor; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct NoLandingPads; impl MirPass for NoLandingPads { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _: MirSource, + _: MirSource<'tcx>, mir: &mut Mir<'tcx>) { no_landing_pads(tcx, mir) } diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs index 7f8dfc111a4bd..43723aaf568da 100644 --- a/src/librustc_mir/transform/promote_consts.rs +++ b/src/librustc_mir/transform/promote_consts.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A pass that promotes borrows of constant rvalues. //! //! The rvalues considered constant are trees of temps, @@ -140,7 +130,8 @@ impl<'tcx> Visitor<'tcx> for TempCollector<'tcx> { } } -pub fn collect_temps(mir: &Mir, rpo: &mut ReversePostorder) -> IndexVec { +pub fn collect_temps(mir: &Mir<'_>, + rpo: &mut ReversePostorder<'_, '_>) -> IndexVec { let mut collector = TempCollector { temps: IndexVec::from_elem(TempState::Undefined, &mir.local_decls), span: mir.span, @@ -187,11 +178,11 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { span, scope: OUTERMOST_SOURCE_SCOPE }, - kind: StatementKind::Assign(Place::Local(dest), box rvalue) + kind: StatementKind::Assign(Place::Base(PlaceBase::Local(dest)), box rvalue) }); } - /// Copy the initialization of this temp to the + /// Copies the initialization of this temp to the /// promoted MIR, recursing through temps. 
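    /// [Editor's note, not part of this patch] Roughly: for a candidate borrow
    /// such as `&(1 + 2)` in a runtime `fn`, the statements initializing the
    /// temps behind `1 + 2` are copied into the fresh promoted body, whose
    /// return place then holds the value backing the `'static` borrow.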
fn promote_temp(&mut self, temp: Local) -> Local { let old_keep_original = self.keep_original; @@ -277,7 +268,9 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { func, args, cleanup: None, - destination: Some((Place::Local(new_temp), new_target)), + destination: Some( + (Place::Base(PlaceBase::Local(new_temp)), new_target) + ), from_hir_call, }, ..terminator @@ -299,9 +292,10 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { let promoted_id = Promoted::new(self.source.promoted.len()); let mut promoted_place = |ty, span| { promoted.span = span; - promoted.local_decls[RETURN_PLACE] = - LocalDecl::new_return_place(ty, span); - Place::Promoted(box (promoted_id, ty)) + promoted.local_decls[RETURN_PLACE] = LocalDecl::new_return_place(ty, span); + Place::Base( + PlaceBase::Static(box Static{ kind: StaticKind::Promoted(promoted_id), ty }) + ) }; let (blocks, local_decls) = self.source.basic_blocks_and_local_decls_mut(); match candidate { @@ -316,7 +310,7 @@ impl<'a, 'tcx> Promoter<'a, 'tcx> { place = &mut proj.base; }; - let ty = place.ty(local_decls, self.tcx).to_ty(self.tcx); + let ty = place.ty(local_decls, self.tcx).ty; let span = statement.source_info.span; Operand::Move(mem::replace(place, promoted_place(ty, span))) @@ -382,7 +376,7 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, match candidate { Candidate::Ref(Location { block, statement_index }) => { match mir[block].statements[statement_index].kind { - StatementKind::Assign(Place::Local(local), _) => { + StatementKind::Assign(Place::Base(PlaceBase::Local(local)), _) => { if temps[local] == TempState::PromotedOut { // Already promoted. continue; @@ -410,9 +404,11 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, IndexVec::new(), None, initial_locals, + IndexVec::new(), 0, vec![], - mir.span + mir.span, + vec![], ), tcx, source: mir, @@ -427,7 +423,7 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, for block in mir.basic_blocks_mut() { block.statements.retain(|statement| { match statement.kind { - StatementKind::Assign(Place::Local(index), _) | + StatementKind::Assign(Place::Base(PlaceBase::Local(index)), _) | StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => { !promoted(index) @@ -437,7 +433,7 @@ pub fn promote_candidates<'a, 'tcx>(mir: &mut Mir<'tcx>, }); let terminator = block.terminator_mut(); match terminator.kind { - TerminatorKind::Drop { location: Place::Local(index), target, .. } => { + TerminatorKind::Drop { location: Place::Base(PlaceBase::Local(index)), target, .. } => { if promoted(index) { terminator.kind = TerminatorKind::Goto { target, diff --git a/src/librustc_mir/transform/qualify_consts.rs b/src/librustc_mir/transform/qualify_consts.rs index 5f08dee872859..1faa9ad0d0da4 100644 --- a/src/librustc_mir/transform/qualify_consts.rs +++ b/src/librustc_mir/transform/qualify_consts.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A pass that qualifies constness of temporaries in constants, //! static initializers and functions and also drives promotion. //! 
@@ -21,69 +11,27 @@ use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi::Abi; use rustc::hir; use rustc::hir::def_id::DefId; -use rustc::mir::interpret::ConstValue; use rustc::traits::{self, TraitEngine}; use rustc::ty::{self, TyCtxt, Ty, TypeFoldable}; use rustc::ty::cast::CastTy; use rustc::ty::query::Providers; use rustc::mir::*; +use rustc::mir::interpret::ConstValue; use rustc::mir::traversal::ReversePostorder; use rustc::mir::visit::{PlaceContext, Visitor, MutatingUseContext, NonMutatingUseContext}; use rustc::middle::lang_items; use rustc::session::config::nightly_options; use syntax::ast::LitKind; -use syntax::feature_gate::{UnstableFeatures, feature_err, emit_feature_err, GateIssue}; +use syntax::feature_gate::{emit_feature_err, GateIssue}; use syntax_pos::{Span, DUMMY_SP}; use std::fmt; +use std::ops::{Deref, Index, IndexMut}; use std::usize; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; use super::promote_consts::{self, Candidate, TempState}; -bitflags! { - // Borrows of temporaries can be promoted only if - // they have none of these qualifications, with - // the exception of `STATIC_REF` (in statics only). - struct Qualif: u8 { - // Constant containing interior mutability (UnsafeCell). - const MUTABLE_INTERIOR = 1 << 0; - - // Constant containing an ADT that implements Drop. - const NEEDS_DROP = 1 << 1; - - // Function argument. - const FN_ARGUMENT = 1 << 2; - - // Not constant at all - non-`const fn` calls, asm!, - // pointer comparisons, ptr-to-int casts, etc. - const NOT_CONST = 1 << 3; - - // Refers to temporaries which cannot be promoted as - // promote_consts decided they weren't simple enough. - const NOT_PROMOTABLE = 1 << 4; - - // Const items can only have MUTABLE_INTERIOR - // and NOT_PROMOTABLE without producing an error. - const CONST_ERROR = !Qualif::MUTABLE_INTERIOR.bits & - !Qualif::NOT_PROMOTABLE.bits; - } -} - -impl<'a, 'tcx> Qualif { - /// Remove flags which are impossible for the given type. - fn restrict(&mut self, ty: Ty<'tcx>, - tcx: TyCtxt<'a, 'tcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>) { - if ty.is_freeze(tcx, param_env, DUMMY_SP) { - *self = *self - Qualif::MUTABLE_INTERIOR; - } - if !ty.needs_drop(tcx, param_env) { - *self = *self - Qualif::NEEDS_DROP; - } - } -} - /// What kind of item we are in. #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum Mode { @@ -95,7 +43,7 @@ enum Mode { } impl fmt::Display for Mode { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Mode::Const => write!(f, "constant"), Mode::Static | Mode::StaticMut => write!(f, "static"), @@ -105,27 +53,568 @@ impl fmt::Display for Mode { } } -struct Qualifier<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { +const QUALIF_COUNT: usize = 4; + +// FIXME(eddyb) once we can use const generics, replace this array with +// something like `IndexVec` but for fixed-size arrays (`IndexArray`?). 
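// [Editor's note, not part of this patch] The intent, shown with the marker
// types defined further below and the `Index`/`IndexMut` impls over `IDX`:
//
//     let mut q: PerQualif<bool> = PerQualif::default();
//     q[HasMutInterior] = true;   // slot 0
//     assert!(!q[NeedsDrop]);     // slot 1 still false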
+#[derive(Copy, Clone, Default)] +struct PerQualif([T; QUALIF_COUNT]); + +impl PerQualif { + fn new(x: T) -> Self { + PerQualif([x.clone(), x.clone(), x.clone(), x]) + } +} + +impl PerQualif { + fn as_mut(&mut self) -> PerQualif<&mut T> { + let [x0, x1, x2, x3] = &mut self.0; + PerQualif([x0, x1, x2, x3]) + } + + fn zip(self, other: PerQualif) -> PerQualif<(T, U)> { + let [x0, x1, x2, x3] = self.0; + let [y0, y1, y2, y3] = other.0; + PerQualif([(x0, y0), (x1, y1), (x2, y2), (x3, y3)]) + } +} + +impl PerQualif { + fn encode_to_bits(self) -> u8 { + self.0.iter().enumerate().fold(0, |bits, (i, &qualif)| { + bits | ((qualif as u8) << i) + }) + } + + fn decode_from_bits(bits: u8) -> Self { + let mut qualifs = Self::default(); + for (i, qualif) in qualifs.0.iter_mut().enumerate() { + *qualif = (bits & (1 << i)) != 0; + } + qualifs + } +} + +impl Index for PerQualif { + type Output = T; + + fn index(&self, _: Q) -> &T { + &self.0[Q::IDX] + } +} + +impl IndexMut for PerQualif { + fn index_mut(&mut self, _: Q) -> &mut T { + &mut self.0[Q::IDX] + } +} + +struct ConstCx<'a, 'tcx> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, mode: Mode, + mir: &'a Mir<'tcx>, + + per_local: PerQualif>, +} + +impl<'a, 'tcx> ConstCx<'a, 'tcx> { + fn is_const_panic_fn(&self, def_id: DefId) -> bool { + Some(def_id) == self.tcx.lang_items().panic_fn() || + Some(def_id) == self.tcx.lang_items().begin_panic_fn() + } +} + +#[derive(Copy, Clone, Debug)] +enum ValueSource<'a, 'tcx> { + Rvalue(&'a Rvalue<'tcx>), + Call { + callee: &'a Operand<'tcx>, + args: &'a [Operand<'tcx>], + return_ty: Ty<'tcx>, + }, +} + +trait Qualif { + const IDX: usize; + + /// Return the qualification that is (conservatively) correct for any value + /// of the type, or `None` if the qualification is not value/type-based. + fn in_any_value_of_ty(_cx: &ConstCx<'_, 'tcx>, _ty: Ty<'tcx>) -> Option { + None + } + + /// Return a mask for the qualification, given a type. This is `false` iff + /// no value of that type can have the qualification. + fn mask_for_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool { + Self::in_any_value_of_ty(cx, ty).unwrap_or(true) + } + + fn in_local(cx: &ConstCx<'_, '_>, local: Local) -> bool { + cx.per_local.0[Self::IDX].contains(local) + } + + fn in_static(_cx: &ConstCx<'_, 'tcx>, _static: &Static<'tcx>) -> bool { + // FIXME(eddyb) should we do anything here for value properties? + false + } + + fn in_projection_structurally( + cx: &ConstCx<'_, 'tcx>, + proj: &PlaceProjection<'tcx>, + ) -> bool { + let base_qualif = Self::in_place(cx, &proj.base); + let qualif = base_qualif && Self::mask_for_ty( + cx, + proj.base.ty(cx.mir, cx.tcx) + .projection_ty(cx.tcx, &proj.elem) + .ty, + ); + match proj.elem { + ProjectionElem::Deref | + ProjectionElem::Subslice { .. } | + ProjectionElem::Field(..) | + ProjectionElem::ConstantIndex { .. } | + ProjectionElem::Downcast(..) => qualif, + + ProjectionElem::Index(local) => qualif || Self::in_local(cx, local), + } + } + + fn in_projection(cx: &ConstCx<'_, 'tcx>, proj: &PlaceProjection<'tcx>) -> bool { + Self::in_projection_structurally(cx, proj) + } + + fn in_place(cx: &ConstCx<'_, 'tcx>, place: &Place<'tcx>) -> bool { + match *place { + Place::Base(PlaceBase::Local(local)) => Self::in_local(cx, local), + Place::Base(PlaceBase::Static(box Static {kind: StaticKind::Promoted(_), .. 
})) => + bug!("qualifying already promoted MIR"), + Place::Base(PlaceBase::Static(ref static_)) => { + Self::in_static(cx, static_) + }, + Place::Projection(ref proj) => Self::in_projection(cx, proj), + } + } + + fn in_operand(cx: &ConstCx<'_, 'tcx>, operand: &Operand<'tcx>) -> bool { + match *operand { + Operand::Copy(ref place) | + Operand::Move(ref place) => Self::in_place(cx, place), + + Operand::Constant(ref constant) => { + if let ConstValue::Unevaluated(def_id, _) = constant.literal.val { + // Don't peek inside trait associated constants. + if cx.tcx.trait_of_item(def_id).is_some() { + Self::in_any_value_of_ty(cx, constant.ty).unwrap_or(false) + } else { + let (bits, _) = cx.tcx.at(constant.span).mir_const_qualif(def_id); + + let qualif = PerQualif::decode_from_bits(bits).0[Self::IDX]; + + // Just in case the type is more specific than + // the definition, e.g., impl associated const + // with type parameters, take it into account. + qualif && Self::mask_for_ty(cx, constant.ty) + } + } else { + false + } + } + } + } + + fn in_rvalue_structurally(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool { + match *rvalue { + Rvalue::NullaryOp(..) => false, + + Rvalue::Discriminant(ref place) | + Rvalue::Len(ref place) => Self::in_place(cx, place), + + Rvalue::Use(ref operand) | + Rvalue::Repeat(ref operand, _) | + Rvalue::UnaryOp(_, ref operand) | + Rvalue::Cast(_, ref operand, _) => Self::in_operand(cx, operand), + + Rvalue::BinaryOp(_, ref lhs, ref rhs) | + Rvalue::CheckedBinaryOp(_, ref lhs, ref rhs) => { + Self::in_operand(cx, lhs) || Self::in_operand(cx, rhs) + } + + Rvalue::Ref(_, _, ref place) => { + // Special-case reborrows to be more like a copy of the reference. + if let Place::Projection(ref proj) = *place { + if let ProjectionElem::Deref = proj.elem { + let base_ty = proj.base.ty(cx.mir, cx.tcx).ty; + if let ty::Ref(..) = base_ty.sty { + return Self::in_place(cx, &proj.base); + } + } + } + + Self::in_place(cx, place) + } + + Rvalue::Aggregate(_, ref operands) => { + operands.iter().any(|o| Self::in_operand(cx, o)) + } + } + } + + fn in_rvalue(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool { + Self::in_rvalue_structurally(cx, rvalue) + } + + fn in_call( + cx: &ConstCx<'_, 'tcx>, + _callee: &Operand<'tcx>, + _args: &[Operand<'tcx>], + return_ty: Ty<'tcx>, + ) -> bool { + // Be conservative about the returned value of a const fn. + Self::in_any_value_of_ty(cx, return_ty).unwrap_or(false) + } + + fn in_value(cx: &ConstCx<'_, 'tcx>, source: ValueSource<'_, 'tcx>) -> bool { + match source { + ValueSource::Rvalue(rvalue) => Self::in_rvalue(cx, rvalue), + ValueSource::Call { callee, args, return_ty } => { + Self::in_call(cx, callee, args, return_ty) + } + } + } +} + +// Constant containing interior mutability (UnsafeCell). +struct HasMutInterior; + +impl Qualif for HasMutInterior { + const IDX: usize = 0; + + fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> Option { + Some(!ty.is_freeze(cx.tcx, cx.param_env, DUMMY_SP)) + } + + fn in_rvalue(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool { + match *rvalue { + // Returning `true` for `Rvalue::Ref` indicates the borrow isn't + // allowed in constants (and the `Checker` will error), and/or it + // won't be promoted, due to `&mut ...` or interior mutability. + Rvalue::Ref(_, kind, ref place) => { + let ty = place.ty(cx.mir, cx.tcx).ty; + + if let BorrowKind::Mut { .. } = kind { + // In theory, any zero-sized value could be borrowed + // mutably without consequences. 
However, only &mut [] + // is allowed right now, and only in functions. + if cx.mode == Mode::StaticMut { + // Inside a `static mut`, &mut [...] is also allowed. + match ty.sty { + ty::Array(..) | ty::Slice(_) => {} + _ => return true, + } + } else if let ty::Array(_, len) = ty.sty { + // FIXME(eddyb) the `cx.mode == Mode::Fn` condition + // seems unnecessary, given that this is merely a ZST. + if !(len.unwrap_usize(cx.tcx) == 0 && cx.mode == Mode::Fn) { + return true; + } + } else { + return true; + } + } + } + + Rvalue::Aggregate(ref kind, _) => { + if let AggregateKind::Adt(def, ..) = **kind { + if Some(def.did) == cx.tcx.lang_items().unsafe_cell_type() { + let ty = rvalue.ty(cx.mir, cx.tcx); + assert_eq!(Self::in_any_value_of_ty(cx, ty), Some(true)); + return true; + } + } + } + + _ => {} + } + + Self::in_rvalue_structurally(cx, rvalue) + } +} + +// Constant containing an ADT that implements Drop. +struct NeedsDrop; + +impl Qualif for NeedsDrop { + const IDX: usize = 1; + + fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> Option { + Some(ty.needs_drop(cx.tcx, cx.param_env)) + } + + fn in_rvalue(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool { + if let Rvalue::Aggregate(ref kind, _) = *rvalue { + if let AggregateKind::Adt(def, ..) = **kind { + if def.has_dtor(cx.tcx) { + return true; + } + } + } + + Self::in_rvalue_structurally(cx, rvalue) + } +} + +// Not constant at all - non-`const fn` calls, asm!, +// pointer comparisons, ptr-to-int casts, etc. +struct IsNotConst; + +impl Qualif for IsNotConst { + const IDX: usize = 2; + + fn in_static(cx: &ConstCx<'_, 'tcx>, static_: &Static<'tcx>) -> bool { + match static_.kind { + StaticKind::Promoted(_) => unreachable!(), + StaticKind::Static(def_id) => { + // Only allow statics (not consts) to refer to other statics. + let allowed = cx.mode == Mode::Static || cx.mode == Mode::StaticMut; + + !allowed || + cx.tcx.get_attrs(def_id).iter().any( + |attr| attr.check_name("thread_local" + )) + } + } + } + + fn in_projection(cx: &ConstCx<'_, 'tcx>, proj: &PlaceProjection<'tcx>) -> bool { + match proj.elem { + ProjectionElem::Deref | + ProjectionElem::Downcast(..) => return true, + + ProjectionElem::ConstantIndex {..} | + ProjectionElem::Subslice {..} | + ProjectionElem::Index(_) => {} + + ProjectionElem::Field(..) => { + if cx.mode == Mode::Fn { + let base_ty = proj.base.ty(cx.mir, cx.tcx).ty; + if let Some(def) = base_ty.ty_adt_def() { + if def.is_union() { + return true; + } + } + } + } + } + + Self::in_projection_structurally(cx, proj) + } + + fn in_rvalue(cx: &ConstCx<'_, 'tcx>, rvalue: &Rvalue<'tcx>) -> bool { + match *rvalue { + Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if cx.mode == Mode::Fn => { + let operand_ty = operand.ty(cx.mir, cx.tcx); + let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast"); + let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); + match (cast_in, cast_out) { + (CastTy::Ptr(_), CastTy::Int(_)) | + (CastTy::FnPtr, CastTy::Int(_)) => { + // in normal functions, mark such casts as not promotable + return true; + } + _ => {} + } + } + + Rvalue::BinaryOp(op, ref lhs, _) if cx.mode == Mode::Fn => { + if let ty::RawPtr(_) | ty::FnPtr(..) 
= lhs.ty(cx.mir, cx.tcx).sty { + assert!(op == BinOp::Eq || op == BinOp::Ne || + op == BinOp::Le || op == BinOp::Lt || + op == BinOp::Ge || op == BinOp::Gt || + op == BinOp::Offset); + + // raw pointer operations are not allowed inside promoteds + return true; + } + } + + Rvalue::NullaryOp(NullOp::Box, _) => return true, + + _ => {} + } + + Self::in_rvalue_structurally(cx, rvalue) + } + + fn in_call( + cx: &ConstCx<'_, 'tcx>, + callee: &Operand<'tcx>, + args: &[Operand<'tcx>], + _return_ty: Ty<'tcx>, + ) -> bool { + let fn_ty = callee.ty(cx.mir, cx.tcx); + match fn_ty.sty { + ty::FnDef(def_id, _) => { + match cx.tcx.fn_sig(def_id).abi() { + Abi::RustIntrinsic | + Abi::PlatformIntrinsic => { + assert!(!cx.tcx.is_const_fn(def_id)); + match &cx.tcx.item_name(def_id).as_str()[..] { + | "size_of" + | "min_align_of" + | "needs_drop" + | "type_id" + | "bswap" + | "bitreverse" + | "ctpop" + | "cttz" + | "cttz_nonzero" + | "ctlz" + | "ctlz_nonzero" + | "overflowing_add" + | "overflowing_sub" + | "overflowing_mul" + | "unchecked_shl" + | "unchecked_shr" + | "rotate_left" + | "rotate_right" + | "add_with_overflow" + | "sub_with_overflow" + | "mul_with_overflow" + | "saturating_add" + | "saturating_sub" + | "transmute" + => return true, + + _ => {} + } + } + _ => { + let is_const_fn = + cx.tcx.is_const_fn(def_id) || + cx.tcx.is_unstable_const_fn(def_id).is_some() || + cx.is_const_panic_fn(def_id); + if !is_const_fn { + return true; + } + } + } + } + _ => return true, + } + + Self::in_operand(cx, callee) || args.iter().any(|arg| Self::in_operand(cx, arg)) + } +} + +// Refers to temporaries which cannot be promoted as +// promote_consts decided they weren't simple enough. +// FIXME(oli-obk,eddyb): Remove this flag entirely and +// solely process this information via `IsNotConst`. +struct IsNotPromotable; + +impl Qualif for IsNotPromotable { + const IDX: usize = 3; + + fn in_call( + cx: &ConstCx<'_, 'tcx>, + callee: &Operand<'tcx>, + args: &[Operand<'tcx>], + _return_ty: Ty<'tcx>, + ) -> bool { + if cx.mode == Mode::Fn { + if let ty::FnDef(def_id, _) = callee.ty(cx.mir, cx.tcx).sty { + // Never promote runtime `const fn` calls of + // functions without `#[rustc_promotable]`. + if !cx.tcx.is_promotable_const_fn(def_id) { + return true; + } + } + } + + Self::in_operand(cx, callee) || args.iter().any(|arg| Self::in_operand(cx, arg)) + } +} + +// Ensure the `IDX` values are sequential (`0..QUALIF_COUNT`). +macro_rules! 
static_assert_seq_qualifs { + ($i:expr => $first:ident $(, $rest:ident)*) => { + static_assert!(SEQ_QUALIFS: { + static_assert_seq_qualifs!($i + 1 => $($rest),*); + + $first::IDX == $i + }); + }; + ($i:expr =>) => { + static_assert!(SEQ_QUALIFS: QUALIF_COUNT == $i); + }; +} +static_assert_seq_qualifs!(0 => HasMutInterior, NeedsDrop, IsNotConst, IsNotPromotable); + +impl ConstCx<'_, 'tcx> { + fn qualifs_in_any_value_of_ty(&self, ty: Ty<'tcx>) -> PerQualif { + let mut qualifs = PerQualif::default(); + qualifs[HasMutInterior] = HasMutInterior::in_any_value_of_ty(self, ty).unwrap_or(false); + qualifs[NeedsDrop] = NeedsDrop::in_any_value_of_ty(self, ty).unwrap_or(false); + qualifs[IsNotConst] = IsNotConst::in_any_value_of_ty(self, ty).unwrap_or(false); + qualifs[IsNotPromotable] = IsNotPromotable::in_any_value_of_ty(self, ty).unwrap_or(false); + qualifs + } + + fn qualifs_in_local(&self, local: Local) -> PerQualif { + let mut qualifs = PerQualif::default(); + qualifs[HasMutInterior] = HasMutInterior::in_local(self, local); + qualifs[NeedsDrop] = NeedsDrop::in_local(self, local); + qualifs[IsNotConst] = IsNotConst::in_local(self, local); + qualifs[IsNotPromotable] = IsNotPromotable::in_local(self, local); + qualifs + } + + fn qualifs_in_value(&self, source: ValueSource<'_, 'tcx>) -> PerQualif { + let mut qualifs = PerQualif::default(); + qualifs[HasMutInterior] = HasMutInterior::in_value(self, source); + qualifs[NeedsDrop] = NeedsDrop::in_value(self, source); + qualifs[IsNotConst] = IsNotConst::in_value(self, source); + qualifs[IsNotPromotable] = IsNotPromotable::in_value(self, source); + qualifs + } +} + +struct Checker<'a, 'tcx> { + cx: ConstCx<'a, 'tcx>, + span: Span, def_id: DefId, - mir: &'a Mir<'tcx>, rpo: ReversePostorder<'a, 'tcx>, - tcx: TyCtxt<'a, 'gcx, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - local_qualif: IndexVec>, - qualif: Qualif, - const_fn_arg_vars: BitSet, + temp_promotion_state: IndexVec, - promotion_candidates: Vec + promotion_candidates: Vec, } -impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { +macro_rules! 
unleash_miri { + ($this:expr) => {{ + if $this.tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you { + $this.tcx.sess.span_warn($this.span, "skipping const checks"); + return; + } + }} +} + +impl Deref for Checker<'a, 'tcx> { + type Target = ConstCx<'a, 'tcx>; + + fn deref(&self) -> &Self::Target { + &self.cx + } +} + +impl<'a, 'tcx> Checker<'a, 'tcx> { fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, mir: &'a Mir<'tcx>, mode: Mode) - -> Qualifier<'a, 'tcx, 'tcx> { + -> Self { assert!(def_id.is_local()); let mut rpo = traversal::reverse_postorder(mir); let temps = promote_consts::collect_temps(mir, &mut rpo); @@ -133,24 +622,43 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { let param_env = tcx.param_env(def_id); - let mut local_qualif = IndexVec::from_elem(None, &mir.local_decls); - for arg in mir.args_iter() { - let mut qualif = Qualif::NEEDS_DROP; - qualif.restrict(mir.local_decls[arg].ty, tcx, param_env); - local_qualif[arg] = Some(qualif); + let mut cx = ConstCx { + tcx, + param_env, + mode, + mir, + per_local: PerQualif::new(BitSet::new_empty(mir.local_decls.len())), + }; + + for (local, decl) in mir.local_decls.iter_enumerated() { + match mir.local_kind(local) { + LocalKind::Arg => { + let qualifs = cx.qualifs_in_any_value_of_ty(decl.ty); + for (per_local, qualif) in &mut cx.per_local.as_mut().zip(qualifs).0 { + if *qualif { + per_local.insert(local); + } + } + cx.per_local[IsNotConst].insert(local); + } + + LocalKind::Var if mode == Mode::Fn => { + cx.per_local[IsNotConst].insert(local); + } + + LocalKind::Temp if !temps[local].is_promotable() => { + cx.per_local[IsNotConst].insert(local); + } + + _ => {} + } } - Qualifier { - mode, + Checker { + cx, span: mir.span, def_id, - mir, rpo, - tcx, - param_env, - local_qualif, - qualif: Qualif::empty(), - const_fn_arg_vars: BitSet::new_empty(mir.local_decls.len()), temp_promotion_state: temps, promotion_candidates: vec![] } @@ -160,7 +668,7 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { // categories, but enabling full miri would make that // slightly pointless (even with feature-gating). fn not_const(&mut self) { - self.add(Qualif::NOT_CONST); + unleash_miri!(self); if self.mode != Mode::Fn { let mut err = struct_span_err!( self.tcx.sess, @@ -179,151 +687,152 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } } - /// Error about extra statements in a constant. - fn statement_like(&mut self) { - self.add(Qualif::NOT_CONST); - if self.mode != Mode::Fn { - let mut err = feature_err( - &self.tcx.sess.parse_sess, - "const_let", - self.span, - GateIssue::Language, - &format!("statements in {}s are unstable", self.mode), - ); - if self.tcx.sess.teach(&err.get_code().unwrap()) { - err.note("Blocks in constants may only contain items (such as constant, function \ - definition, etc...) and a tail expression."); - err.help("To avoid it, you have to replace the non-item object."); - } - err.emit(); - } - } + /// Assigns an rvalue/call qualification to the given destination. + fn assign(&mut self, dest: &Place<'tcx>, source: ValueSource<'_, 'tcx>, location: Location) { + trace!("assign: {:?} <- {:?}", dest, source); - /// Add the given qualification to self.qualif. - fn add(&mut self, qualif: Qualif) { - self.qualif = self.qualif | qualif; - } + let mut qualifs = self.qualifs_in_value(source); - /// Add the given type's qualification to self.qualif. 
- fn add_type(&mut self, ty: Ty<'tcx>) { - self.add(Qualif::MUTABLE_INTERIOR | Qualif::NEEDS_DROP); - self.qualif.restrict(ty, self.tcx, self.param_env); - } + if let ValueSource::Rvalue(&Rvalue::Ref(_, kind, ref place)) = source { + // Getting `true` from `HasMutInterior::in_rvalue` means + // the borrowed place is disallowed from being borrowed, + // due to either a mutable borrow (with some exceptions), + // or an shared borrow of a value with interior mutability. + // Then `HasMutInterior` is replaced with `IsNotConst`, + // to avoid duplicate errors (e.g. from reborrowing). + if qualifs[HasMutInterior] { + qualifs[HasMutInterior] = false; + qualifs[IsNotConst] = true; - /// Within the provided closure, self.qualif will start - /// out empty, and its value after the closure returns will - /// be combined with the value before the call to nest. - fn nest(&mut self, f: F) { - let original = self.qualif; - self.qualif = Qualif::empty(); - f(self); - self.add(original); - } + if self.mode != Mode::Fn { + if let BorrowKind::Mut { .. } = kind { + let mut err = struct_span_err!(self.tcx.sess, self.span, E0017, + "references in {}s may only refer \ + to immutable values", self.mode); + err.span_label(self.span, format!("{}s require immutable values", + self.mode)); + if self.tcx.sess.teach(&err.get_code().unwrap()) { + err.note("References in statics and constants may only refer to \ + immutable values.\n\n\ + Statics are shared everywhere, and if they refer to \ + mutable data one might violate memory safety since \ + holding multiple mutable references to shared data is \ + not allowed.\n\n\ + If you really want global mutable state, try using \ + static mut or a global UnsafeCell."); + } + err.emit(); + } else { + span_err!(self.tcx.sess, self.span, E0492, + "cannot borrow a constant which may contain \ + interior mutability, create a static instead"); + } + } + } else if let BorrowKind::Mut { .. } | BorrowKind::Shared = kind { + // Don't promote BorrowKind::Shallow borrows, as they don't + // reach codegen. + + // We might have a candidate for promotion. + let candidate = Candidate::Ref(location); + // We can only promote interior borrows of promotable temps. + let mut place = place; + while let Place::Projection(ref proj) = *place { + if proj.elem == ProjectionElem::Deref { + break; + } + place = &proj.base; + } + debug!("qualify_consts: promotion candidate: place={:?}", place); + if let Place::Base(PlaceBase::Local(local)) = *place { + if self.mir.local_kind(local) == LocalKind::Temp { + debug!("qualify_consts: promotion candidate: local={:?}", local); + // The borrowed place doesn't have `HasMutInterior` + // (from `in_rvalue`), so we can safely ignore + // `HasMutInterior` from the local's qualifications. + // This allows borrowing fields which don't have + // `HasMutInterior`, from a type that does, e.g.: + // `let _: &'static _ = &(Cell::new(1), 2).1;` + let mut local_qualifs = self.qualifs_in_local(local); + local_qualifs[HasMutInterior] = false; + if !local_qualifs.0.iter().any(|&qualif| qualif) { + debug!("qualify_consts: promotion candidate: {:?}", candidate); + self.promotion_candidates.push(candidate); + } + } + } + } + } - /// Assign the current qualification to the given destination. 
- fn assign(&mut self, dest: &Place<'tcx>, location: Location) { - trace!("assign: {:?}", dest); - let qualif = self.qualif; - let span = self.span; - let store = |slot: &mut Option| { - if slot.is_some() { - span_bug!(span, "multiple assignments to {:?}", dest); + let mut dest = dest; + let index = loop { + match dest { + // We treat all locals equal in constants + Place::Base(PlaceBase::Local(index)) => break *index, + // projections are transparent for assignments + // we qualify the entire destination at once, even if just a field would have + // stricter qualification + Place::Projection(proj) => { + // Catch more errors in the destination. `visit_place` also checks various + // projection rules like union field access and raw pointer deref + self.visit_place( + dest, + PlaceContext::MutatingUse(MutatingUseContext::Store), + location + ); + dest = &proj.base; + }, + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. })) => + bug!("promoteds don't exist yet during promotion"), + Place::Base(PlaceBase::Static(box Static{ kind: _, .. })) => { + // Catch more errors in the destination. `visit_place` also checks that we + // do not try to access statics from constants or try to mutate statics + self.visit_place( + dest, + PlaceContext::MutatingUse(MutatingUseContext::Store), + location + ); + return; + } } - *slot = Some(qualif); }; + let kind = self.mir.local_kind(index); + debug!("store to {:?} {:?}", kind, index); + // Only handle promotable temps in non-const functions. if self.mode == Mode::Fn { - if let Place::Local(index) = *dest { - if self.mir.local_kind(index) == LocalKind::Temp - && self.temp_promotion_state[index].is_promotable() { - debug!("store to promotable temp {:?} ({:?})", index, qualif); - store(&mut self.local_qualif[index]); - } + if kind != LocalKind::Temp || + !self.temp_promotion_state[index].is_promotable() { + return; } - return; } - if self.tcx.features().const_let { - let mut dest = dest; - let index = loop { - match dest { - // with `const_let` active, we treat all locals equal - Place::Local(index) => break *index, - // projections are transparent for assignments - // we qualify the entire destination at once, even if just a field would have - // stricter qualification - Place::Projection(proj) => { - // Catch more errors in the destination. `visit_place` also checks various - // projection rules like union field access and raw pointer deref - self.visit_place( - dest, - PlaceContext::MutatingUse(MutatingUseContext::Store), - location - ); - dest = &proj.base; - }, - Place::Promoted(..) => bug!("promoteds don't exist yet during promotion"), - Place::Static(..) => { - // Catch more errors in the destination. `visit_place` also checks that we - // do not try to access statics from constants or try to mutate statics - self.visit_place( - dest, - PlaceContext::MutatingUse(MutatingUseContext::Store), - location - ); - return; - } - } - }; - debug!("store to var {:?}", index); - match &mut self.local_qualif[index] { - // this is overly restrictive, because even full assignments do not clear the qualif - // While we could special case full assignments, this would be inconsistent with - // aggregates where we overwrite all fields via assignments, which would not get - // that feature. 
- Some(ref mut qualif) => *qualif = *qualif | self.qualif, - // insert new qualification - qualif @ None => *qualif = Some(self.qualif), + // this is overly restrictive, because even full assignments do not clear the qualif + // While we could special case full assignments, this would be inconsistent with + // aggregates where we overwrite all fields via assignments, which would not get + // that feature. + for (per_local, qualif) in &mut self.cx.per_local.as_mut().zip(qualifs).0 { + if *qualif { + per_local.insert(index); } - return; } - match *dest { - Place::Local(index) if self.mir.local_kind(index) == LocalKind::Temp || - self.mir.local_kind(index) == LocalKind::ReturnPointer => { - debug!("store to {:?} (temp or return pointer)", index); - store(&mut self.local_qualif[index]) - } - - Place::Projection(box Projection { - base: Place::Local(index), - elem: ProjectionElem::Deref - }) if self.mir.local_kind(index) == LocalKind::Temp - && self.mir.local_decls[index].ty.is_box() - && self.local_qualif[index].map_or(false, |qualif| { - qualif.contains(Qualif::NOT_CONST) - }) => { - // Part of `box expr`, we should've errored - // already for the Box allocation Rvalue. - } - - // This must be an explicit assignment. - _ => { - // Catch more errors in the destination. - self.visit_place( - dest, - PlaceContext::MutatingUse(MutatingUseContext::Store), - location - ); - self.statement_like(); + // Ensure the `IsNotConst` qualification is preserved. + // NOTE(eddyb) this is actually unnecessary right now, as + // we never replace the local's qualif, but we might in + // the future, and so it serves to catch changes that unset + // important bits (in which case, asserting `contains` could + // be replaced with calling `insert` to re-set the bit). + if kind == LocalKind::Temp { + if !self.temp_promotion_state[index].is_promotable() { + assert!(self.cx.per_local[IsNotConst].contains(index)); } } } - /// Qualify a whole const, static initializer or const fn. - fn qualify_const(&mut self) -> (Qualif, Lrc>) { - debug!("qualifying {} {:?}", self.mode, self.def_id); + /// Check a whole const, static initializer or const fn. + fn check_const(&mut self) -> (u8, Lrc>) { + debug!("const-checking {} {:?}", self.mode, self.def_id); let mir = self.mir; @@ -358,45 +867,6 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { TerminatorKind::FalseUnwind { .. } => None, TerminatorKind::Return => { - if !self.tcx.features().const_let { - // Check for unused values. This usually means - // there are extra statements in the AST. - for temp in mir.temps_iter() { - if self.local_qualif[temp].is_none() { - continue; - } - - let state = self.temp_promotion_state[temp]; - if let TempState::Defined { location, uses: 0 } = state { - let data = &mir[location.block]; - let stmt_idx = location.statement_index; - - // Get the span for the initialization. - let source_info = if stmt_idx < data.statements.len() { - data.statements[stmt_idx].source_info - } else { - data.terminator().source_info - }; - self.span = source_info.span; - - // Treat this as a statement in the AST. - self.statement_like(); - } - } - - // Make sure there are no extra unassigned variables. 
- self.qualif = Qualif::NOT_CONST; - for index in mir.vars_iter() { - if !self.const_fn_arg_vars.contains(index) { - debug!("unassigned variable {:?}", index); - self.assign(&Place::Local(index), Location { - block: bb, - statement_index: usize::MAX, - }); - } - } - } - break; } }; @@ -413,16 +883,6 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } } - self.qualif = self.local_qualif[RETURN_PLACE].unwrap_or(Qualif::NOT_CONST); - - // Account for errors in consts by using the - // conservative type qualification instead. - if self.qualif.intersects(Qualif::CONST_ERROR) { - self.qualif = Qualif::empty(); - let return_ty = mir.return_ty(); - self.add_type(return_ty); - } - // Collect all the temps we need to promote. let mut promoted_temps = BitSet::new_empty(self.temp_promotion_state.len()); @@ -432,7 +892,10 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { match *candidate { Candidate::Ref(Location { block: bb, statement_index: stmt_idx }) => { match self.mir[bb].statements[stmt_idx].kind { - StatementKind::Assign(_, box Rvalue::Ref(_, _, Place::Local(index))) => { + StatementKind::Assign( + _, + box Rvalue::Ref(_, _, Place::Base(PlaceBase::Local(index))) + ) => { promoted_temps.insert(index); } _ => {} @@ -442,69 +905,40 @@ impl<'a, 'tcx> Qualifier<'a, 'tcx, 'tcx> { } } - (self.qualif, Lrc::new(promoted_temps)) - } - - fn is_const_panic_fn(&self, def_id: DefId) -> bool { - Some(def_id) == self.tcx.lang_items().panic_fn() || - Some(def_id) == self.tcx.lang_items().begin_panic_fn() - } -} + let promoted_temps = Lrc::new(promoted_temps); -/// Accumulates an Rvalue or Call's effects in self.qualif. -/// For functions (constant or not), it also records -/// candidates for promotion in promotion_candidates. -impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { - fn visit_local(&mut self, - &local: &Local, - _: PlaceContext<'tcx>, - _: Location) { - debug!("visit_local: local={:?}", local); - let kind = self.mir.local_kind(local); - match kind { - LocalKind::ReturnPointer => { - self.not_const(); - } - LocalKind::Var if !self.tcx.features().const_let => { - if self.mode != Mode::Fn { - emit_feature_err(&self.tcx.sess.parse_sess, "const_let", - self.span, GateIssue::Language, - &format!("let bindings in {}s are unstable",self.mode)); - } - self.add(Qualif::NOT_CONST); - } - LocalKind::Var | - LocalKind::Arg | - LocalKind::Temp => { - if let LocalKind::Arg = kind { - self.add(Qualif::FN_ARGUMENT); - } - - if !self.temp_promotion_state[local].is_promotable() { - debug!("visit_local: (not promotable) local={:?}", local); - self.add(Qualif::NOT_PROMOTABLE); - } + let mut qualifs = self.qualifs_in_local(RETURN_PLACE); - if let Some(qualif) = self.local_qualif[local] { - self.add(qualif); - } else { - self.not_const(); - } - } + // Account for errors in consts by using the + // conservative type qualification instead. + if qualifs[IsNotConst] { + qualifs = self.qualifs_in_any_value_of_ty(mir.return_ty()); } + + (qualifs.encode_to_bits(), promoted_temps) } +} +/// Checks MIR for const-correctness, using `ConstCx` +/// for value qualifications, and accumulates writes of +/// rvalue/call results to locals, in `local_qualif`. +/// For functions (constant or not), it also records +/// candidates for promotion in `promotion_candidates`. 
+impl<'a, 'tcx> Visitor<'tcx> for Checker<'a, 'tcx> { fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext<'tcx>, location: Location) { debug!("visit_place: place={:?} context={:?} location={:?}", place, context, location); + self.super_place(place, context, location); match *place { - Place::Local(ref local) => self.visit_local(local, context, location), - Place::Promoted(_) => bug!("promoting already promoted MIR"), - Place::Static(ref global) => { + Place::Base(PlaceBase::Local(_)) => {} + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Promoted(_), .. })) => { + unreachable!() + } + Place::Base(PlaceBase::Static(box Static{ kind: StaticKind::Static(def_id), .. })) => { if self.tcx - .get_attrs(global.def_id) + .get_attrs(def_id) .iter() .any(|attr| attr.check_name("thread_local")) { if self.mode != Mode::Fn { @@ -512,13 +946,12 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { "thread-local statics cannot be \ accessed at compile-time"); } - self.add(Qualif::NOT_CONST); return; } // Only allow statics (not consts) to refer to other statics. if self.mode == Mode::Static || self.mode == Mode::StaticMut { - if context.is_mutating_use() { + if self.mode == Mode::Static && context.is_mutating_use() { // this is not strictly necessary as miri will also bail out // For interior mutability we can't really catch this statically as that // goes through raw pointers and intermediate temporaries, so miri has @@ -530,7 +963,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } return; } - self.add(Qualif::NOT_CONST); + unleash_miri!(self); if self.mode != Mode::Fn { let mut err = struct_span_err!(self.tcx.sess, self.span, E0013, @@ -549,67 +982,64 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } } Place::Projection(ref proj) => { - self.nest(|this| { - this.super_place(place, context, location); - match proj.elem { - ProjectionElem::Deref => { - this.add(Qualif::NOT_CONST); - let base_ty = proj.base.ty(this.mir, this.tcx).to_ty(this.tcx); - match this.mode { - Mode::Fn => {}, - _ => { - if let ty::RawPtr(_) = base_ty.sty { - if !this.tcx.features().const_raw_ptr_deref { - emit_feature_err( - &this.tcx.sess.parse_sess, "const_raw_ptr_deref", - this.span, GateIssue::Language, - &format!( - "dereferencing raw pointers in {}s is unstable", - this.mode, - ), - ); - } + match proj.elem { + ProjectionElem::Deref => { + if context.is_mutating_use() { + // `not_const` errors out in const contexts + self.not_const() + } + let base_ty = proj.base.ty(self.mir, self.tcx).ty; + match self.mode { + Mode::Fn => {}, + _ => { + if let ty::RawPtr(_) = base_ty.sty { + if !self.tcx.features().const_raw_ptr_deref { + emit_feature_err( + &self.tcx.sess.parse_sess, "const_raw_ptr_deref", + self.span, GateIssue::Language, + &format!( + "dereferencing raw pointers in {}s is unstable", + self.mode, + ), + ); } } } } + } - ProjectionElem::Field(..) | - ProjectionElem::Index(_) => { - let base_ty = proj.base.ty(this.mir, this.tcx).to_ty(this.tcx); - if let Some(def) = base_ty.ty_adt_def() { - if def.is_union() { - match this.mode { - Mode::Fn => this.not_const(), - Mode::ConstFn => { - if !this.tcx.features().const_fn_union { - emit_feature_err( - &this.tcx.sess.parse_sess, "const_fn_union", - this.span, GateIssue::Language, - "unions in const fn are unstable", - ); - } - }, - - | Mode::Static - | Mode::StaticMut - | Mode::Const - => {}, - } + ProjectionElem::ConstantIndex {..} | + ProjectionElem::Subslice {..} | + ProjectionElem::Field(..) 
| + ProjectionElem::Index(_) => { + let base_ty = proj.base.ty(self.mir, self.tcx).ty; + if let Some(def) = base_ty.ty_adt_def() { + if def.is_union() { + match self.mode { + Mode::ConstFn => { + if !self.tcx.features().const_fn_union { + emit_feature_err( + &self.tcx.sess.parse_sess, "const_fn_union", + self.span, GateIssue::Language, + "unions in const fn are unstable", + ); + } + }, + + | Mode::Fn + | Mode::Static + | Mode::StaticMut + | Mode::Const + => {}, } } - - let ty = place.ty(this.mir, this.tcx).to_ty(this.tcx); - this.qualif.restrict(ty, this.tcx, this.param_env); } + } - ProjectionElem::ConstantIndex {..} | - ProjectionElem::Subslice {..} | - ProjectionElem::Downcast(..) => { - this.not_const() - } + ProjectionElem::Downcast(..) => { + self.not_const() } - }); + } } } } @@ -619,44 +1049,27 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { self.super_operand(operand, location); match *operand { - Operand::Copy(_) | - Operand::Move(_) => { + Operand::Move(ref place) => { // Mark the consumed locals to indicate later drops are noops. - if let Operand::Move(Place::Local(local)) = *operand { - self.local_qualif[local] = self.local_qualif[local].map(|q| - q - Qualif::NEEDS_DROP - ); - } - } - Operand::Constant(ref constant) => { - if let ConstValue::Unevaluated(def_id, _) = constant.literal.val { - // Don't peek inside trait associated constants. - if self.tcx.trait_of_item(def_id).is_some() { - self.add_type(constant.literal.ty); - } else { - let (bits, _) = self.tcx.at(constant.span).mir_const_qualif(def_id); - - let qualif = Qualif::from_bits(bits).expect("invalid mir_const_qualif"); - self.add(qualif); - - // Just in case the type is more specific than - // the definition, e.g., impl associated const - // with type parameters, take it into account. - self.qualif.restrict(constant.literal.ty, self.tcx, self.param_env); - } + if let Place::Base(PlaceBase::Local(local)) = *place { + self.cx.per_local[NeedsDrop].remove(local); } } + Operand::Copy(_) | + Operand::Constant(_) => {} } } fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { debug!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location); - // Recurse through operands and places. + + // Check nested operands and places. if let Rvalue::Ref(region, kind, ref place) = *rvalue { + // Special-case reborrows. let mut is_reborrow = false; if let Place::Projection(ref proj) = *place { if let ProjectionElem::Deref = proj.elem { - let base_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + let base_ty = proj.base.ty(self.mir, self.tcx).ty; if let ty::Ref(..) = base_ty.sty { is_reborrow = true; } @@ -665,14 +1078,18 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if is_reborrow { let ctx = match kind { - BorrowKind::Shared => - PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow(region)), - BorrowKind::Shallow => - PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow(region)), - BorrowKind::Unique => - PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow(region)), - BorrowKind::Mut { .. } => - PlaceContext::MutatingUse(MutatingUseContext::Borrow(region)), + BorrowKind::Shared => PlaceContext::NonMutatingUse( + NonMutatingUseContext::SharedBorrow(region), + ), + BorrowKind::Shallow => PlaceContext::NonMutatingUse( + NonMutatingUseContext::ShallowBorrow(region), + ), + BorrowKind::Unique => PlaceContext::NonMutatingUse( + NonMutatingUseContext::UniqueBorrow(region), + ), + BorrowKind::Mut { .. 
} => PlaceContext::MutatingUse( + MutatingUseContext::Borrow(region), + ), }; self.super_place(place, ctx, location); } else { @@ -691,112 +1108,13 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { Rvalue::CheckedBinaryOp(..) | Rvalue::Cast(CastKind::ReifyFnPointer, ..) | Rvalue::Cast(CastKind::UnsafeFnPointer, ..) | - Rvalue::Cast(CastKind::ClosureFnPointer, ..) | + Rvalue::Cast(CastKind::ClosureFnPointer(_), ..) | Rvalue::Cast(CastKind::Unsize, ..) | + Rvalue::Cast(CastKind::MutToConstPointer, ..) | Rvalue::Discriminant(..) | - Rvalue::Len(_) => {} - - Rvalue::Ref(_, kind, ref place) => { - let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx); - - // Default to forbidding the borrow and/or its promotion, - // due to the potential for direct or interior mutability, - // and only proceed by setting `forbidden_mut` to `false`. - let mut forbidden_mut = true; - - if let BorrowKind::Mut { .. } = kind { - // In theory, any zero-sized value could be borrowed - // mutably without consequences. However, only &mut [] - // is allowed right now, and only in functions. - if self.mode == Mode::StaticMut { - // Inside a `static mut`, &mut [...] is also allowed. - match ty.sty { - ty::Array(..) | ty::Slice(_) => forbidden_mut = false, - _ => {} - } - } else if let ty::Array(_, len) = ty.sty { - // FIXME(eddyb) the `self.mode == Mode::Fn` condition - // seems unnecessary, given that this is merely a ZST. - if len.unwrap_usize(self.tcx) == 0 && self.mode == Mode::Fn { - forbidden_mut = false; - } - } - - if forbidden_mut { - self.add(Qualif::NOT_CONST); - if self.mode != Mode::Fn { - let mut err = struct_span_err!(self.tcx.sess, self.span, E0017, - "references in {}s may only refer \ - to immutable values", self.mode); - err.span_label(self.span, format!("{}s require immutable values", - self.mode)); - if self.tcx.sess.teach(&err.get_code().unwrap()) { - err.note("References in statics and constants may only refer to \ - immutable values.\n\n\ - Statics are shared everywhere, and if they refer to \ - mutable data one might violate memory safety since \ - holding multiple mutable references to shared data is \ - not allowed.\n\n\ - If you really want global mutable state, try using \ - static mut or a global UnsafeCell."); - } - err.emit(); - } - } - } else { - // Constants cannot be borrowed if they contain interior mutability as - // it means that our "silent insertion of statics" could change - // initializer values (very bad). - if self.qualif.contains(Qualif::MUTABLE_INTERIOR) { - // A reference of a MUTABLE_INTERIOR place is instead - // NOT_CONST (see `if forbidden_mut` below), to avoid - // duplicate errors (from reborrowing, for example). - self.qualif = self.qualif - Qualif::MUTABLE_INTERIOR; - if self.mode != Mode::Fn { - span_err!(self.tcx.sess, self.span, E0492, - "cannot borrow a constant which may contain \ - interior mutability, create a static instead"); - } - } else { - // We allow immutable borrows of frozen data. - forbidden_mut = false; - } - } - - debug!("visit_rvalue: forbidden_mut={:?}", forbidden_mut); - if forbidden_mut { - self.add(Qualif::NOT_CONST); - } else { - // We might have a candidate for promotion. - let candidate = Candidate::Ref(location); - // We can only promote interior borrows of promotable temps. 
- let mut place = place; - while let Place::Projection(ref proj) = *place { - if proj.elem == ProjectionElem::Deref { - break; - } - place = &proj.base; - } - debug!("visit_rvalue: place={:?}", place); - if let Place::Local(local) = *place { - if self.mir.local_kind(local) == LocalKind::Temp { - debug!("visit_rvalue: local={:?}", local); - if let Some(qualif) = self.local_qualif[local] { - // `forbidden_mut` is false, so we can safely ignore - // `MUTABLE_INTERIOR` from the local's qualifications. - // This allows borrowing fields which don't have - // `MUTABLE_INTERIOR`, from a type that does, e.g.: - // `let _: &'static _ = &(Cell::new(1), 2).1;` - debug!("visit_rvalue: qualif={:?}", qualif); - if (qualif - Qualif::MUTABLE_INTERIOR).is_empty() { - debug!("visit_rvalue: candidate={:?}", candidate); - self.promotion_candidates.push(candidate); - } - } - } - } - } - } + Rvalue::Len(_) | + Rvalue::Ref(..) | + Rvalue::Aggregate(..) => {} Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => { let operand_ty = operand.ty(self.mir, self.tcx); @@ -804,11 +1122,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); match (cast_in, cast_out) { (CastTy::Ptr(_), CastTy::Int(_)) | - (CastTy::FnPtr, CastTy::Int(_)) => { - if let Mode::Fn = self.mode { - // in normal functions, mark such casts as not promotable - self.add(Qualif::NOT_CONST); - } else if !self.tcx.features().const_raw_ptr_to_usize_cast { + (CastTy::FnPtr, CastTy::Int(_)) if self.mode != Mode::Fn => { + unleash_miri!(self); + if !self.tcx.features().const_raw_ptr_to_usize_cast { // in const fn and constants require the feature gate // FIXME: make it unsafe inside const fn and constants emit_feature_err( @@ -832,10 +1148,8 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { op == BinOp::Ge || op == BinOp::Gt || op == BinOp::Offset); - if let Mode::Fn = self.mode { - // raw pointer operations are not allowed inside promoteds - self.add(Qualif::NOT_CONST); - } else if !self.tcx.features().const_compare_raw_pointers { + unleash_miri!(self); + if self.mode != Mode::Fn && !self.tcx.features().const_compare_raw_pointers { // require the feature gate inside constants and const fn // FIXME: make it unsafe to use these operations emit_feature_err( @@ -850,7 +1164,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { } Rvalue::NullaryOp(NullOp::Box, _) => { - self.add(Qualif::NOT_CONST); + unleash_miri!(self); if self.mode != Mode::Fn { let mut err = struct_span_err!(self.tcx.sess, self.span, E0010, "allocations are not allowed in {}s", self.mode); @@ -866,20 +1180,6 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { err.emit(); } } - - Rvalue::Aggregate(ref kind, _) => { - if let AggregateKind::Adt(def, ..) = **kind { - if def.has_dtor(self.tcx) { - self.add(Qualif::NEEDS_DROP); - } - - if Some(def.did) == self.tcx.lang_items().unsafe_cell_type() { - let ty = rvalue.ty(self.mir, self.tcx); - self.add_type(ty); - assert!(self.qualif.contains(Qualif::MUTABLE_INTERIOR)); - } - } - } } } @@ -889,13 +1189,17 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { location: Location) { debug!("visit_terminator_kind: bb={:?} kind={:?} location={:?}", bb, kind, location); if let TerminatorKind::Call { ref func, ref args, ref destination, .. 
} = *kind { - self.visit_operand(func, location); + if let Some((ref dest, _)) = *destination { + self.assign(dest, ValueSource::Call { + callee: func, + args, + return_ty: dest.ty(self.mir, self.tcx).ty, + }, location); + } let fn_ty = func.ty(self.mir, self.tcx); let mut callee_def_id = None; let mut is_shuffle = false; - let mut is_const_fn = false; - let mut is_promotable_const_fn = false; match fn_ty.sty { ty::FnDef(def_id, _) => { callee_def_id = Some(def_id); @@ -904,36 +1208,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { Abi::PlatformIntrinsic => { assert!(!self.tcx.is_const_fn(def_id)); match &self.tcx.item_name(def_id).as_str()[..] { - | "size_of" - | "min_align_of" - | "needs_drop" - | "type_id" - | "bswap" - | "bitreverse" - | "ctpop" - | "cttz" - | "cttz_nonzero" - | "ctlz" - | "ctlz_nonzero" - | "overflowing_add" - | "overflowing_sub" - | "overflowing_mul" - | "unchecked_shl" - | "unchecked_shr" - | "rotate_left" - | "rotate_right" - | "add_with_overflow" - | "sub_with_overflow" - | "mul_with_overflow" - // no need to check feature gates, intrinsics are only callable - // from the libstd or with forever unstable feature gates - => is_const_fn = true, // special intrinsic that can be called diretly without an intrinsic // feature gate needs a language feature gate "transmute" => { // never promote transmute calls if self.mode != Mode::Fn { - is_const_fn = true; // const eval transmute calls only with the feature gate if !self.tcx.features().const_transmute { emit_feature_err( @@ -949,33 +1228,30 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { is_shuffle = true; } + // no need to check feature gates, intrinsics are only callable + // from the libstd or with forever unstable feature gates _ => {} } } _ => { - // In normal functions we only care about promotion. - if self.mode == Mode::Fn { - // Never promote const fn calls of - // functions without `#[rustc_promotable]`. - if self.tcx.is_promotable_const_fn(def_id) { - is_const_fn = true; - is_promotable_const_fn = true; - } else if self.tcx.is_const_fn(def_id) { - is_const_fn = true; - } - } else { - // stable const fns or unstable const fns with their feature gate - // active - if self.tcx.is_const_fn(def_id) { - is_const_fn = true; + // In normal functions no calls are feature-gated. + if self.mode != Mode::Fn { + let unleash_miri = self + .tcx + .sess + .opts + .debugging_opts + .unleash_the_miri_inside_of_you; + if self.tcx.is_const_fn(def_id) || unleash_miri { + // stable const fns or unstable const fns + // with their feature gate active + // FIXME(eddyb) move stability checks from `is_const_fn` here. } else if self.is_const_panic_fn(def_id) { // Check the const_panic feature gate. // FIXME: cannot allow this inside `allow_internal_unstable` // because that would make `panic!` insta stable in constants, // since the macro is marked with the attribute. - if self.tcx.features().const_panic { - is_const_fn = true; - } else { + if !self.tcx.features().const_panic { // Don't allow panics in constants without the feature gate. emit_feature_err( &self.tcx.sess.parse_sess, @@ -990,13 +1266,10 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { // Check `#[unstable]` const fns or `#[rustc_const_unstable]` // functions without the feature gate active in this crate in // order to report a better error message than the one below. - if self.span.allows_unstable() { - // `allow_internal_unstable` can make such calls stable. 
- is_const_fn = true; - } else { + if !self.span.allows_unstable(&feature.as_str()) { let mut err = self.tcx.sess.struct_span_err(self.span, &format!("`{}` is not yet stable as a const fn", - self.tcx.item_path_str(def_id))); + self.tcx.def_path_str(def_id))); if nightly_options::is_nightly_build() { help!(&mut err, "add `#![feature({})]` to the \ @@ -1006,38 +1279,20 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { err.emit(); } } else { - // FIXME(#24111): remove this check when const fn stabilizes. - let (msg, note) = if let UnstableFeatures::Disallow = - self.tcx.sess.opts.unstable_features { - (format!("calls in {}s are limited to \ - tuple structs and tuple variants", - self.mode), - Some("a limited form of compile-time function \ - evaluation is available on a nightly \ - compiler via `const fn`")) - } else { - (format!("calls in {}s are limited \ - to constant functions, \ - tuple structs and tuple variants", - self.mode), - None) - }; let mut err = struct_span_err!( self.tcx.sess, self.span, E0015, - "{}", - msg, + "calls in {}s are limited to constant functions, \ + tuple structs and tuple variants", + self.mode, ); - if let Some(note) = note { - err.span_note(self.span, note); - } err.emit(); } } } } - }, + } ty::FnPtr(_) => { if self.mode != Mode::Fn { let mut err = self.tcx.sess.struct_span_err( @@ -1045,42 +1300,22 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { &format!("function pointers are not allowed in const fn")); err.emit(); } - }, + } _ => { self.not_const(); - return } } - - let constant_arguments = callee_def_id.and_then(|id| { - args_required_const(self.tcx, id) - }); - for (i, arg) in args.iter().enumerate() { - self.nest(|this| { - this.visit_operand(arg, location); - if this.mode != Mode::Fn { - return - } - let candidate = Candidate::Argument { bb, index: i }; - if is_shuffle && i == 2 { - if this.qualif.is_empty() { - debug!("visit_terminator_kind: candidate={:?}", candidate); - this.promotion_candidates.push(candidate); - } else { - span_err!(this.tcx.sess, this.span, E0526, - "shuffle indices are not constant"); - } - return + if self.mode == Mode::Fn { + let constant_args = callee_def_id.and_then(|id| { + args_required_const(self.tcx, id) + }).unwrap_or_default(); + for (i, arg) in args.iter().enumerate() { + if !(is_shuffle && i == 2 || constant_args.contains(&i)) { + continue; } - let constant_arguments = match constant_arguments.as_ref() { - Some(s) => s, - None => return, - }; - if !constant_arguments.contains(&i) { - return - } + let candidate = Candidate::Argument { bb, index: i }; // Since the argument is required to be constant, // we care about constness, not promotability. // If we checked for promotability, we'd miss out on @@ -1091,54 +1326,37 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { // which happens even without the user requesting it. // We can error out with a hard error if the argument is not // constant here. 
- if (this.qualif - Qualif::NOT_PROMOTABLE).is_empty() { + if !IsNotConst::in_operand(self, arg) { debug!("visit_terminator_kind: candidate={:?}", candidate); - this.promotion_candidates.push(candidate); + self.promotion_candidates.push(candidate); } else { - this.tcx.sess.span_err(this.span, - &format!("argument {} is required to be a constant", - i + 1)); + if is_shuffle { + span_err!(self.tcx.sess, self.span, E0526, + "shuffle indices are not constant"); + } else { + self.tcx.sess.span_err(self.span, + &format!("argument {} is required to be a constant", + i + 1)); + } } - }); - } - - // non-const fn calls - if !is_const_fn { - self.qualif = Qualif::NOT_CONST; - if self.mode != Mode::Fn { - self.tcx.sess.delay_span_bug( - self.span, - "should have reported an error about non-const fn calls in constants", - ) } } - if let Some((ref dest, _)) = *destination { - // Avoid propagating irrelevant callee/argument qualifications. - if self.qualif.intersects(Qualif::CONST_ERROR) { - self.qualif = Qualif::NOT_CONST; - } else { - // Be conservative about the returned value of a const fn. - let tcx = self.tcx; - let ty = dest.ty(self.mir, tcx).to_ty(tcx); - if is_const_fn && !is_promotable_const_fn && self.mode == Mode::Fn { - self.qualif = Qualif::NOT_PROMOTABLE; - } else { - self.qualif = Qualif::empty(); - } - self.add_type(ty); - } - self.assign(dest, location); + // Check callee and argument operands. + self.visit_operand(func, location); + for arg in args { + self.visit_operand(arg, location); } } else if let TerminatorKind::Drop { location: ref place, .. } = *kind { self.super_terminator_kind(bb, kind, location); // Deny *any* live drops anywhere other than functions. if self.mode != Mode::Fn { + unleash_miri!(self); // HACK(eddyb): emulate a bit of dataflow analysis, // conservatively, that drop elaboration will do. - let needs_drop = if let Place::Local(local) = *place { - if self.local_qualif[local].map_or(true, |q| q.contains(Qualif::NEEDS_DROP)) { + let needs_drop = if let Place::Base(PlaceBase::Local(local)) = *place { + if NeedsDrop::in_local(self, local) { Some(self.mir.local_decls[local].source_info.span) } else { None @@ -1149,7 +1367,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { if let Some(span) = needs_drop { // Double-check the type being dropped, to minimize false positives. - let ty = place.ty(self.mir, self.tcx).to_ty(self.tcx); + let ty = place.ty(self.mir, self.tcx).ty; if ty.needs_drop(self.tcx, self.param_env) { struct_span_err!(self.tcx.sess, span, E0493, "destructors cannot be evaluated at compile-time") @@ -1171,51 +1389,9 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { rvalue: &Rvalue<'tcx>, location: Location) { debug!("visit_assign: dest={:?} rvalue={:?} location={:?}", dest, rvalue, location); - self.visit_rvalue(rvalue, location); - - // Check the allowed const fn argument forms. - if let (Mode::ConstFn, &Place::Local(index)) = (self.mode, dest) { - if self.mir.local_kind(index) == LocalKind::Var && - self.const_fn_arg_vars.insert(index) && - !self.tcx.features().const_let { - - // Direct use of an argument is permitted. - match *rvalue { - Rvalue::Use(Operand::Copy(Place::Local(local))) | - Rvalue::Use(Operand::Move(Place::Local(local))) => { - if self.mir.local_kind(local) == LocalKind::Arg { - return; - } - } - _ => {} - } - - // Avoid a generic error for other uses of arguments. 
- if self.qualif.contains(Qualif::FN_ARGUMENT) { - let decl = &self.mir.local_decls[index]; - let mut err = feature_err( - &self.tcx.sess.parse_sess, - "const_let", - decl.source_info.span, - GateIssue::Language, - "arguments of constant functions can only be immutable by-value bindings" - ); - if self.tcx.sess.teach(&err.get_code().unwrap()) { - err.note("Constant functions are not allowed to mutate anything. Thus, \ - binding to an argument with a mutable pattern is not allowed."); - err.note("Remove any mutable bindings from the argument list to fix this \ - error. In case you need to mutate the argument, try lazily \ - initializing a global variable instead of using a const fn, or \ - refactoring the code to a functional style to avoid mutation if \ - possible."); - } - err.emit(); - return; - } - } - } + self.assign(dest, ValueSource::Rvalue(rvalue), location); - self.assign(dest, location); + self.visit_rvalue(rvalue, location); } fn visit_source_info(&mut self, source_info: &SourceInfo) { @@ -1225,23 +1401,20 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { fn visit_statement(&mut self, bb: BasicBlock, statement: &Statement<'tcx>, location: Location) { debug!("visit_statement: bb={:?} statement={:?} location={:?}", bb, statement, location); - self.nest(|this| { - this.visit_source_info(&statement.source_info); - match statement.kind { - StatementKind::Assign(ref place, ref rvalue) => { - this.visit_assign(bb, place, rvalue, location); - } - StatementKind::FakeRead(..) | - StatementKind::SetDiscriminant { .. } | - StatementKind::StorageLive(_) | - StatementKind::StorageDead(_) | - StatementKind::InlineAsm {..} | - StatementKind::Retag { .. } | - StatementKind::EscapeToRaw { .. } | - StatementKind::AscribeUserType(..) | - StatementKind::Nop => {} + match statement.kind { + StatementKind::Assign(..) => { + self.super_statement(bb, statement, location); } - }); + // FIXME(eddyb) should these really do nothing? + StatementKind::FakeRead(..) | + StatementKind::SetDiscriminant { .. } | + StatementKind::StorageLive(_) | + StatementKind::StorageDead(_) | + StatementKind::InlineAsm {..} | + StatementKind::Retag { .. } | + StatementKind::AscribeUserType(..) 
| + StatementKind::Nop => {} + } } fn visit_terminator(&mut self, @@ -1249,11 +1422,11 @@ impl<'a, 'tcx> Visitor<'tcx> for Qualifier<'a, 'tcx, 'tcx> { terminator: &Terminator<'tcx>, location: Location) { debug!("visit_terminator: bb={:?} terminator={:?} location={:?}", bb, terminator, location); - self.nest(|this| this.super_terminator(bb, terminator, location)); + self.super_terminator(bb, terminator, location); } } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { mir_const_qualif, ..*providers @@ -1271,12 +1444,10 @@ fn mir_const_qualif<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if mir.return_ty().references_error() { tcx.sess.delay_span_bug(mir.span, "mir_const_qualif: Mir had errors"); - return (Qualif::NOT_CONST.bits(), Lrc::new(BitSet::new_empty(0))); + return (1 << IsNotConst::IDX, Lrc::new(BitSet::new_empty(0))); } - let mut qualifier = Qualifier::new(tcx, def_id, mir, Mode::Const); - let (qualif, promoted_temps) = qualifier.qualify_const(); - (qualif.bits(), promoted_temps) + Checker::new(tcx, def_id, mir, Mode::Const).check_const() } pub struct QualifyAndPromoteConstants; @@ -1284,7 +1455,7 @@ pub struct QualifyAndPromoteConstants; impl MirPass for QualifyAndPromoteConstants { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, + src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { // There's not really any point in promoting errorful MIR. if mir.return_ty().references_error() { @@ -1296,10 +1467,11 @@ impl MirPass for QualifyAndPromoteConstants { return; } - let def_id = src.def_id; - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + let def_id = src.def_id(); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); let mut const_promoted_temps = None; - let mode = match tcx.hir().body_owner_kind(id) { + let mode = match tcx.hir().body_owner_kind_by_hir_id(id) { + hir::BodyOwnerKind::Closure => Mode::Fn, hir::BodyOwnerKind::Fn => { if tcx.is_const_fn(def_id) { Mode::ConstFn @@ -1317,42 +1489,85 @@ impl MirPass for QualifyAndPromoteConstants { debug!("run_pass: mode={:?}", mode); if mode == Mode::Fn || mode == Mode::ConstFn { - // This is ugly because Qualifier holds onto mir, + // This is ugly because Checker holds onto mir, // which can't be mutated until its scope ends. let (temps, candidates) = { - let mut qualifier = Qualifier::new(tcx, def_id, mir, mode); + let mut checker = Checker::new(tcx, def_id, mir, mode); if mode == Mode::ConstFn { - if tcx.is_min_const_fn(def_id) { + if tcx.sess.opts.debugging_opts.unleash_the_miri_inside_of_you { + checker.check_const(); + } else if tcx.is_min_const_fn(def_id) { // enforce `min_const_fn` for stable const fns use super::qualify_min_const_fn::is_min_const_fn; if let Err((span, err)) = is_min_const_fn(tcx, def_id, mir) { - tcx.sess.span_err(span, &err); + let mut diag = struct_span_err!( + tcx.sess, + span, + E0723, + "{} (see issue #57563)", + err, + ); + diag.help( + "add #![feature(const_fn)] to the crate attributes to enable", + ); + diag.emit(); } else { // this should not produce any errors, but better safe than sorry // FIXME(#53819) - qualifier.qualify_const(); + checker.check_const(); } } else { // Enforce a constant-like CFG for `const fn`. 
- qualifier.qualify_const(); + checker.check_const(); } } else { - while let Some((bb, data)) = qualifier.rpo.next() { - qualifier.visit_basic_block_data(bb, data); + while let Some((bb, data)) = checker.rpo.next() { + checker.visit_basic_block_data(bb, data); } } - (qualifier.temp_promotion_state, qualifier.promotion_candidates) + (checker.temp_promotion_state, checker.promotion_candidates) }; // Do the actual promotion, now that we know what's viable. promote_consts::promote_candidates(mir, tcx, temps, candidates); } else { + if !mir.control_flow_destroyed.is_empty() { + let mut locals = mir.vars_iter(); + if let Some(local) = locals.next() { + let span = mir.local_decls[local].source_info.span; + let mut error = tcx.sess.struct_span_err( + span, + &format!( + "new features like let bindings are not permitted in {}s \ + which also use short circuiting operators", + mode, + ), + ); + for (span, kind) in mir.control_flow_destroyed.iter() { + error.span_note( + *span, + &format!("use of {} here does not actually short circuit due to \ + the const evaluator presently not being able to do control flow. \ + See https://github.com/rust-lang/rust/issues/49146 for more \ + information.", kind), + ); + } + for local in locals { + let span = mir.local_decls[local].source_info.span; + error.span_note( + span, + "more locals defined here", + ); + } + error.emit(); + } + } let promoted_temps = if mode == Mode::Const { // Already computed by `mir_const_qualif`. const_promoted_temps.unwrap() } else { - Qualifier::new(tcx, def_id, mir, mode).qualify_const().1 + Checker::new(tcx, def_id, mir, mode).check_const().1 }; // In `const` and `static` everything without `StorageDead` @@ -1370,7 +1585,11 @@ impl MirPass for QualifyAndPromoteConstants { }); let terminator = block.terminator_mut(); match terminator.kind { - TerminatorKind::Drop { location: Place::Local(index), target, .. } => { + TerminatorKind::Drop { + location: Place::Base(PlaceBase::Local(index)), + target, + .. + } => { if promoted_temps.contains(index) { terminator.kind = TerminatorKind::Goto { target, @@ -1408,7 +1627,7 @@ impl MirPass for QualifyAndPromoteConstants { } } -fn args_required_const(tcx: TyCtxt, def_id: DefId) -> Option> { +fn args_required_const(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) -> Option> { let attrs = tcx.get_attrs(def_id); let attr = attrs.iter().find(|a| a.check_name("rustc_args_required_const"))?; let mut ret = FxHashSet::default(); diff --git a/src/librustc_mir/transform/qualify_min_const_fn.rs b/src/librustc_mir/transform/qualify_min_const_fn.rs index 3c1b9dbd91fa8..87459571b529c 100644 --- a/src/librustc_mir/transform/qualify_min_const_fn.rs +++ b/src/librustc_mir/transform/qualify_min_const_fn.rs @@ -21,6 +21,7 @@ pub fn is_min_const_fn( | Predicate::RegionOutlives(_) | Predicate::TypeOutlives(_) | Predicate::WellFormed(_) + | Predicate::Projection(_) | Predicate::ConstEvaluatable(..) 
=> continue, | Predicate::ObjectSafe(_) => { bug!("object safe predicate on function: {:#?}", predicate) @@ -29,13 +30,6 @@ pub fn is_min_const_fn( bug!("closure kind predicate on function: {:#?}", predicate) } Predicate::Subtype(_) => bug!("subtype predicate on function: {:#?}", predicate), - Predicate::Projection(_) => { - let span = tcx.def_span(current); - // we'll hit a `Predicate::Trait` later which will report an error - tcx.sess - .delay_span_bug(span, "projection without trait bound"); - continue; - } Predicate::Trait(pred) => { if Some(pred.def_id()) == tcx.lang_items().sized_trait() { continue; @@ -65,12 +59,6 @@ pub fn is_min_const_fn( } } - for local in mir.vars_iter() { - return Err(( - mir.local_decls[local].source_info.span, - "local variables in const fn are unstable".into(), - )); - } for local in &mir.local_decls { check_ty(tcx, local.ty, local.source_info.span)?; } @@ -147,7 +135,7 @@ fn check_rvalue( check_operand(tcx, mir, operand, span) } Rvalue::Len(place) | Rvalue::Discriminant(place) | Rvalue::Ref(_, _, place) => { - check_place(tcx, mir, place, span, PlaceMode::Read) + check_place(tcx, mir, place, span) } Rvalue::Cast(CastKind::Misc, operand, cast_ty) => { use rustc::ty::cast::CastTy; @@ -164,8 +152,11 @@ fn check_rvalue( _ => check_operand(tcx, mir, operand, span), } } + Rvalue::Cast(CastKind::MutToConstPointer, operand, _) => { + check_operand(tcx, mir, operand, span) + } Rvalue::Cast(CastKind::UnsafeFnPointer, _, _) | - Rvalue::Cast(CastKind::ClosureFnPointer, _, _) | + Rvalue::Cast(CastKind::ClosureFnPointer(_), _, _) | Rvalue::Cast(CastKind::ReifyFnPointer, _, _) => Err(( span, "function pointer casts are not allowed in const fn".into(), @@ -213,11 +204,6 @@ fn check_rvalue( } } -enum PlaceMode { - Assign, - Read, -} - fn check_statement( tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &'a Mir<'tcx>, @@ -226,11 +212,11 @@ fn check_statement( let span = statement.source_info.span; match &statement.kind { StatementKind::Assign(place, rval) => { - check_place(tcx, mir, place, span, PlaceMode::Assign)?; + check_place(tcx, mir, place, span)?; check_rvalue(tcx, mir, rval, span) } - StatementKind::FakeRead(..) => Err((span, "match in const fn is unstable".into())), + StatementKind::FakeRead(_, place) => check_place(tcx, mir, place, span), // just an assignment StatementKind::SetDiscriminant { .. } => Ok(()), @@ -243,7 +229,6 @@ fn check_statement( | StatementKind::StorageLive(_) | StatementKind::StorageDead(_) | StatementKind::Retag { .. } - | StatementKind::EscapeToRaw { .. } | StatementKind::AscribeUserType(..) 
| StatementKind::Nop => Ok(()), } @@ -257,7 +242,7 @@ fn check_operand( ) -> McfResult { match operand { Operand::Move(place) | Operand::Copy(place) => { - check_place(tcx, mir, place, span, PlaceMode::Read) + check_place(tcx, mir, place, span) } Operand::Constant(_) => Ok(()), } @@ -268,29 +253,18 @@ fn check_place( mir: &'a Mir<'tcx>, place: &Place<'tcx>, span: Span, - mode: PlaceMode, ) -> McfResult { match place { - Place::Local(l) => match mode { - PlaceMode::Assign => match mir.local_kind(*l) { - LocalKind::Temp | LocalKind::ReturnPointer => Ok(()), - LocalKind::Arg | LocalKind::Var => { - Err((span, "assignments in const fn are unstable".into())) - } - }, - PlaceMode::Read => Ok(()), - }, + Place::Base(PlaceBase::Local(_)) => Ok(()), // promoteds are always fine, they are essentially constants - Place::Promoted(_) => Ok(()), - Place::Static(_) => Err((span, "cannot access `static` items in const fn".into())), + Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Promoted(_), .. })) => Ok(()), + Place::Base(PlaceBase::Static(box Static { kind: StaticKind::Static(_), .. })) => + Err((span, "cannot access `static` items in const fn".into())), Place::Projection(proj) => { match proj.elem { + | ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } | ProjectionElem::Deref | ProjectionElem::Field(..) | ProjectionElem::Index(_) => { - check_place(tcx, mir, &proj.base, span, mode) - } - // slice patterns are unstable - | ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => { - return Err((span, "slice patterns in const fn are unstable".into())) + check_place(tcx, mir, &proj.base, span) } | ProjectionElem::Downcast(..) => { Err((span, "`match` or `if let` in `const fn` is unstable".into())) @@ -312,10 +286,10 @@ fn check_terminator( | TerminatorKind::Resume => Ok(()), TerminatorKind::Drop { location, .. } => { - check_place(tcx, mir, location, span, PlaceMode::Read) + check_place(tcx, mir, location, span) } TerminatorKind::DropAndReplace { location, value, .. } => { - check_place(tcx, mir, location, span, PlaceMode::Read)?; + check_place(tcx, mir, location, span)?; check_operand(tcx, mir, value, span) }, @@ -343,15 +317,11 @@ fn check_terminator( // some intrinsics are waved through if called inside the // standard library. Users never need to call them directly match tcx.fn_sig(def_id).abi() { - abi::Abi::RustIntrinsic => match &tcx.item_name(def_id).as_str()[..] { - | "size_of" - | "min_align_of" - | "needs_drop" - => {}, - _ => return Err(( + abi::Abi::RustIntrinsic => if !is_intrinsic_whitelisted(tcx, def_id) { + return Err(( span, "can only call a curated list of intrinsics in `min_const_fn`".into(), - )), + )) }, abi::Abi::Rust if tcx.is_min_const_fn(def_id) => {}, abi::Abi::Rust => return Err(( @@ -391,3 +361,35 @@ fn check_terminator( }, } } + +/// Returns `true` if the `def_id` refers to an intrisic which we've whitelisted +/// for being called from stable `const fn`s (`min_const_fn`). +/// +/// Adding more intrinsics requires sign-off from @rust-lang/lang. +fn is_intrinsic_whitelisted(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { + match &tcx.item_name(def_id).as_str()[..] 
{ + | "size_of" + | "min_align_of" + | "needs_drop" + // Arithmetic: + | "add_with_overflow" // ~> .overflowing_add + | "sub_with_overflow" // ~> .overflowing_sub + | "mul_with_overflow" // ~> .overflowing_mul + | "overflowing_add" // ~> .wrapping_add + | "overflowing_sub" // ~> .wrapping_sub + | "overflowing_mul" // ~> .wrapping_mul + | "saturating_add" // ~> .saturating_add + | "saturating_sub" // ~> .saturating_sub + | "unchecked_shl" // ~> .wrapping_shl + | "unchecked_shr" // ~> .wrapping_shr + | "rotate_left" // ~> .rotate_left + | "rotate_right" // ~> .rotate_right + | "ctpop" // ~> .count_ones + | "ctlz" // ~> .leading_zeros + | "cttz" // ~> .trailing_zeros + | "bswap" // ~> .swap_bytes + | "bitreverse" // ~> .reverse_bits + => true, + _ => false, + } +} diff --git a/src/librustc_mir/transform/remove_noop_landing_pads.rs b/src/librustc_mir/transform/remove_noop_landing_pads.rs index 81b010e7dcec9..b7493b25d4650 100644 --- a/src/librustc_mir/transform/remove_noop_landing_pads.rs +++ b/src/librustc_mir/transform/remove_noop_landing_pads.rs @@ -1,20 +1,10 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::TyCtxt; use rustc::mir::*; use rustc_data_structures::bit_set::BitSet; -use transform::{MirPass, MirSource}; -use util::patch::MirPatch; +use crate::transform::{MirPass, MirSource}; +use crate::util::patch::MirPatch; -/// A pass that removes no-op landing pads and replaces jumps to them with +/// A pass that removes noop landing pads and replaces jumps to them with /// `None`. This is important because otherwise LLVM generates terrible /// code for these. pub struct RemoveNoopLandingPads; @@ -34,7 +24,7 @@ pub fn remove_noop_landing_pads<'a, 'tcx>( impl MirPass for RemoveNoopLandingPads { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { remove_noop_landing_pads(tcx, mir); } @@ -44,7 +34,7 @@ impl RemoveNoopLandingPads { fn is_nop_landing_pad( &self, bb: BasicBlock, - mir: &Mir, + mir: &Mir<'_>, nop_landing_pads: &BitSet, ) -> bool { for stmt in &mir[bb].statements { @@ -57,7 +47,7 @@ impl RemoveNoopLandingPads { // These are all nops in a landing pad } - StatementKind::Assign(Place::Local(_), box Rvalue::Use(_)) => { + StatementKind::Assign(Place::Base(PlaceBase::Local(_)), box Rvalue::Use(_)) => { // Writing to a local (e.g., a drop flag) does not // turn a landing pad to a non-nop } @@ -65,8 +55,7 @@ impl RemoveNoopLandingPads { StatementKind::Assign { .. } | StatementKind::SetDiscriminant { .. } | StatementKind::InlineAsm { .. } | - StatementKind::Retag { .. } | - StatementKind::EscapeToRaw { .. } => { + StatementKind::Retag { .. 
} => { return false; } } @@ -97,7 +86,7 @@ impl RemoveNoopLandingPads { } } - fn remove_nop_landing_pads(&self, mir: &mut Mir) { + fn remove_nop_landing_pads(&self, mir: &mut Mir<'_>) { // make sure there's a single resume block let resume_block = { let patch = MirPatch::new(mir); diff --git a/src/librustc_mir/transform/rustc_peek.rs b/src/librustc_mir/transform/rustc_peek.rs index c996dc285f7e7..246f876235d71 100644 --- a/src/librustc_mir/transform/rustc_peek.rs +++ b/src/librustc_mir/transform/rustc_peek.rs @@ -1,45 +1,37 @@ -// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_target::spec::abi::{Abi}; use syntax::ast; use syntax_pos::Span; use rustc::ty::{self, TyCtxt}; +use rustc::hir::def_id::DefId; use rustc::mir::{self, Mir, Location}; use rustc_data_structures::bit_set::BitSet; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; -use dataflow::{do_dataflow, DebugFormatted}; -use dataflow::MoveDataParamEnv; -use dataflow::BitDenotation; -use dataflow::DataflowResults; -use dataflow::{DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces}; -use dataflow::move_paths::{MovePathIndex, LookupResult}; -use dataflow::move_paths::{HasMoveData, MoveData}; -use dataflow; +use crate::dataflow::{do_dataflow, DebugFormatted}; +use crate::dataflow::MoveDataParamEnv; +use crate::dataflow::BitDenotation; +use crate::dataflow::DataflowResults; +use crate::dataflow::{ + DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces +}; +use crate::dataflow::move_paths::{MovePathIndex, LookupResult}; +use crate::dataflow::move_paths::{HasMoveData, MoveData}; +use crate::dataflow; -use dataflow::has_rustc_mir_with; +use crate::dataflow::has_rustc_mir_with; pub struct SanityCheck; impl MirPass for SanityCheck { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, mir: &mut Mir<'tcx>) { - let def_id = src.def_id; - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { + let def_id = src.def_id(); if !tcx.has_attr(def_id, "rustc_mir") { - debug!("skipping rustc_peek::SanityCheck on {}", tcx.item_path_str(def_id)); + debug!("skipping rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id)); return; } else { - debug!("running rustc_peek::SanityCheck on {}", tcx.item_path_str(def_id)); + debug!("running rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id)); } let attributes = tcx.get_attrs(def_id); @@ -48,26 +40,26 @@ impl MirPass for SanityCheck { let mdpe = MoveDataParamEnv { move_data: move_data, param_env: param_env }; let dead_unwinds = BitSet::new_empty(mir.basic_blocks().len()); let flow_inits = - do_dataflow(tcx, mir, id, &attributes, &dead_unwinds, + do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds, MaybeInitializedPlaces::new(tcx, mir, &mdpe), |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i])); let flow_uninits = - do_dataflow(tcx, mir, id, &attributes, &dead_unwinds, + do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds, MaybeUninitializedPlaces::new(tcx, mir, &mdpe), |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i])); let flow_def_inits = - do_dataflow(tcx, mir, id, &attributes, &dead_unwinds, + 
do_dataflow(tcx, mir, def_id, &attributes, &dead_unwinds, DefinitelyInitializedPlaces::new(tcx, mir, &mdpe), |bd, i| DebugFormatted::new(&bd.move_data().move_paths[i])); if has_rustc_mir_with(&attributes, "rustc_peek_maybe_init").is_some() { - sanity_check_via_rustc_peek(tcx, mir, id, &attributes, &flow_inits); + sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_inits); } if has_rustc_mir_with(&attributes, "rustc_peek_maybe_uninit").is_some() { - sanity_check_via_rustc_peek(tcx, mir, id, &attributes, &flow_uninits); + sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_uninits); } if has_rustc_mir_with(&attributes, "rustc_peek_definite_init").is_some() { - sanity_check_via_rustc_peek(tcx, mir, id, &attributes, &flow_def_inits); + sanity_check_via_rustc_peek(tcx, mir, def_id, &attributes, &flow_def_inits); } if has_rustc_mir_with(&attributes, "stop_after_dataflow").is_some() { tcx.sess.fatal("stop_after_dataflow ended compilation"); @@ -93,12 +85,12 @@ impl MirPass for SanityCheck { /// errors are not intended to be used for unit tests.) pub fn sanity_check_via_rustc_peek<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, - id: ast::NodeId, + def_id: DefId, _attributes: &[ast::Attribute], - results: &DataflowResults) - where O: BitDenotation + HasMoveData<'tcx> + results: &DataflowResults<'tcx, O>) + where O: BitDenotation<'tcx, Idx=MovePathIndex> + HasMoveData<'tcx> { - debug!("sanity_check_via_rustc_peek id: {:?}", id); + debug!("sanity_check_via_rustc_peek def_id: {:?}", def_id); // FIXME: this is not DRY. Figure out way to abstract this and // `dataflow::build_sets`. (But note it is doing non-standard // stuff, so such generalization may not be realistic.) @@ -110,9 +102,9 @@ pub fn sanity_check_via_rustc_peek<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir: &Mir<'tcx>, - results: &DataflowResults, + results: &DataflowResults<'tcx, O>, bb: mir::BasicBlock) where - O: BitDenotation + HasMoveData<'tcx> + O: BitDenotation<'tcx, Idx=MovePathIndex> + HasMoveData<'tcx> { let move_data = results.0.operator.move_data(); let mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = mir[bb]; @@ -123,8 +115,8 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; assert!(args.len() == 1); let peek_arg_place = match args[0] { - mir::Operand::Copy(ref place @ mir::Place::Local(_)) | - mir::Operand::Move(ref place @ mir::Place::Local(_)) => Some(place), + mir::Operand::Copy(ref place @ mir::Place::Base(mir::PlaceBase::Local(_))) | + mir::Operand::Move(ref place @ mir::Place::Base(mir::PlaceBase::Local(_))) => Some(place), _ => None, }; @@ -162,7 +154,6 @@ fn each_block<'a, 'tcx, O>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir::StatementKind::StorageDead(_) | mir::StatementKind::InlineAsm { .. } | mir::StatementKind::Retag { .. } | - mir::StatementKind::EscapeToRaw { .. } | mir::StatementKind::AscribeUserType(..) | mir::StatementKind::Nop => continue, mir::StatementKind::SetDiscriminant{ .. } => diff --git a/src/librustc_mir/transform/simplify.rs b/src/librustc_mir/transform/simplify.rs index 592f721b2f545..14e7895af0419 100644 --- a/src/librustc_mir/transform/simplify.rs +++ b/src/librustc_mir/transform/simplify.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! A number of passes which remove various redundancies in the CFG. //! //! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the `SimplifyLocals` @@ -44,7 +34,7 @@ use rustc::mir::*; use rustc::mir::visit::{MutVisitor, Visitor, PlaceContext}; use rustc::session::config::DebugInfo; use std::borrow::Cow; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; pub struct SimplifyCfg { label: String } @@ -54,7 +44,7 @@ impl SimplifyCfg { } } -pub fn simplify_cfg(mir: &mut Mir) { +pub fn simplify_cfg(mir: &mut Mir<'_>) { CfgSimplifier::new(mir).simplify(); remove_dead_blocks(mir); @@ -69,7 +59,7 @@ impl MirPass for SimplifyCfg { fn run_pass<'a, 'tcx>(&self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, mir); simplify_cfg(mir); @@ -273,7 +263,7 @@ impl<'a, 'tcx: 'a> CfgSimplifier<'a, 'tcx> { } } -pub fn remove_dead_blocks(mir: &mut Mir) { +pub fn remove_dead_blocks(mir: &mut Mir<'_>) { let mut seen = BitSet::new_empty(mir.basic_blocks().len()); for (bb, _) in traversal::preorder(mir) { seen.insert(bb.index()); @@ -308,7 +298,7 @@ pub struct SimplifyLocals; impl MirPass for SimplifyLocals { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _: MirSource, + _: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let mut marker = DeclMarker { locals: BitSet::new_empty(mir.local_decls.len()) }; marker.visit_mir(mir); diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs index b24898095435b..db73e829c53a7 100644 --- a/src/librustc_mir/transform/simplify_branches.rs +++ b/src/librustc_mir/transform/simplify_branches.rs @@ -1,18 +1,8 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! A pass that simplifies branches when their condition is known. use rustc::ty::{TyCtxt, ParamEnv}; use rustc::mir::*; -use transform::{MirPass, MirSource}; +use crate::transform::{MirPass, MirSource}; use std::borrow::Cow; @@ -31,7 +21,7 @@ impl MirPass for SimplifyBranches { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { for block in mir.basic_blocks_mut() { let terminator = block.terminator_mut(); @@ -40,11 +30,12 @@ impl MirPass for SimplifyBranches { discr: Operand::Constant(ref c), switch_ty, ref values, ref targets, .. } => { let switch_ty = ParamEnv::empty().and(switch_ty); - if let Some(constint) = c.literal.assert_bits(tcx, switch_ty) { + let constant = c.literal.assert_bits(tcx, switch_ty); + if let Some(constant) = constant { let (otherwise, targets) = targets.split_last().unwrap(); let mut ret = TerminatorKind::Goto { target: *otherwise }; for (&v, t) in values.iter().zip(targets.iter()) { - if v == constint { + if v == constant { ret = TerminatorKind::Goto { target: *t }; break; } @@ -56,9 +47,8 @@ impl MirPass for SimplifyBranches { }, TerminatorKind::Assert { target, cond: Operand::Constant(ref c), expected, .. 
- } if (c.literal.assert_bool(tcx) == Some(true)) == expected => { - TerminatorKind::Goto { target } - }, + } if (c.literal.assert_bool(tcx) == Some(true)) == expected => + TerminatorKind::Goto { target }, TerminatorKind::FalseEdges { real_target, .. } => { TerminatorKind::Goto { target: real_target } }, diff --git a/src/librustc_mir/transform/uniform_array_move_out.rs b/src/librustc_mir/transform/uniform_array_move_out.rs index 949b8f74f71e9..616944dd7ef99 100644 --- a/src/librustc_mir/transform/uniform_array_move_out.rs +++ b/src/librustc_mir/transform/uniform_array_move_out.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // This pass converts move out from array by Subslice and // ConstIndex{.., from_end: true} to ConstIndex move out(s) from begin // of array. It allows detect error by mir borrowck and elaborate @@ -40,16 +30,16 @@ use rustc::ty; use rustc::ty::TyCtxt; use rustc::mir::*; use rustc::mir::visit::{Visitor, PlaceContext, NonUseContext}; -use transform::{MirPass, MirSource}; -use util::patch::MirPatch; use rustc_data_structures::indexed_vec::{IndexVec}; +use crate::transform::{MirPass, MirSource}; +use crate::util::patch::MirPatch; pub struct UniformArrayMoveOut; impl MirPass for UniformArrayMoveOut { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let mut patch = MirPatch::new(mir); { @@ -79,7 +69,7 @@ impl<'a, 'tcx> Visitor<'tcx> for UniformArrayMoveOutVisitor<'a, 'tcx> { from_end: false} = proj.elem { // no need to transformation } else { - let place_ty = proj.base.ty(self.mir, self.tcx).to_ty(self.tcx); + let place_ty = proj.base.ty(self.mir, self.tcx).ty; if let ty::Array(item_ty, const_size) = place_ty.sty { if let Some(size) = const_size.assert_usize(self.tcx) { assert!(size <= u32::max_value() as u64, @@ -111,7 +101,7 @@ impl<'a, 'tcx> UniformArrayMoveOutVisitor<'a, 'tcx> { let temp = self.patch.new_temp(item_ty, self.mir.source_info(location).span); self.patch.add_statement(location, StatementKind::StorageLive(temp)); self.patch.add_assign(location, - Place::Local(temp), + Place::Base(PlaceBase::Local(temp)), Rvalue::Use( Operand::Move( Place::Projection(box PlaceProjection{ @@ -123,12 +113,16 @@ impl<'a, 'tcx> UniformArrayMoveOutVisitor<'a, 'tcx> { })))); temp }).collect(); - self.patch.add_assign(location, - dst_place.clone(), - Rvalue::Aggregate(box AggregateKind::Array(item_ty), - temps.iter().map( - |x| Operand::Move(Place::Local(*x))).collect() - )); + self.patch.add_assign( + location, + dst_place.clone(), + Rvalue::Aggregate( + box AggregateKind::Array(item_ty), + temps.iter().map( + |x| Operand::Move(Place::Base(PlaceBase::Local(*x))) + ).collect() + ) + ); for temp in temps { self.patch.add_statement(location, StatementKind::StorageDead(temp)); } @@ -171,7 +165,7 @@ pub struct RestoreSubsliceArrayMoveOut; impl MirPass for RestoreSubsliceArrayMoveOut { fn run_pass<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, - _src: MirSource, + _src: MirSource<'tcx>, mir: &mut Mir<'tcx>) { let mut patch = MirPatch::new(mir); { @@ -186,7 +180,7 @@ impl MirPass for RestoreSubsliceArrayMoveOut { if let StatementKind::Assign(ref dst_place, ref rval) = statement.kind { if let 
Rvalue::Aggregate(box AggregateKind::Array(_), ref items) = **rval { let items : Vec<_> = items.iter().map(|item| { - if let Operand::Move(Place::Local(local)) = item { + if let Operand::Move(Place::Base(PlaceBase::Local(local))) = item { let local_use = &visitor.locals_use[*local]; let opt_index_and_place = Self::try_get_item_source(local_use, mir); // each local should be used twice: @@ -201,7 +195,7 @@ impl MirPass for RestoreSubsliceArrayMoveOut { let opt_src_place = items.first().and_then(|x| *x).map(|x| x.2); let opt_size = opt_src_place.and_then(|src_place| { - let src_ty = src_place.ty(mir, tcx).to_ty(tcx); + let src_ty = src_place.ty(mir, tcx).ty; if let ty::Array(_, ref size_o) = src_ty.sty { size_o.assert_usize(tcx) } else { @@ -267,7 +261,7 @@ impl RestoreSubsliceArrayMoveOut { if block.statements.len() > location.statement_index { let statement = &block.statements[location.statement_index]; if let StatementKind::Assign( - Place::Local(_), + Place::Base(PlaceBase::Local(_)), box Rvalue::Use(Operand::Move(Place::Projection(box PlaceProjection{ ref base, elem: ProjectionElem::ConstantIndex{ offset, min_length: _, from_end: false}})))) = statement.kind { diff --git a/src/librustc_mir/util/alignment.rs b/src/librustc_mir/util/alignment.rs index a96c5dd687069..788b7fdaaf912 100644 --- a/src/librustc_mir/util/alignment.rs +++ b/src/librustc_mir/util/alignment.rs @@ -1,18 +1,7 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - use rustc::ty::{self, TyCtxt}; use rustc::mir::*; -/// Return `true` if this place is allowed to be less aligned +/// Returns `true` if this place is allowed to be less aligned /// than its containing struct (because it is within a packed /// struct). pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -28,7 +17,7 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return false } - let ty = place.ty(local_decls, tcx).to_ty(tcx); + let ty = place.ty(local_decls, tcx).ty; match tcx.layout_raw(param_env.and(ty)) { Ok(layout) if layout.align.abi.bytes() == 1 => { // if the alignment is 1, the type can't be further @@ -57,7 +46,7 @@ fn is_within_packed<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // encountered a Deref, which is ABI-aligned ProjectionElem::Deref => break, ProjectionElem::Field(..) => { - let ty = base.ty(local_decls, tcx).to_ty(tcx); + let ty = base.ty(local_decls, tcx).ty; match ty.sty { ty::Adt(def, _) if def.repr.packed() => { return true diff --git a/src/librustc_mir/util/borrowck_errors.rs b/src/librustc_mir/util/borrowck_errors.rs index 8566f845f23e3..fd694ddbbd19f 100644 --- a/src/librustc_mir/util/borrowck_errors.rs +++ b/src/librustc_mir/util/borrowck_errors.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
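`is_disaligned` above reports whether a place may be less aligned than its type requires because it sits inside a `#[repr(packed)]` struct. A self-contained illustration of the situation it detects (the types here are made up for the example):

```rust
#[repr(packed)]
struct Packed {
    _pad: u8,
    // No padding is inserted before `value`, so it may end up 1-byte aligned
    // even though `u32` normally requires 4-byte alignment.
    value: u32,
}

fn main() {
    let p = Packed { _pad: 1, value: 0xdead_beef };

    // Copying the field out is fine; taking a reference to it would promise an
    // alignment the field cannot guarantee, which is the case the check above
    // has to treat specially.
    let v = p.value;
    println!("value = {:#x}", v);
}
```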
- use rustc::session::config::BorrowckMode; use rustc::ty::{self, TyCtxt}; use rustc_errors::{DiagnosticBuilder, DiagnosticId}; @@ -22,7 +12,7 @@ pub enum Origin { } impl fmt::Display for Origin { - fn fmt(&self, w: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { // If the user passed `-Z borrowck=compare`, then include // origin info as part of the error report, // otherwise @@ -148,13 +138,15 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { old_load_end_span: Option, o: Origin, ) -> DiagnosticBuilder<'cx> { + let via = |msg: &str| + if msg.is_empty() { msg.to_string() } else { format!(" (via `{}`)", msg) }; let mut err = struct_span_err!( self, new_loan_span, E0499, "cannot borrow `{}`{} as mutable more than once at a time{OGN}", desc, - opt_via, + via(opt_via), OGN = o ); if old_loan_span == new_loan_span { @@ -174,11 +166,11 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { } else { err.span_label( old_loan_span, - format!("first mutable borrow occurs here{}", old_opt_via), + format!("first mutable borrow occurs here{}", via(old_opt_via)), ); err.span_label( new_loan_span, - format!("second mutable borrow occurs here{}", opt_via), + format!("second mutable borrow occurs here{}", via(opt_via)), ); if let Some(old_load_end_span) = old_load_end_span { err.span_label(old_load_end_span, "first borrow ends here"); @@ -302,27 +294,46 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { old_load_end_span: Option, o: Origin, ) -> DiagnosticBuilder<'cx> { + let via = |msg: &str| + if msg.is_empty() { msg.to_string() } else { format!(" (via `{}`)", msg) }; let mut err = struct_span_err!( self, span, E0502, - "cannot borrow `{}`{} as {} because {} is also borrowed as {}{}{OGN}", + "cannot borrow `{}`{} as {} because {} is also borrowed \ + as {}{}{OGN}", desc_new, - msg_new, + via(msg_new), kind_new, noun_old, kind_old, - msg_old, + via(msg_old), OGN = o ); - err.span_label(span, format!("{} borrow occurs here{}", kind_new, msg_new)); - err.span_label( - old_span, - format!("{} borrow occurs here{}", kind_old, msg_old), - ); + + if msg_new == "" { + // If `msg_new` is empty, then this isn't a borrow of a union field. + err.span_label(span, format!("{} borrow occurs here", kind_new)); + err.span_label(old_span, format!("{} borrow occurs here", kind_old)); + } else { + // If `msg_new` isn't empty, then this a borrow of a union field. 
+ err.span_label( + span, + format!( + "{} borrow of `{}` -- which overlaps with `{}` -- occurs here", + kind_new, msg_new, msg_old, + ) + ); + err.span_label( + old_span, + format!("{} borrow occurs here{}", kind_old, via(msg_old)), + ); + } + if let Some(old_load_end_span) = old_load_end_span { err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old)); } + self.cancel_if_wrong_origin(err, o) } @@ -426,7 +437,7 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { fn cannot_move_out_of_interior_noncopy( self, move_from_span: Span, - ty: ty::Ty, + ty: ty::Ty<'_>, is_index: Option, o: Origin, ) -> DiagnosticBuilder<'cx> { @@ -453,7 +464,7 @@ pub trait BorrowckErrors<'cx>: Sized + Copy { fn cannot_move_out_of_interior_of_drop( self, move_from_span: Span, - container_ty: ty::Ty, + container_ty: ty::Ty<'_>, o: Origin, ) -> DiagnosticBuilder<'cx> { let mut err = struct_span_err!( diff --git a/src/librustc_mir/util/collect_writes.rs b/src/librustc_mir/util/collect_writes.rs index 3e1d0852deedf..fd94c49dd1d4a 100644 --- a/src/librustc_mir/util/collect_writes.rs +++ b/src/librustc_mir/util/collect_writes.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::mir::{Local, Location}; use rustc::mir::Mir; use rustc::mir::visit::PlaceContext; diff --git a/src/librustc_mir/util/def_use.rs b/src/librustc_mir/util/def_use.rs index 07de346e795f9..2e41c6e493bc3 100644 --- a/src/librustc_mir/util/def_use.rs +++ b/src/librustc_mir/util/def_use.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Def-use analysis. use rustc::mir::{Local, Location, Mir}; @@ -71,7 +61,7 @@ impl<'tcx> DefUseAnalysis<'tcx> { } } - /// FIXME(pcwalton): This should update the def-use chains. + // FIXME(pcwalton): this should update the def-use chains. pub fn replace_all_defs_and_uses_with(&self, local: Local, mir: &mut Mir<'tcx>, @@ -117,7 +107,7 @@ impl<'tcx> Info<'tcx> { pub fn defs_not_including_drop( &self, - ) -> iter::Filter>, fn(&&Use<'tcx>) -> bool> { + ) -> iter::Filter>, fn(&&Use<'tcx>) -> bool> { self.defs_and_uses.iter().filter(|place_use| { place_use.context.is_mutating_use() && !place_use.context.is_drop() }) diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs index 4f381e0a3d23e..2d275c9a13792 100644 --- a/src/librustc_mir/util/elaborate_drops.rs +++ b/src/librustc_mir/util/elaborate_drops.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
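The `via` closure added to src/librustc_mir/util/borrowck_errors.rs above keeps the " (via `...`)" suffix out of diagnostics when the field message is empty. A standalone sketch of the same formatting rule, with made-up message strings:

```rust
// Mirror of the `via` helper: an empty message contributes nothing, a
// non-empty one becomes a " (via `...`)" suffix.
fn via(msg: &str) -> String {
    if msg.is_empty() {
        String::new()
    } else {
        format!(" (via `{}`)", msg)
    }
}

fn main() {
    // Borrow of a plain local: no suffix is printed.
    println!("cannot borrow `x`{} as mutable more than once at a time", via(""));
    // Borrow through a union field: the overlapping field is named.
    println!("cannot borrow `u`{} as mutable more than once at a time", via("u.field"));
}
```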
- use std::fmt; use rustc::hir; use rustc::mir::*; @@ -15,10 +5,10 @@ use rustc::middle::lang_items; use rustc::traits::Reveal; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::layout::VariantIdx; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::util::IntTypeExt; use rustc_data_structures::indexed_vec::Idx; -use util::patch::MirPatch; +use crate::util::patch::MirPatch; use std::u32; @@ -132,7 +122,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> where D: DropElaborator<'b, 'tcx> { fn place_ty(&self, place: &Place<'tcx>) -> Ty<'tcx> { - place.ty(self.elaborator.mir(), self.tcx()).to_ty(self.tcx()) + place.ty(self.elaborator.mir(), self.tcx()).ty } fn tcx(&self) -> TyCtxt<'b, 'tcx, 'tcx> { @@ -154,9 +144,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> /// joined together under the `rest` subpath. They are all controlled /// by the primary drop flag, but only the last rest-field dropped /// should clear it (and it must also not clear anything else). - /// - /// FIXME: I think we should just control the flags externally - /// and then we do not need this machinery. + // + // FIXME: I think we should just control the flags externally, + // and then we do not need this machinery. pub fn elaborate_drop<'a>(&mut self, bb: BasicBlock) { debug!("elaborate_drop({:?})", self); let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep); @@ -193,13 +183,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } } - /// Return the place and move path for each field of `variant`, + /// Returns the place and move path for each field of `variant`, /// (the move path is `None` if the field is a rest field). fn move_paths_for_fields(&self, base_place: &Place<'tcx>, variant_path: D::Path, variant: &'tcx ty::VariantDef, - substs: &'tcx Substs<'tcx>) + substs: SubstsRef<'tcx>) -> Vec<(Place<'tcx>, Option)> { variant.fields.iter().enumerate().map(|(i, f)| { @@ -244,7 +234,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } } - /// Create one-half of the drop ladder for a list of fields, and return + /// Creates one-half of the drop ladder for a list of fields, and return /// the list of steps in it in reverse order, with the first step /// dropping 0 fields and so on. 
/// @@ -278,7 +268,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> ) } - /// Create a full drop ladder, consisting of 2 connected half-drop-ladders + /// Creates a full drop ladder, consisting of 2 connected half-drop-ladders /// /// For example, with 3 fields, the drop ladder is /// @@ -338,7 +328,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_ladder(fields, succ, unwind).0 } - fn open_drop_for_box<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) + fn open_drop_for_box<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock { debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs); @@ -356,7 +346,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.drop_subpath(&interior, interior_path, succ, unwind_succ) } - fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: &'tcx Substs<'tcx>) + fn open_drop_for_adt<'a>(&mut self, adt: &'tcx ty::AdtDef, substs: SubstsRef<'tcx>) -> BasicBlock { debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs); if adt.variants.len() == 0 { @@ -386,7 +376,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn open_drop_for_adt_contents(&mut self, adt: &'tcx ty::AdtDef, - substs: &'tcx Substs<'tcx>) + substs: SubstsRef<'tcx>) -> (BasicBlock, Unwind) { let (succ, unwind) = self.drop_ladder_bottom(); if !adt.is_enum() { @@ -403,7 +393,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } fn open_drop_for_multivariant(&mut self, adt: &'tcx ty::AdtDef, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, succ: BasicBlock, unwind: Unwind) -> (BasicBlock, Unwind) { @@ -422,8 +412,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> self.path, variant_index); if let Some(variant_path) = subpath { let base_place = self.place.clone().elem( - ProjectionElem::Downcast(adt, variant_index) - ); + ProjectionElem::Downcast(Some(adt.variants[variant_index].ident.name), + variant_index)); let fields = self.move_paths_for_fields( &base_place, variant_path, @@ -496,7 +486,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> // discriminant after it is free-ed, because that // way lies only trouble. 
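The drop-ladder comments above describe elaborating a struct drop into one step per field. The observable effect, sketched in ordinary Rust (the types are invented for the example):

```rust
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

struct Three {
    a: Noisy,
    b: Noisy,
    c: Noisy,
}

fn main() {
    // When `_t` goes out of scope, the elaborated drop walks the fields in
    // declaration order: "dropping a", then "dropping b", then "dropping c".
    // The unwind half of the ladder exists so that a panic in one field's
    // drop still drops the remaining fields.
    let _t = Three { a: Noisy("a"), b: Noisy("b"), c: Noisy("c") };
}
```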
let discr_ty = adt.repr.discr_type().to_ty(self.tcx()); - let discr = Place::Local(self.new_temp(discr_ty)); + let discr = Place::Base(PlaceBase::Local(self.new_temp(discr_ty))); let discr_rv = Rvalue::Discriminant(self.place.clone()); let switch_block = BasicBlockData { statements: vec![self.assign(&discr, discr_rv)], @@ -530,11 +520,11 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> mutbl: hir::Mutability::MutMutable }); let ref_place = self.new_temp(ref_ty); - let unit_temp = Place::Local(self.new_temp(tcx.mk_unit())); + let unit_temp = Place::Base(PlaceBase::Local(self.new_temp(tcx.mk_unit()))); let result = BasicBlockData { statements: vec![self.assign( - &Place::Local(ref_place), + &Place::Base(PlaceBase::Local(ref_place)), Rvalue::Ref(tcx.types.re_erased, BorrowKind::Mut { allow_two_phase_borrow: false }, self.place.clone()) @@ -543,7 +533,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> kind: TerminatorKind::Call { func: Operand::function_handle(tcx, drop_fn.def_id, substs, self.source_info.span), - args: vec![Operand::Move(Place::Local(ref_place))], + args: vec![Operand::Move(Place::Base(PlaceBase::Local(ref_place)))], destination: Some((unit_temp, succ)), cleanup: unwind.into_option(), from_hir_call: true, @@ -588,8 +578,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> ty: ety, mutbl: hir::Mutability::MutMutable }); - let ptr = &Place::Local(self.new_temp(ref_ty)); - let can_go = &Place::Local(self.new_temp(tcx.types.bool)); + let ptr = &Place::Base(PlaceBase::Local(self.new_temp(ref_ty))); + let can_go = &Place::Base(PlaceBase::Local(self.new_temp(tcx.types.bool))); let one = self.constant_usize(1); let (ptr_next, cur_next) = if ptr_based { @@ -597,23 +587,23 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> tcx.types.re_erased, BorrowKind::Mut { allow_two_phase_borrow: false }, Place::Projection(Box::new(Projection { - base: Place::Local(cur), + base: Place::Base(PlaceBase::Local(cur)), elem: ProjectionElem::Deref, })) ), - Rvalue::BinaryOp(BinOp::Offset, copy(&Place::Local(cur)), one)) + Rvalue::BinaryOp(BinOp::Offset, copy(&Place::Base(PlaceBase::Local(cur))), one)) } else { (Rvalue::Ref( tcx.types.re_erased, BorrowKind::Mut { allow_two_phase_borrow: false }, self.place.clone().index(cur)), - Rvalue::BinaryOp(BinOp::Add, copy(&Place::Local(cur)), one)) + Rvalue::BinaryOp(BinOp::Add, copy(&Place::Base(PlaceBase::Local(cur))), one)) }; let drop_block = BasicBlockData { statements: vec![ self.assign(ptr, ptr_next), - self.assign(&Place::Local(cur), cur_next) + self.assign(&Place::Base(PlaceBase::Local(cur)), cur_next) ], is_cleanup: unwind.is_cleanup(), terminator: Some(Terminator { @@ -627,7 +617,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let loop_block = BasicBlockData { statements: vec![ self.assign(can_go, Rvalue::BinaryOp(BinOp::Eq, - copy(&Place::Local(cur)), + copy(&Place::Base(PlaceBase::Local(cur))), copy(length_or_end))) ], is_cleanup: unwind.is_cleanup(), @@ -677,8 +667,8 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> let move_ = |place: &Place<'tcx>| Operand::Move(place.clone()); let tcx = self.tcx(); - let size = &Place::Local(self.new_temp(tcx.types.usize)); - let size_is_zero = &Place::Local(self.new_temp(tcx.types.bool)); + let size = &Place::Base(PlaceBase::Local(self.new_temp(tcx.types.usize))); + let size_is_zero = &Place::Base(PlaceBase::Local(self.new_temp(tcx.types.bool))); let base_block = BasicBlockData { statements: vec![ self.assign(size, Rvalue::NullaryOp(NullOp::SizeOf, ety)), @@ -713,9 +703,12 @@ impl<'l, 'b, 'tcx, D> 
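The pointer-based loop built above advances a cursor through the array, dropping one element per iteration until the cursor reaches the end pointer. A rough standalone sketch of that shape using raw pointers (an illustration of the control flow, not the emitted MIR):

```rust
use std::ptr;

/// Drop every element of `slice` in place, front to back, the way the
/// elaborated loop does: `cur` starts at the first element, `end` one past
/// the last, and each iteration drops `*cur` and offsets `cur` by one.
unsafe fn drop_all_in_place<T>(slice: &mut [T]) {
    let mut cur = slice.as_mut_ptr();
    let end = cur.add(slice.len());
    while cur != end {
        ptr::drop_in_place(cur);
        cur = cur.add(1);
    }
}

fn main() {
    let mut data = vec![String::from("x"), String::from("y")];
    unsafe {
        drop_all_in_place(&mut data[..]);
        // The elements are now logically dead; shrink the length so the Vec
        // does not drop them a second time.
        data.set_len(0);
    }
}
```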
DropCtxt<'l, 'b, 'tcx, D> }; let cur = self.new_temp(iter_ty); - let length = Place::Local(self.new_temp(tcx.types.usize)); + let length = Place::Base(PlaceBase::Local(self.new_temp(tcx.types.usize))); let length_or_end = if ptr_based { - Place::Local(self.new_temp(iter_ty)) + // FIXME check if we want to make it return a `Place` directly + // if all use sites want a `Place::Base` anyway. + let temp = self.new_temp(iter_ty); + Place::Base(PlaceBase::Local(temp)) } else { length.clone() }; @@ -738,13 +731,13 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> unwind, ptr_based); - let cur = Place::Local(cur); + let cur = Place::Base(PlaceBase::Local(cur)); let zero = self.constant_usize(0); let mut drop_block_stmts = vec![]; drop_block_stmts.push(self.assign(&length, Rvalue::Len(self.place.clone()))); if ptr_based { let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place)); - let tmp = Place::Local(self.new_temp(tmp_ty)); + let tmp = Place::Base(PlaceBase::Local(self.new_temp(tmp_ty))); // tmp = &mut P; // cur = tmp as *mut T; // end = Offset(cur, len); @@ -828,7 +821,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> } } - /// Return a basic block that drop a place using the context + /// Returns a basic block that drop a place using the context /// and path in `c`. If `mode` is something, also clear `c` /// according to it. /// @@ -877,7 +870,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn box_free_block<'a>( &mut self, adt: &'tcx ty::AdtDef, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, target: BasicBlock, unwind: Unwind, ) -> BasicBlock { @@ -888,12 +881,12 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> fn unelaborated_free_block<'a>( &mut self, adt: &'tcx ty::AdtDef, - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, target: BasicBlock, unwind: Unwind ) -> BasicBlock { let tcx = self.tcx(); - let unit_temp = Place::Local(self.new_temp(tcx.mk_unit())); + let unit_temp = Place::Base(PlaceBase::Local(self.new_temp(tcx.mk_unit()))); let free_func = tcx.require_lang_item(lang_items::BoxFreeFnLangItem); let args = adt.variants[VariantIdx::new(0)].fields.iter().enumerate().map(|(i, f)| { let field = Field::new(i); @@ -973,7 +966,9 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D> span: self.source_info.span, ty: self.tcx().types.usize, user_ty: None, - literal: ty::Const::from_usize(self.tcx(), val.into()), + literal: self.tcx().mk_const( + ty::Const::from_usize(self.tcx(), val.into()) + ), }) } diff --git a/src/librustc_mir/util/graphviz.rs b/src/librustc_mir/util/graphviz.rs index 58963a40ad09e..f87714b58c442 100644 --- a/src/librustc_mir/util/graphviz.rs +++ b/src/librustc_mir/util/graphviz.rs @@ -1,17 +1,7 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use dot; use rustc::hir::def_id::DefId; use rustc::mir::*; use rustc::ty::TyCtxt; +use rustc_data_structures::indexed_vec::Idx; use std::fmt::Debug; use std::io::{self, Write}; @@ -31,14 +21,25 @@ pub fn write_mir_graphviz<'tcx, W>(tcx: TyCtxt<'_, '_, 'tcx>, Ok(()) } +// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so +// it does not have to be user friendly. 
+pub fn graphviz_safe_def_name(def_id: DefId) -> String { + format!( + "{}_{}_{}", + def_id.krate.index(), + def_id.index.address_space().index(), + def_id.index.as_array_index(), + ) +} + /// Write a graphviz DOT graph of the MIR. pub fn write_mir_fn_graphviz<'tcx, W>(tcx: TyCtxt<'_, '_, 'tcx>, def_id: DefId, - mir: &Mir, + mir: &Mir<'_>, w: &mut W) -> io::Result<()> where W: Write { - writeln!(w, "digraph Mir_{} {{", tcx.hir().as_local_node_id(def_id).unwrap())?; + writeln!(w, "digraph Mir_{} {{", graphviz_safe_def_name(def_id))?; // Global graph properties writeln!(w, r#" graph [fontname="monospace"];"#)?; @@ -68,7 +69,7 @@ pub fn write_mir_fn_graphviz<'tcx, W>(tcx: TyCtxt<'_, '_, 'tcx>, /// `init` and `fini` are callbacks for emitting additional rows of /// data (using HTML enclosed with `` in the emitted text). pub fn write_node_label(block: BasicBlock, - mir: &Mir, + mir: &Mir<'_>, w: &mut W, num_cols: u32, init: INIT, @@ -110,7 +111,7 @@ pub fn write_node_label(block: BasicBlock, } /// Write a graphviz DOT node for the given basic block. -fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { +fn write_node(block: BasicBlock, mir: &Mir<'_>, w: &mut W) -> io::Result<()> { // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables. write!(w, r#" {} [shape="none", label=<"#, node(block))?; write_node_label(block, mir, w, 1, |_| Ok(()), |_| Ok(()))?; @@ -119,7 +120,7 @@ fn write_node(block: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<( } /// Write graphviz DOT edges with labels between the given basic block and all of its successors. -fn write_edges(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result<()> { +fn write_edges(source: BasicBlock, mir: &Mir<'_>, w: &mut W) -> io::Result<()> { let terminator = mir[source].terminator(); let labels = terminator.kind.fmt_successor_labels(); @@ -135,20 +136,24 @@ fn write_edges(source: BasicBlock, mir: &Mir, w: &mut W) -> io::Result /// all the variables and temporaries. fn write_graph_label<'a, 'gcx, 'tcx, W: Write>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId, - mir: &Mir, + mir: &Mir<'_>, w: &mut W) -> io::Result<()> { - write!(w, " label= 0 { write!(w, ", ")?; } - write!(w, "{:?}: {}", Place::Local(arg), escape(&mir.local_decls[arg].ty))?; + write!(w, + "{:?}: {}", + Place::Base(PlaceBase::Local(arg)), + escape(&mir.local_decls[arg].ty) + )?; } - write!(w, ") -> {}", escape(mir.return_ty()))?; + write!(w, ") -> {}", escape(&mir.return_ty()))?; write!(w, r#"
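`graphviz_safe_def_name` above builds a node name for the DOT output purely from numeric parts of the `DefId`, so the result always matches `[0-9A-Za-z_]*`. A standalone sketch of the same idea (the three numeric components stand in for the real `DefId` fields):

```rust
// Compose a DOT-safe identifier from numeric components and check that only
// characters in [0-9A-Za-z_] remain.
fn graphviz_safe_name(krate: usize, address_space: usize, index: usize) -> String {
    format!("{}_{}_{}", krate, address_space, index)
}

fn main() {
    let name = graphviz_safe_name(0, 1, 42);
    assert!(name.chars().all(|c| c.is_ascii_alphanumeric() || c == '_'));
    // This is how the name is used: as part of the digraph's identifier.
    println!("digraph Mir_{} {{ ... }}", name);
}
```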
"#)?; for local in mir.vars_and_temps_iter() { @@ -161,10 +166,10 @@ fn write_graph_label<'a, 'gcx, 'tcx, W: Write>(tcx: TyCtxt<'a, 'gcx, 'tcx>, if let Some(name) = decl.name { write!(w, r#"{:?}: {}; // {}
"#, - Place::Local(local), escape(&decl.ty), name)?; + Place::Base(PlaceBase::Local(local)), escape(&decl.ty), name)?; } else { write!(w, r#"let mut {:?}: {};
"#, - Place::Local(local), escape(&decl.ty))?; + Place::Base(PlaceBase::Local(local)), escape(&decl.ty))?; } } diff --git a/src/librustc_mir/util/liveness.rs b/src/librustc_mir/util/liveness.rs index 22554acc6adaa..cbdd50cf4052a 100644 --- a/src/librustc_mir/util/liveness.rs +++ b/src/librustc_mir/util/liveness.rs @@ -1,31 +1,22 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Liveness analysis which computes liveness of MIR local variables at the boundary of basic blocks +//! Liveness analysis which computes liveness of MIR local variables at the boundary of basic +//! blocks. //! //! This analysis considers references as being used only at the point of the //! borrow. This means that this does not track uses because of references that //! already exist: //! -//! ```Rust -//! fn foo() { -//! x = 0; -//! // `x` is live here -//! GLOBAL = &x: *const u32; -//! // but not here, even while it can be accessed through `GLOBAL`. -//! foo(); -//! x = 1; -//! // `x` is live again here, because it is assigned to `OTHER_GLOBAL` -//! OTHER_GLOBAL = &x: *const u32; -//! // ... -//! } +//! ```rust +//! fn foo() { +//! x = 0; +//! // `x` is live here ... +//! GLOBAL = &x: *const u32; +//! // ... but not here, even while it can be accessed through `GLOBAL`. +//! foo(); +//! x = 1; +//! // `x` is live again here, because it is assigned to `OTHER_GLOBAL`. +//! OTHER_GLOBAL = &x: *const u32; +//! // ... +//! } //! ``` //! //! This means that users of this analysis still have to check whether @@ -38,17 +29,17 @@ use rustc::mir::visit::{ }; use rustc::mir::Local; use rustc::mir::*; -use rustc::ty::{item_path, TyCtxt}; +use rustc::ty::{self, TyCtxt}; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::work_queue::WorkQueue; use std::fs; use std::io::{self, Write}; use std::path::{Path, PathBuf}; -use transform::MirSource; -use util::pretty::{dump_enabled, write_basic_block, write_mir_intro}; +use crate::transform::MirSource; +use crate::util::pretty::{dump_enabled, write_basic_block, write_mir_intro}; -pub type LiveVarSet = BitSet; +pub type LiveVarSet = BitSet; /// This gives the result of the liveness analysis at the boundary of /// basic blocks. @@ -57,66 +48,27 @@ pub type LiveVarSet = BitSet; /// liveness for. This is often `Local`, in which case we computed /// liveness for all variables -- but it can also be some other type, /// which indicates a subset of the variables within the graph. -pub struct LivenessResult { +pub struct LivenessResult { /// Live variables on exit to each basic block. This is equal to /// the union of the `ins` for each successor. - pub outs: IndexVec>, + pub outs: IndexVec, } -/// Defines the mapping to/from the MIR local variables (`Local`) to -/// the "live variable indices" we are using in a particular -/// computation. 
-pub trait LiveVariableMap { - type LiveVar; - - fn from_local(&self, local: Local) -> Option; - fn from_live_var(&self, local: Self::LiveVar) -> Local; - fn num_variables(&self) -> usize; -} - -#[derive(Debug)] -pub struct IdentityMap<'a, 'tcx: 'a> { - mir: &'a Mir<'tcx>, -} - -impl<'a, 'tcx> IdentityMap<'a, 'tcx> { - pub fn new(mir: &'a Mir<'tcx>) -> Self { - Self { mir } - } -} - -impl<'a, 'tcx> LiveVariableMap for IdentityMap<'a, 'tcx> { - type LiveVar = Local; - - fn from_local(&self, local: Local) -> Option { - Some(local) - } - - fn from_live_var(&self, local: Self::LiveVar) -> Local { - local - } - - fn num_variables(&self) -> usize { - self.mir.local_decls.len() - } -} - -/// Compute which local variables are live within the given function +/// Computes which local variables are live within the given function /// `mir`. The liveness mode `mode` determines what sorts of uses are /// considered to make a variable live (e.g., do drops count?). -pub fn liveness_of_locals<'tcx, V: Idx>( +pub fn liveness_of_locals<'tcx>( mir: &Mir<'tcx>, - map: &impl LiveVariableMap, -) -> LivenessResult { - let num_live_vars = map.num_variables(); +) -> LivenessResult { + let num_live_vars = mir.local_decls.len(); - let def_use: IndexVec<_, DefsUses> = mir + let def_use: IndexVec<_, DefsUses> = mir .basic_blocks() .iter() - .map(|b| block(map, b, num_live_vars)) + .map(|b| block(b, num_live_vars)) .collect(); - let mut outs: IndexVec<_, LiveVarSet> = mir + let mut outs: IndexVec<_, LiveVarSet> = mir .basic_blocks() .indices() .map(|_| LiveVarSet::new_empty(num_live_vars)) @@ -220,27 +172,23 @@ pub fn categorize<'tcx>(context: PlaceContext<'tcx>) -> Option { } } -struct DefsUsesVisitor<'lv, V, M> -where - V: Idx, - M: LiveVariableMap + 'lv, +struct DefsUsesVisitor { - map: &'lv M, - defs_uses: DefsUses, + defs_uses: DefsUses, } #[derive(Eq, PartialEq, Clone)] -struct DefsUses { - defs: LiveVarSet, - uses: LiveVarSet, +struct DefsUses { + defs: LiveVarSet, + uses: LiveVarSet, } -impl DefsUses { - fn apply(&self, bits: &mut LiveVarSet) -> bool { +impl DefsUses { + fn apply(&self, bits: &mut LiveVarSet) -> bool { bits.subtract(&self.defs) | bits.union(&self.uses) } - fn add_def(&mut self, index: V) { + fn add_def(&mut self, index: Local) { // If it was used already in the block, remove that use // now that we found a definition. // @@ -254,7 +202,7 @@ impl DefsUses { self.defs.insert(index); } - fn add_use(&mut self, index: V) { + fn add_use(&mut self, index: Local) { // Inverse of above. 
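The `apply` method above is the per-block transfer function of the backward liveness analysis: live-on-entry is live-on-exit minus the block's definitions, plus its uses. A tiny standalone model of that equation over plain sets (the local numbering is invented):

```rust
use std::collections::HashSet;

// live_in = (live_out - defs) union uses
fn apply(live_out: &HashSet<u32>, defs: &HashSet<u32>, uses: &HashSet<u32>) -> HashSet<u32> {
    let mut live_in: HashSet<u32> = live_out.difference(defs).cloned().collect();
    live_in.extend(uses.iter().cloned());
    live_in
}

fn main() {
    // Block body: `_0 = _1 + _2` -- defines local 0, uses locals 1 and 2.
    let defs: HashSet<u32> = [0].into_iter().collect();
    let uses: HashSet<u32> = [1, 2].into_iter().collect();
    // Suppose local 0 is needed after the block.
    let live_out: HashSet<u32> = [0].into_iter().collect();

    let live_in = apply(&live_out, &defs, &uses);
    assert!(!live_in.contains(&0)); // killed by the definition
    assert!(live_in.contains(&1) && live_in.contains(&2)); // generated by the uses
}
```

The real `apply` mutates a bitset in place and returns whether anything changed, which is what drives the work-queue fixpoint to termination.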
// // Example: @@ -270,29 +218,22 @@ impl DefsUses { } } -impl<'tcx, 'lv, V, M> Visitor<'tcx> for DefsUsesVisitor<'lv, V, M> -where - V: Idx, - M: LiveVariableMap, +impl<'tcx> Visitor<'tcx> for DefsUsesVisitor { fn visit_local(&mut self, &local: &Local, context: PlaceContext<'tcx>, _: Location) { - if let Some(v_index) = self.map.from_local(local) { - match categorize(context) { - Some(DefUse::Def) => self.defs_uses.add_def(v_index), - Some(DefUse::Use) | Some(DefUse::Drop) => self.defs_uses.add_use(v_index), - _ => (), - } + match categorize(context) { + Some(DefUse::Def) => self.defs_uses.add_def(local), + Some(DefUse::Use) | Some(DefUse::Drop) => self.defs_uses.add_use(local), + _ => (), } } } -fn block<'tcx, V: Idx>( - map: &impl LiveVariableMap, +fn block<'tcx>( b: &BasicBlockData<'tcx>, locals: usize, -) -> DefsUses { +) -> DefsUses { let mut visitor = DefsUsesVisitor { - map, defs_uses: DefsUses { defs: LiveVarSet::new_empty(locals), uses: LiveVarSet::new_empty(locals), @@ -314,36 +255,34 @@ fn block<'tcx, V: Idx>( visitor.defs_uses } -pub fn dump_mir<'a, 'tcx, V: Idx>( +pub fn dump_mir<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, pass_name: &str, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, - map: &impl LiveVariableMap, - result: &LivenessResult, + result: &LivenessResult, ) { if !dump_enabled(tcx, pass_name, source) { return; } - let node_path = item_path::with_forced_impl_filename_line(|| { + let node_path = ty::print::with_forced_impl_filename_line(|| { // see notes on #41697 below - tcx.item_path_str(source.def_id) + tcx.def_path_str(source.def_id()) }); - dump_matched_mir_node(tcx, pass_name, &node_path, source, mir, map, result); + dump_matched_mir_node(tcx, pass_name, &node_path, source, mir, result); } -fn dump_matched_mir_node<'a, 'tcx, V: Idx>( +fn dump_matched_mir_node<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, pass_name: &str, node_path: &str, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, - map: &dyn LiveVariableMap, - result: &LivenessResult, + result: &LivenessResult, ) { let mut file_path = PathBuf::new(); file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir)); - let item_id = tcx.hir().as_local_node_id(source.def_id).unwrap(); + let item_id = tcx.hir().as_local_hir_id(source.def_id()).unwrap(); let file_name = format!("rustc.node{}{}-liveness.mir", item_id, pass_name); file_path.push(&file_name); let _ = fs::File::create(&file_path).and_then(|mut file| { @@ -351,25 +290,23 @@ fn dump_matched_mir_node<'a, 'tcx, V: Idx>( writeln!(file, "// source = {:?}", source)?; writeln!(file, "// pass_name = {}", pass_name)?; writeln!(file, "")?; - write_mir_fn(tcx, source, mir, map, &mut file, result)?; + write_mir_fn(tcx, source, mir, &mut file, result)?; Ok(()) }); } -pub fn write_mir_fn<'a, 'tcx, V: Idx>( +pub fn write_mir_fn<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, - src: MirSource, + src: MirSource<'tcx>, mir: &Mir<'tcx>, - map: &dyn LiveVariableMap, w: &mut dyn Write, - result: &LivenessResult, + result: &LivenessResult, ) -> io::Result<()> { write_mir_intro(tcx, src, mir, w)?; for block in mir.basic_blocks().indices() { - let print = |w: &mut dyn Write, prefix, result: &IndexVec>| { + let print = |w: &mut dyn Write, prefix, result: &IndexVec| { let live: Vec = result[block] .iter() - .map(|v| map.from_live_var(v)) .map(|local| format!("{:?}", local)) .collect(); writeln!(w, "{} {{{}}}", prefix, live.join(", ")) diff --git a/src/librustc_mir/util/mod.rs b/src/librustc_mir/util/mod.rs index 1e624081bcfb5..1a5a2a92247dd 100644 --- 
a/src/librustc_mir/util/mod.rs +++ b/src/librustc_mir/util/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use core::unicode::property::Pattern_White_Space; use rustc::ty; use syntax_pos::Span; @@ -25,7 +15,7 @@ pub mod collect_writes; pub use self::alignment::is_disaligned; pub use self::pretty::{dump_enabled, dump_mir, write_mir_pretty, PassWhere}; -pub use self::graphviz::{write_mir_graphviz}; +pub use self::graphviz::{graphviz_safe_def_name, write_mir_graphviz}; pub use self::graphviz::write_node_label as write_graphviz_node_label; /// If possible, suggest replacing `ref` with `ref mut`. diff --git a/src/librustc_mir/util/patch.rs b/src/librustc_mir/util/patch.rs index 807c8386693f8..366cd71f6d4e9 100644 --- a/src/librustc_mir/util/patch.rs +++ b/src/librustc_mir/util/patch.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::Ty; use rustc::mir::*; use rustc_data_structures::indexed_vec::{IndexVec, Idx}; @@ -180,14 +170,14 @@ impl<'tcx> MirPatch<'tcx> { } } - pub fn source_info_for_index(data: &BasicBlockData, loc: Location) -> SourceInfo { + pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo { match data.statements.get(loc.statement_index) { Some(stmt) => stmt.source_info, None => data.terminator().source_info } } - pub fn source_info_for_location(&self, mir: &Mir, loc: Location) -> SourceInfo { + pub fn source_info_for_location(&self, mir: &Mir<'_>, loc: Location) -> SourceInfo { let data = match loc.block.index().checked_sub(mir.basic_blocks().len()) { Some(new) => &self.new_blocks[new], None => &mir[loc.block] diff --git a/src/librustc_mir/util/pretty.rs b/src/librustc_mir/util/pretty.rs index 0db9b23925a1f..5e5e451b75bf2 100644 --- a/src/librustc_mir/util/pretty.rs +++ b/src/librustc_mir/util/pretty.rs @@ -1,19 +1,7 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use rustc::hir; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::*; use rustc::mir::visit::Visitor; use rustc::ty::{self, TyCtxt}; -use rustc::ty::item_path; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::indexed_vec::Idx; use std::fmt::Display; @@ -22,7 +10,7 @@ use std::fs; use std::io::{self, Write}; use std::path::{Path, PathBuf}; use super::graphviz::write_mir_fn_graphviz; -use transform::MirSource; +use crate::transform::MirSource; const INDENT: &str = " "; /// Alignment for lining up comments following MIR statements @@ -79,7 +67,7 @@ pub fn dump_mir<'a, 'gcx, 'tcx, F>( pass_num: Option<&dyn Display>, pass_name: &str, disambiguator: &dyn Display, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, extra_data: F, ) where @@ -89,9 +77,9 @@ pub fn dump_mir<'a, 'gcx, 'tcx, F>( return; } - let node_path = item_path::with_forced_impl_filename_line(|| { + let node_path = ty::print::with_forced_impl_filename_line(|| { // see notes on #41697 below - tcx.item_path_str(source.def_id) + tcx.def_path_str(source.def_id()) }); dump_matched_mir_node( tcx, @@ -108,15 +96,15 @@ pub fn dump_mir<'a, 'gcx, 'tcx, F>( pub fn dump_enabled<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, pass_name: &str, - source: MirSource, + source: MirSource<'tcx>, ) -> bool { let filters = match tcx.sess.opts.debugging_opts.dump_mir { None => return false, Some(ref filters) => filters, }; - let node_path = item_path::with_forced_impl_filename_line(|| { + let node_path = ty::print::with_forced_impl_filename_line(|| { // see notes on #41697 below - tcx.item_path_str(source.def_id) + tcx.def_path_str(source.def_id()) }); filters.split('|').any(|or_filter| { or_filter.split('&').all(|and_filter| { @@ -126,7 +114,7 @@ pub fn dump_enabled<'a, 'gcx, 'tcx>( } // #41697 -- we use `with_forced_impl_filename_line()` because -// `item_path_str()` would otherwise trigger `type_of`, and this can +// `def_path_str()` would otherwise trigger `type_of`, and this can // run while we are already attempting to evaluate `type_of`. fn dump_matched_mir_node<'a, 'gcx, 'tcx, F>( @@ -135,13 +123,13 @@ fn dump_matched_mir_node<'a, 'gcx, 'tcx, F>( pass_name: &str, node_path: &str, disambiguator: &dyn Display, - source: MirSource, + source: MirSource<'tcx>, mir: &Mir<'tcx>, mut extra_data: F, ) where F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>, { - let _: io::Result<()> = try_block! { + let _: io::Result<()> = try { let mut file = create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, source)?; writeln!(file, "// MIR for `{}`", node_path)?; writeln!(file, "// source = {:?}", source)?; @@ -152,15 +140,16 @@ fn dump_matched_mir_node<'a, 'gcx, 'tcx, F>( } writeln!(file, "")?; extra_data(PassWhere::BeforeCFG, &mut file)?; + write_user_type_annotations(mir, &mut file)?; write_mir_fn(tcx, source, mir, &mut extra_data, &mut file)?; extra_data(PassWhere::AfterCFG, &mut file)?; }; if tcx.sess.opts.debugging_opts.dump_mir_graphviz { - let _: io::Result<()> = try_block! 
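`dump_enabled` above decides whether a pass's MIR should be dumped by matching the `-Z dump-mir` filter string: alternatives are separated by `|`, and each alternative is a `&`-separated list of conditions that must all hold. A standalone sketch of that matching, under the assumption that each condition matches when it is `"all"` or a substring of the pass name or item path:

```rust
fn dump_enabled(filters: &str, pass_name: &str, node_path: &str) -> bool {
    filters.split('|').any(|or_filter| {
        or_filter.split('&').all(|and_filter| {
            and_filter == "all"
                || pass_name.contains(and_filter)
                || node_path.contains(and_filter)
        })
    })
}

fn main() {
    assert!(dump_enabled("all", "SimplifyCfg", "my_crate::main"));
    // Both conditions of the single alternative must match.
    assert!(dump_enabled("Simplify&main", "SimplifyCfg", "my_crate::main"));
    // Neither alternative matches this pass.
    assert!(!dump_enabled("Inline|ConstProp", "SimplifyCfg", "my_crate::main"));
}
```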
{ + let _: io::Result<()> = try { let mut file = create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, source)?; - write_mir_fn_graphviz(tcx, source.def_id, mir, &mut file)?; + write_mir_fn_graphviz(tcx, source.def_id(), mir, &mut file)?; }; } } @@ -174,7 +163,7 @@ fn dump_path( pass_num: Option<&dyn Display>, pass_name: &str, disambiguator: &dyn Display, - source: MirSource, + source: MirSource<'tcx>, ) -> PathBuf { let promotion_id = match source.promoted { Some(id) => format!("-{:?}", id), @@ -193,13 +182,32 @@ fn dump_path( let mut file_path = PathBuf::new(); file_path.push(Path::new(&tcx.sess.opts.debugging_opts.dump_mir_dir)); - let item_name = tcx.hir() - .def_path(source.def_id) + let item_name = tcx + .def_path(source.def_id()) .to_filename_friendly_no_crate(); + // All drop shims have the same DefId, so we have to add the type + // to get unique file names. + let shim_disambiguator = match source.instance { + ty::InstanceDef::DropGlue(_, Some(ty)) => { + // Unfortunately, pretty-printed typed are not very filename-friendly. + // We dome some filtering. + let mut s = ".".to_owned(); + s.extend(ty.to_string() + .chars() + .filter_map(|c| match c { + ' ' => None, + ':' | '<' | '>' => Some('_'), + c => Some(c) + })); + s + } + _ => String::new(), + }; let file_name = format!( - "rustc.{}{}{}.{}.{}.{}", + "rustc.{}{}{}{}.{}.{}.{}", item_name, + shim_disambiguator, promotion_id, pass_num, pass_name, @@ -222,7 +230,7 @@ pub(crate) fn create_dump_file( pass_num: Option<&dyn Display>, pass_name: &str, disambiguator: &dyn Display, - source: MirSource, + source: MirSource<'tcx>, ) -> io::Result { let file_path = dump_path(tcx, extension, pass_num, pass_name, disambiguator, source); if let Some(parent) = file_path.parent() { @@ -262,7 +270,7 @@ pub fn write_mir_pretty<'a, 'gcx, 'tcx>( for (i, mir) in mir.promoted.iter_enumerated() { writeln!(w, "")?; let src = MirSource { - def_id, + instance: ty::InstanceDef::Item(def_id), promoted: Some(i), }; write_mir_fn(tcx, src, mir, &mut |_, _| Ok(()), w)?; @@ -273,7 +281,7 @@ pub fn write_mir_pretty<'a, 'gcx, 'tcx>( pub fn write_mir_fn<'a, 'gcx, 'tcx, F>( tcx: TyCtxt<'a, 'gcx, 'tcx>, - src: MirSource, + src: MirSource<'tcx>, mir: &Mir<'tcx>, extra_data: &mut F, w: &mut dyn Write, @@ -308,9 +316,8 @@ where let data = &mir[block]; // Basic block label at the top. - let cleanup_text = if data.is_cleanup { " // cleanup" } else { "" }; - let lbl = format!("{}{:?}: {{", INDENT, block); - writeln!(w, "{0:1$}{2}", lbl, ALIGN, cleanup_text)?; + let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" }; + writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?; // List of statements in the middle. let mut current_location = Location { @@ -446,7 +453,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'tcx> for ExtraComments<'cx, 'gcx, 'tcx> { } } -fn comment(tcx: TyCtxt, SourceInfo { span, scope }: SourceInfo) -> String { +fn comment(tcx: TyCtxt<'_, '_, '_>, SourceInfo { span, scope }: SourceInfo) -> String { format!( "scope {} at {}", scope.index(), @@ -458,8 +465,8 @@ fn comment(tcx: TyCtxt, SourceInfo { span, scope }: SourceInfo) -> String { /// /// Returns the total number of variables printed. fn write_scope_tree( - tcx: TyCtxt, - mir: &Mir, + tcx: TyCtxt<'_, '_, '_>, + mir: &Mir<'_>, scope_tree: &FxHashMap>, w: &mut dyn Write, parent: SourceScope, @@ -528,8 +535,8 @@ fn write_scope_tree( /// local variables (both user-defined bindings and compiler temporaries). 
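The new `shim_disambiguator` above makes drop-shim dump files unique by appending a filtered form of the pretty-printed type, since all drop shims share one `DefId`. The same filtering, as a standalone function with an example type string:

```rust
// Strip spaces and replace `:`, `<`, `>` so a pretty-printed type becomes
// something usable inside a file name.
fn filename_friendly(ty: &str) -> String {
    let mut s = ".".to_owned();
    s.extend(ty.chars().filter_map(|c| match c {
        ' ' => None,
        ':' | '<' | '>' => Some('_'),
        c => Some(c),
    }));
    s
}

fn main() {
    assert_eq!(filename_friendly("std::vec::Vec<u32>"), ".std__vec__Vec_u32_");
}
```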
pub fn write_mir_intro<'a, 'gcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'tcx>, - src: MirSource, - mir: &Mir, + src: MirSource<'tcx>, + mir: &Mir<'_>, w: &mut dyn Write, ) -> io::Result<()> { write_mir_sig(tcx, src, mir, w)?; @@ -568,40 +575,50 @@ pub fn write_mir_intro<'a, 'gcx, 'tcx>( Ok(()) } -fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut dyn Write) -> io::Result<()> { - let id = tcx.hir().as_local_node_id(src.def_id).unwrap(); - let body_owner_kind = tcx.hir().body_owner_kind(id); - match (body_owner_kind, src.promoted) { - (_, Some(i)) => write!(w, "{:?} in", i)?, - (hir::BodyOwnerKind::Fn, _) => write!(w, "fn")?, - (hir::BodyOwnerKind::Const, _) => write!(w, "const")?, - (hir::BodyOwnerKind::Static(hir::MutImmutable), _) => write!(w, "static")?, - (hir::BodyOwnerKind::Static(hir::MutMutable), _) => write!(w, "static mut")?, +fn write_mir_sig( + tcx: TyCtxt<'_, '_, '_>, + src: MirSource<'tcx>, + mir: &Mir<'_>, + w: &mut dyn Write, +) -> io::Result<()> { + use rustc::hir::def::Def; + + trace!("write_mir_sig: {:?}", src.instance); + let descr = tcx.describe_def(src.def_id()); + let is_function = match descr { + Some(Def::Fn(_)) | Some(Def::Method(_)) | Some(Def::Ctor(..)) => true, + _ => tcx.is_closure(src.def_id()), + }; + match (descr, src.promoted) { + (_, Some(i)) => write!(w, "{:?} in ", i)?, + (Some(Def::Const(_)), _) | (Some(Def::AssociatedConst(_)), _) => write!(w, "const ")?, + (Some(Def::Static(_, /*is_mutbl*/false)), _) => write!(w, "static ")?, + (Some(Def::Static(_, /*is_mutbl*/true)), _) => write!(w, "static mut ")?, + (_, _) if is_function => write!(w, "fn ")?, + (None, _) => {}, // things like anon const, not an item + _ => bug!("Unexpected def description {:?}", descr), } - item_path::with_forced_impl_filename_line(|| { + ty::print::with_forced_impl_filename_line(|| { // see notes on #41697 elsewhere - write!(w, " {}", tcx.item_path_str(src.def_id)) + write!(w, " {}", tcx.def_path_str(src.def_id())) })?; - match (body_owner_kind, src.promoted) { - (hir::BodyOwnerKind::Fn, None) => { - write!(w, "(")?; + if src.promoted.is_none() && is_function { + write!(w, "(")?; - // fn argument types. - for (i, arg) in mir.args_iter().enumerate() { - if i != 0 { - write!(w, ", ")?; - } - write!(w, "{:?}: {}", Place::Local(arg), mir.local_decls[arg].ty)?; + // fn argument types. + for (i, arg) in mir.args_iter().enumerate() { + if i != 0 { + write!(w, ", ")?; } - - write!(w, ") -> {}", mir.return_ty())?; - } - (hir::BodyOwnerKind::Const, _) | (hir::BodyOwnerKind::Static(_), _) | (_, Some(_)) => { - assert_eq!(mir.arg_count, 0); - write!(w, ": {} =", mir.return_ty())?; + write!(w, "{:?}: {}", Place::Base(PlaceBase::Local(arg)), mir.local_decls[arg].ty)?; } + + write!(w, ") -> {}", mir.return_ty())?; + } else { + assert_eq!(mir.arg_count, 0); + write!(w, ": {} =", mir.return_ty())?; } if let Some(yield_ty) = mir.yield_ty { @@ -609,10 +626,13 @@ fn write_mir_sig(tcx: TyCtxt, src: MirSource, mir: &Mir, w: &mut dyn Write) -> i writeln!(w, "yields {}", yield_ty)?; } + write!(w, " ")?; + // Next thing that gets printed is the opening { + Ok(()) } -fn write_temp_decls(mir: &Mir, w: &mut dyn Write) -> io::Result<()> { +fn write_temp_decls(mir: &Mir<'_>, w: &mut dyn Write) -> io::Result<()> { // Compiler-introduced temporary types. 
for temp in mir.temps_iter() { writeln!( @@ -628,7 +648,20 @@ fn write_temp_decls(mir: &Mir, w: &mut dyn Write) -> io::Result<()> { Ok(()) } -pub fn dump_mir_def_ids(tcx: TyCtxt, single: Option) -> Vec { +fn write_user_type_annotations(mir: &Mir<'_>, w: &mut dyn Write) -> io::Result<()> { + if !mir.user_type_annotations.is_empty() { + writeln!(w, "| User Type Annotations")?; + } + for (index, annotation) in mir.user_type_annotations.iter_enumerated() { + writeln!(w, "| {:?}: {:?} at {:?}", index.index(), annotation.user_ty, annotation.span)?; + } + if !mir.user_type_annotations.is_empty() { + writeln!(w, "|")?; + } + Ok(()) +} + +pub fn dump_mir_def_ids(tcx: TyCtxt<'_, '_, '_>, single: Option) -> Vec { if let Some(i) = single { vec![i] } else { diff --git a/src/librustc_msan/Cargo.toml b/src/librustc_msan/Cargo.toml index 78c39d03e45a9..bda4078572501 100644 --- a/src/librustc_msan/Cargo.toml +++ b/src/librustc_msan/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] build = "build.rs" name = "rustc_msan" version = "0.0.0" +edition = "2018" [lib] name = "rustc_msan" @@ -11,7 +12,7 @@ test = false [build-dependencies] build_helper = { path = "../build_helper" } -cmake = "0.1.18" +cmake = "0.1.38" [dependencies] alloc = { path = "../liballoc" } diff --git a/src/librustc_msan/build.rs b/src/librustc_msan/build.rs index 4abfc3585602f..1c66b0a9cd3cf 100644 --- a/src/librustc_msan/build.rs +++ b/src/librustc_msan/build.rs @@ -1,16 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -extern crate build_helper; -extern crate cmake; - use std::env; use build_helper::sanitizer_lib_boilerplate; @@ -18,6 +5,8 @@ use cmake::Config; fn main() { if let Some(llvm_config) = env::var_os("LLVM_CONFIG") { + build_helper::restore_library_path(); + let (native, target) = match sanitizer_lib_boilerplate("msan") { Ok(native) => native, _ => return, diff --git a/src/librustc_msan/lib.rs b/src/librustc_msan/lib.rs index 47f917e40c1ff..3bdb86d313dcb 100644 --- a/src/librustc_msan/lib.rs +++ b/src/librustc_msan/lib.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- #![sanitizer_runtime] #![feature(nll)] #![feature(sanitizer_runtime)] @@ -16,3 +6,5 @@ #![unstable(feature = "sanitizer_runtime_lib", reason = "internal implementation detail of sanitizers", issue = "0")] + +#![deny(rust_2018_idioms)] diff --git a/src/librustc_passes/Cargo.toml b/src/librustc_passes/Cargo.toml index 2babb93eedbcf..00bdcdc0cc021 100644 --- a/src/librustc_passes/Cargo.toml +++ b/src/librustc_passes/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_passes" version = "0.0.0" +edition = "2018" [lib] name = "rustc_passes" @@ -14,5 +15,6 @@ rustc = { path = "../librustc" } rustc_mir = { path = "../librustc_mir"} rustc_data_structures = { path = "../librustc_data_structures" } syntax = { path = "../libsyntax" } +syntax_ext = { path = "../libsyntax_ext" } syntax_pos = { path = "../libsyntax_pos" } -rustc_errors = { path = "../librustc_errors" } +errors = { path = "../librustc_errors", package = "rustc_errors" } diff --git a/src/librustc_passes/ast_validation.rs b/src/librustc_passes/ast_validation.rs index 9a35721e3e1c4..a9a604cad8bcf 100644 --- a/src/librustc_passes/ast_validation.rs +++ b/src/librustc_passes/ast_validation.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Validate AST before lowering it to HIR // // This pass is supposed to catch things that fit into AST data structures, @@ -16,23 +6,172 @@ // This pass is supposed to perform only simple checks not requiring name resolution // or type checking or some other kind of complex analysis. +use std::mem; +use syntax::print::pprust; use rustc::lint; +use rustc::lint::builtin::{BuiltinLintDiagnostics, NESTED_IMPL_TRAIT}; use rustc::session::Session; +use rustc_data_structures::fx::FxHashMap; use syntax::ast::*; use syntax::attr; use syntax::source_map::Spanned; use syntax::symbol::keywords; use syntax::ptr::P; use syntax::visit::{self, Visitor}; +use syntax::{span_err, struct_span_err, walk_list}; +use syntax_ext::proc_macro_decls::is_proc_macro_attr; use syntax_pos::Span; -use errors; use errors::Applicability; +use log::debug; + +#[derive(Copy, Clone, Debug)] +struct OuterImplTrait { + span: Span, + + /// rust-lang/rust#57979: a bug in original implementation caused + /// us to fail sometimes to record an outer `impl Trait`. + /// Therefore, in order to reliably issue a warning (rather than + /// an error) in the *precise* places where we are newly injecting + /// the diagnostic, we have to distinguish between the places + /// where the outer `impl Trait` has always been recorded, versus + /// the places where it has only recently started being recorded. + only_recorded_since_pull_request_57730: bool, +} + +impl OuterImplTrait { + /// This controls whether we should downgrade the nested impl + /// trait diagnostic to a warning rather than an error, based on + /// whether the outer impl trait had been improperly skipped in + /// earlier implementations of the analysis on the stable + /// compiler. 
+ fn should_warn_instead_of_error(&self) -> bool { + self.only_recorded_since_pull_request_57730 + } +} struct AstValidator<'a> { session: &'a Session, + has_proc_macro_decls: bool, + has_global_allocator: bool, + + // Used to ban nested `impl Trait`, e.g., `impl Into`. + // Nested `impl Trait` _is_ allowed in associated type position, + // e.g `impl Iterator` + outer_impl_trait: Option, + + // Used to ban `impl Trait` in path projections like `::Item` + // or `Foo::Bar` + is_impl_trait_banned: bool, + + // rust-lang/rust#57979: the ban of nested `impl Trait` was buggy + // until PRs #57730 and #57981 landed: it would jump directly to + // walk_ty rather than visit_ty (or skip recurring entirely for + // impl trait in projections), and thus miss some cases. We track + // whether we should downgrade to a warning for short-term via + // these booleans. + warning_period_57979_didnt_record_next_impl_trait: bool, + warning_period_57979_impl_trait_in_proj: bool, } impl<'a> AstValidator<'a> { + fn with_impl_trait_in_proj_warning(&mut self, v: bool, f: impl FnOnce(&mut Self) -> T) -> T { + let old = mem::replace(&mut self.warning_period_57979_impl_trait_in_proj, v); + let ret = f(self); + self.warning_period_57979_impl_trait_in_proj = old; + ret + } + + fn with_banned_impl_trait(&mut self, f: impl FnOnce(&mut Self)) { + let old = mem::replace(&mut self.is_impl_trait_banned, true); + f(self); + self.is_impl_trait_banned = old; + } + + fn with_impl_trait(&mut self, outer: Option, f: impl FnOnce(&mut Self)) { + let old = mem::replace(&mut self.outer_impl_trait, outer); + f(self); + self.outer_impl_trait = old; + } + + fn visit_assoc_type_binding_from_generic_args(&mut self, type_binding: &'a TypeBinding) { + // rust-lang/rust#57979: bug in old visit_generic_args called + // walk_ty rather than visit_ty, skipping outer `impl Trait` + // if it happened to occur at `type_binding.ty` + if let TyKind::ImplTrait(..) = type_binding.ty.node { + self.warning_period_57979_didnt_record_next_impl_trait = true; + } + self.visit_assoc_type_binding(type_binding); + } + + fn visit_ty_from_generic_args(&mut self, ty: &'a Ty) { + // rust-lang/rust#57979: bug in old visit_generic_args called + // walk_ty rather than visit_ty, skippping outer `impl Trait` + // if it happened to occur at `ty` + if let TyKind::ImplTrait(..) = ty.node { + self.warning_period_57979_didnt_record_next_impl_trait = true; + } + self.visit_ty(ty); + } + + fn outer_impl_trait(&mut self, span: Span) -> OuterImplTrait { + let only_recorded_since_pull_request_57730 = + self.warning_period_57979_didnt_record_next_impl_trait; + + // (this flag is designed to be set to true and then only + // reach the construction point for the outer impl trait once, + // so its safe and easiest to unconditionally reset it to + // false) + self.warning_period_57979_didnt_record_next_impl_trait = false; + + OuterImplTrait { + span, only_recorded_since_pull_request_57730, + } + } + + // Mirrors visit::walk_ty, but tracks relevant state + fn walk_ty(&mut self, t: &'a Ty) { + match t.node { + TyKind::ImplTrait(..) => { + let outer_impl_trait = self.outer_impl_trait(t.span); + self.with_impl_trait(Some(outer_impl_trait), |this| visit::walk_ty(this, t)) + } + TyKind::Path(ref qself, ref path) => { + // We allow these: + // - `Option` + // - `option::Option` + // - `option::Option::Foo + // + // But not these: + // - `::Foo` + // - `option::Option::Foo`. 
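The comments above distinguish nested `impl Trait` in associated-type position, which is accepted, from nested `impl Trait` used directly as a generic argument, which the new checks reject. A small illustration of the two shapes (the function bodies are made up):

```rust
use std::fmt::Debug;

// Accepted: the inner `impl Trait` sits in associated-type position.
fn allowed() -> impl Iterator<Item = impl Debug> {
    (0..3).map(|n| n * 2)
}

// Rejected by the check above (kept as a comment so the example compiles):
// fn not_allowed() -> impl Into<impl Debug> { 0 }
// error[E0666]: nested `impl Trait` is not allowed

fn main() {
    for item in allowed() {
        println!("{:?}", item);
    }
}
```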
+ // + // To implement this, we disallow `impl Trait` from `qself` + // (for cases like `::Foo>`) + // but we allow `impl Trait` in `GenericArgs` + // iff there are no more PathSegments. + if let Some(ref qself) = *qself { + // `impl Trait` in `qself` is always illegal + self.with_banned_impl_trait(|this| this.visit_ty(&qself.ty)); + } + + // Note that there should be a call to visit_path here, + // so if any logic is added to process `Path`s a call to it should be + // added both in visit_path and here. This code mirrors visit::walk_path. + for (i, segment) in path.segments.iter().enumerate() { + // Allow `impl Trait` iff we're on the final path segment + if i == path.segments.len() - 1 { + self.visit_path_segment(path.span, segment); + } else { + self.with_banned_impl_trait(|this| { + this.visit_path_segment(path.span, segment) + }); + } + } + } + _ => visit::walk_ty(self, t), + } + } + fn err_handler(&self) -> &errors::Handler { &self.session.diagnostic() } @@ -53,14 +192,6 @@ impl<'a> AstValidator<'a> { } } - fn invalid_non_exhaustive_attribute(&self, variant: &Variant) { - let has_non_exhaustive = attr::contains_name(&variant.node.attrs, "non_exhaustive"); - if has_non_exhaustive { - self.err_handler().span_err(variant.span, - "#[non_exhaustive] is not yet supported on variants"); - } - } - fn invalid_visibility(&self, vis: &Visibility, note: Option<&str>) { if let VisibilityKind::Inherited = vis.node { return @@ -120,21 +251,21 @@ impl<'a> AstValidator<'a> { } } - /// matches '-' lit | lit (cf. parser::Parser::parse_literal_maybe_minus), - /// or path for ranges. - /// - /// FIXME: do we want to allow expr -> pattern conversion to create path expressions? - /// That means making this work: - /// - /// ```rust,ignore (FIXME) - /// struct S; - /// macro_rules! m { - /// ($a:expr) => { - /// let $a = S; - /// } - /// } - /// m!(S); - /// ``` + /// Matches `'-' lit | lit (cf. parser::Parser::parse_literal_maybe_minus)`, + /// or paths for ranges. + // + // FIXME: do we want to allow `expr -> pattern` conversion to create path expressions? + // That means making this work: + // + // ```rust,ignore (FIXME) + // struct S; + // macro_rules! m { + // ($a:expr) => { + // let $a = S; + // } + // } + // m!(S); + // ``` fn check_expr_within_pat(&self, expr: &Expr, allow_paths: bool) { match expr.node { ExprKind::Lit(..) 
=> {} @@ -182,7 +313,7 @@ impl<'a> AstValidator<'a> { ); if let Ok(snippet) = self.session.source_map().span_to_snippet(span) { - err.span_suggestion_with_applicability( + err.span_suggestion( span, "consider adding parentheses", format!("({})", snippet), Applicability::MachineApplicable, ); @@ -213,7 +344,92 @@ impl<'a> AstValidator<'a> { _ => None, } } +} +enum GenericPosition { + Param, + Arg, +} + +fn validate_generics_order<'a>( + sess: &Session, + handler: &errors::Handler, + generics: impl Iterator< + Item = ( + ParamKindOrd, + Option<&'a [GenericBound]>, + Span, + Option + ), + >, + pos: GenericPosition, + span: Span, +) { + let mut max_param: Option = None; + let mut out_of_order = FxHashMap::default(); + let mut param_idents = vec![]; + + for (kind, bounds, span, ident) in generics { + if let Some(ident) = ident { + param_idents.push((kind, bounds, param_idents.len(), ident)); + } + let max_param = &mut max_param; + match max_param { + Some(max_param) if *max_param > kind => { + let entry = out_of_order.entry(kind).or_insert((*max_param, vec![])); + entry.1.push(span); + } + Some(_) | None => *max_param = Some(kind), + }; + } + + let mut ordered_params = "<".to_string(); + if !out_of_order.is_empty() { + param_idents.sort_by_key(|&(po, _, i, _)| (po, i)); + let mut first = true; + for (_, bounds, _, ident) in param_idents { + if !first { + ordered_params += ", "; + } + ordered_params += &ident; + if let Some(bounds) = bounds { + if !bounds.is_empty() { + ordered_params += ": "; + ordered_params += &pprust::bounds_to_string(&bounds); + } + } + first = false; + } + } + ordered_params += ">"; + + let pos_str = match pos { + GenericPosition::Param => "parameter", + GenericPosition::Arg => "argument", + }; + + for (param_ord, (max_param, spans)) in out_of_order { + let mut err = handler.struct_span_err(spans, + &format!( + "{} {pos}s must be declared prior to {} {pos}s", + param_ord, + max_param, + pos = pos_str, + )); + if let GenericPosition::Param = pos { + err.span_suggestion( + span, + &format!( + "reorder the {}s: lifetimes, then types{}", + pos_str, + if sess.features_untracked().const_generics { ", then consts" } else { "" }, + ), + ordered_params.clone(), + Applicability::MachineApplicable, + ); + } + err.emit(); + } } impl<'a> Visitor<'a> for AstValidator<'a> { @@ -235,7 +451,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> { ); match val.node { ExprKind::Lit(ref v) if v.node.is_numeric() => { - err.span_suggestion_with_applicability( + err.span_suggestion( place.span.between(val.span), "if you meant to write a comparison against a negative value, add a \ space in between `<` and `-`", @@ -277,34 +493,47 @@ impl<'a> Visitor<'a> for AstValidator<'a> { self.no_questions_in_bounds(bounds, "trait object types", false); } TyKind::ImplTrait(_, ref bounds) => { + if self.is_impl_trait_banned { + if self.warning_period_57979_impl_trait_in_proj { + self.session.buffer_lint( + NESTED_IMPL_TRAIT, ty.id, ty.span, + "`impl Trait` is not allowed in path parameters"); + } else { + struct_span_err!(self.session, ty.span, E0667, + "`impl Trait` is not allowed in path parameters").emit(); + } + } + + if let Some(outer_impl_trait) = self.outer_impl_trait { + if outer_impl_trait.should_warn_instead_of_error() { + self.session.buffer_lint_with_diagnostic( + NESTED_IMPL_TRAIT, ty.id, ty.span, + "nested `impl Trait` is not allowed", + BuiltinLintDiagnostics::NestedImplTrait { + outer_impl_trait_span: outer_impl_trait.span, + inner_impl_trait_span: ty.span, + }); + } else { + 
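`validate_generics_order` is the piece that now rejects out-of-order generic parameters and arguments in one place; the diagnostic wording below is paraphrased from its format string, and the snippet itself is only a sketch:

```rust
// Accepted: lifetimes first, then type parameters (then consts, once
// `const_generics` is enabled).
fn ordered<'a, T>(x: &'a T) -> &'a T {
    x
}

// Rejected: a lifetime declared after a type parameter. The validator
// reports roughly "lifetime parameters must be declared prior to type
// parameters" and suggests the reordered list `<'a, T>`.
// fn unordered<T, 'a>(x: &'a T) -> &'a T { x }

fn main() {
    let v = 7u32;
    assert_eq!(*ordered(&v), 7);
}
```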
struct_span_err!(self.session, ty.span, E0666, + "nested `impl Trait` is not allowed") + .span_label(outer_impl_trait.span, "outer `impl Trait`") + .span_label(ty.span, "nested `impl Trait` here") + .emit(); + } + } + if !bounds.iter() .any(|b| if let GenericBound::Trait(..) = *b { true } else { false }) { self.err_handler().span_err(ty.span, "at least one trait must be specified"); } + + self.with_impl_trait_in_proj_warning(true, |this| this.walk_ty(ty)); + return; } _ => {} } - visit::walk_ty(self, ty) - } - - fn visit_use_tree(&mut self, use_tree: &'a UseTree, id: NodeId, _nested: bool) { - // Check if the path in this `use` is not generic, such as `use foo::bar;` While this - // can't happen normally thanks to the parser, a generic might sneak in if the `use` is - // built using a macro. - // - // macro_use foo { - // ($p:path) => { use $p; } - // } - // foo!(bar::baz); - use_tree.prefix.segments.iter().find(|segment| { - segment.args.is_some() - }).map(|segment| { - self.err_handler().span_err(segment.args.as_ref().unwrap().span(), - "generic arguments in import path"); - }); - - visit::walk_use_tree(self, use_tree, id); + self.walk_ty(ty) } fn visit_label(&mut self, label: &'a Label) { @@ -318,6 +547,14 @@ impl<'a> Visitor<'a> for AstValidator<'a> { } fn visit_item(&mut self, item: &'a Item) { + if item.attrs.iter().any(|attr| is_proc_macro_attr(attr) ) { + self.has_proc_macro_decls = true; + } + + if attr::contains_name(&item.attrs, "global_allocator") { + self.has_global_allocator = true; + } + match item.node { ItemKind::Impl(unsafety, polarity, _, _, Some(..), ref ty, ref impl_items) => { self.invalid_visibility(&item.vis, None); @@ -333,7 +570,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> { self.invalid_visibility(&impl_item.vis, None); if let ImplItemKind::Method(ref sig, _) = impl_item.node { self.check_trait_fn_not_const(sig.header.constness); - self.check_trait_fn_not_async(impl_item.span, sig.header.asyncness); + self.check_trait_fn_not_async(impl_item.span, sig.header.asyncness.node); } } } @@ -352,6 +589,27 @@ impl<'a> Visitor<'a> for AstValidator<'a> { .note("only trait implementations may be annotated with default").emit(); } } + ItemKind::Fn(_, ref header, ref generics, _) => { + // We currently do not permit const generics in `const fn`, as + // this is tantamount to allowing compile-time dependent typing. + self.visit_fn_header(header); + if header.constness.node == Constness::Const { + // Look for const generics and error if we find any. + for param in &generics.params { + match param.kind { + GenericParamKind::Const { .. } => { + self.err_handler() + .struct_span_err( + item.span, + "const parameters are not permitted in `const fn`", + ) + .emit(); + } + _ => {} + } + } + } + } ItemKind::ForeignMod(..) 
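Two of the checks completed in this hunk read more easily with examples. Nested `impl Trait` stays a hard error (E0666) outside associated-type-binding position, while cases the pre-#57730 visitor used to miss are, if the warning-period flags are read correctly, only reported through the NESTED_IMPL_TRAIT lint for now; the new `const fn` rule is shown alongside. Illustrative sketch only:

```rust
use std::fmt::Debug;

// Allowed: nested `impl Trait` in associated-type-binding position, per the
// comment on `AstValidator::outer_impl_trait`.
fn assoc_binding_ok() -> impl Iterator<Item = impl Debug> {
    std::iter::once(0u8)
}

// Hard error: nested `impl Trait` in plain generic-argument position.
// error[E0666]: nested `impl Trait` is not allowed
// fn nested(_: impl Into<impl Debug>) {}

// New in this hunk: const generics are rejected on `const fn`
// (the parameter syntax itself needs `#![feature(const_generics)]`).
// error: const parameters are not permitted in `const fn`
// const fn with_const_param<const N: usize>() -> usize { N }

fn main() {
    assert_eq!(assoc_binding_ok().count(), 1);
}
```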
=> { self.invalid_visibility( &item.vis, @@ -360,7 +618,6 @@ impl<'a> Visitor<'a> for AstValidator<'a> { } ItemKind::Enum(ref def, _) => { for variant in &def.variants { - self.invalid_non_exhaustive_attribute(variant); for field in variant.node.data.fields() { self.invalid_visibility(&field.vis, None); } @@ -385,7 +642,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> { self.no_questions_in_bounds(bounds, "supertraits", true); for trait_item in trait_items { if let TraitItemKind::Method(ref sig, ref block) = trait_item.node { - self.check_trait_fn_not_async(trait_item.span, sig.header.asyncness); + self.check_trait_fn_not_async(trait_item.span, sig.header.asyncness.node); self.check_trait_fn_not_const(sig.header.constness); if block.is_none() { self.check_decl_no_pat(&sig.decl, |span, mut_ident| { @@ -413,7 +670,7 @@ impl<'a> Visitor<'a> for AstValidator<'a> { } } ItemKind::Union(ref vdata, _) => { - if !vdata.is_struct() { + if let VariantData::Tuple(..) | VariantData::Unit(..) = vdata { self.err_handler().span_err(item.span, "tuple and unit unions are not permitted"); } @@ -443,45 +700,83 @@ impl<'a> Visitor<'a> for AstValidator<'a> { visit::walk_foreign_item(self, fi) } - fn visit_vis(&mut self, vis: &'a Visibility) { - if let VisibilityKind::Restricted { ref path, .. } = vis.node { - path.segments.iter().find(|segment| segment.args.is_some()).map(|segment| { - self.err_handler().span_err(segment.args.as_ref().unwrap().span(), - "generic arguments in visibility path"); - }); - } + // Mirrors visit::walk_generic_args, but tracks relevant state + fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) { + match *generic_args { + GenericArgs::AngleBracketed(ref data) => { + walk_list!(self, visit_generic_arg, &data.args); + validate_generics_order( + self.session, + self.err_handler(), + data.args.iter().map(|arg| { + (match arg { + GenericArg::Lifetime(..) => ParamKindOrd::Lifetime, + GenericArg::Type(..) => ParamKindOrd::Type, + GenericArg::Const(..) => ParamKindOrd::Const, + }, None, arg.span(), None) + }), + GenericPosition::Arg, + generic_args.span(), + ); - visit::walk_vis(self, vis) + // Type bindings such as `Item=impl Debug` in `Iterator` + // are allowed to contain nested `impl Trait`. + self.with_impl_trait(None, |this| { + walk_list!(this, visit_assoc_type_binding_from_generic_args, &data.bindings); + }); + } + GenericArgs::Parenthesized(ref data) => { + walk_list!(self, visit_ty, &data.inputs); + if let Some(ref type_) = data.output { + // `-> Foo` syntax is essentially an associated type binding, + // so it is also allowed to contain nested `impl Trait`. + self.with_impl_trait(None, |this| this.visit_ty_from_generic_args(type_)); + } + } + } } fn visit_generics(&mut self, generics: &'a Generics) { - let mut seen_non_lifetime_param = false; - let mut seen_default = None; + let mut prev_ty_default = None; for param in &generics.params { - match (¶m.kind, seen_non_lifetime_param) { - (GenericParamKind::Lifetime { .. }, true) => { + if let GenericParamKind::Type { ref default, .. } = param.kind { + if default.is_some() { + prev_ty_default = Some(param.ident.span); + } else if let Some(span) = prev_ty_default { self.err_handler() - .span_err(param.ident.span, "lifetime parameters must be leading"); - }, - (GenericParamKind::Lifetime { .. }, false) => {} - (GenericParamKind::Type { ref default, .. 
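For the union check just above, `VariantData::Tuple` and `VariantData::Unit` correspond to the two rejected declaration forms; only the braced form is accepted. A minimal sketch:

```rust
#![allow(dead_code)]

// Accepted: braced (struct-like) union.
union Braced {
    int: u32,
    float: f32,
}

// Rejected by the check above:
// error: tuple and unit unions are not permitted
// union Tuple(u32, f32);
// union Unit;

fn main() {
    let u = Braced { int: 1 };
    // Reading any union field requires `unsafe`.
    let bits = unsafe { u.int };
    assert_eq!(bits, 1);
}
```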
}, _) => { - seen_non_lifetime_param = true; - if default.is_some() { - seen_default = Some(param.ident.span); - } else if let Some(span) = seen_default { - self.err_handler() - .span_err(span, "type parameters with a default must be trailing"); - break; - } + .span_err(span, "type parameters with a default must be trailing"); + break; } } } + + validate_generics_order( + self.session, + self.err_handler(), + generics.params.iter().map(|param| { + let ident = Some(param.ident.to_string()); + let (kind, ident) = match ¶m.kind { + GenericParamKind::Lifetime { .. } => (ParamKindOrd::Lifetime, ident), + GenericParamKind::Type { .. } => (ParamKindOrd::Type, ident), + GenericParamKind::Const { ref ty } => { + let ty = pprust::ty_to_string(ty); + (ParamKindOrd::Const, Some(format!("const {}: {}", param.ident, ty))) + } + }; + (kind, Some(&*param.bounds), param.ident.span, ident) + }), + GenericPosition::Param, + generics.span, + ); + for predicate in &generics.where_clause.predicates { if let WherePredicate::EqPredicate(ref predicate) = *predicate { - self.err_handler().span_err(predicate.span, "equality constraints are not yet \ - supported in where clauses (#20041)"); + self.err_handler() + .span_err(predicate.span, "equality constraints are not yet \ + supported in where clauses (see #20041)"); } } + visit::walk_generics(self, generics) } @@ -527,150 +822,26 @@ impl<'a> Visitor<'a> for AstValidator<'a> { .span_bug(mac.span, "macro invocation missed in expansion; did you forget to override \ the relevant `fold_*()` method in `PlaceholderExpander`?"); } -} - -// Bans nested `impl Trait`, e.g., `impl Into`. -// Nested `impl Trait` _is_ allowed in associated type position, -// e.g `impl Iterator` -struct NestedImplTraitVisitor<'a> { - session: &'a Session, - outer_impl_trait: Option, -} - -impl<'a> NestedImplTraitVisitor<'a> { - fn with_impl_trait(&mut self, outer_impl_trait: Option, f: F) - where F: FnOnce(&mut NestedImplTraitVisitor<'a>) - { - let old_outer_impl_trait = self.outer_impl_trait; - self.outer_impl_trait = outer_impl_trait; - f(self); - self.outer_impl_trait = old_outer_impl_trait; - } -} - - -impl<'a> Visitor<'a> for NestedImplTraitVisitor<'a> { - fn visit_ty(&mut self, t: &'a Ty) { - if let TyKind::ImplTrait(..) = t.node { - if let Some(outer_impl_trait) = self.outer_impl_trait { - struct_span_err!(self.session, t.span, E0666, - "nested `impl Trait` is not allowed") - .span_label(outer_impl_trait, "outer `impl Trait`") - .span_label(t.span, "nested `impl Trait` here") - .emit(); - - } - self.with_impl_trait(Some(t.span), |this| visit::walk_ty(this, t)); - } else { - visit::walk_ty(self, t); - } - } - fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) { - match *generic_args { - GenericArgs::AngleBracketed(ref data) => { - for arg in &data.args { - self.visit_generic_arg(arg) - } - for type_binding in &data.bindings { - // Type bindings such as `Item=impl Debug` in `Iterator` - // are allowed to contain nested `impl Trait`. - self.with_impl_trait(None, |this| visit::walk_ty(this, &type_binding.ty)); - } - } - GenericArgs::Parenthesized(ref data) => { - for type_ in &data.inputs { - self.visit_ty(type_); - } - if let Some(ref type_) = data.output { - // `-> Foo` syntax is essentially an associated type binding, - // so it is also allowed to contain nested `impl Trait`. 
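Minimal triggers for the two `visit_generics` diagnostics above might look like this (error text copied from the hunk; the snippets themselves are illustrative):

```rust
// Accepted: the defaulted type parameter is trailing.
struct Trailing<T, U = u8>(T, U);

// Rejected: a defaulted parameter followed by a non-defaulted one.
// error: type parameters with a default must be trailing
// struct NotTrailing<T = u8, U>(T, U);

// Rejected: equality constraints parse but are not supported yet.
// error: equality constraints are not yet supported in where clauses (see #20041)
// fn eq_bound<I>(_: I) where I: Iterator, I::Item == u32 {}

fn main() {
    let _ = Trailing(1i32, 2u8);
}
```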
- self.with_impl_trait(None, |this| visit::walk_ty(this, type_)); - } - } - } - } - - fn visit_mac(&mut self, _mac: &Spanned) { - // covered in AstValidator - } -} - -// Bans `impl Trait` in path projections like `::Item` or `Foo::Bar`. -struct ImplTraitProjectionVisitor<'a> { - session: &'a Session, - is_banned: bool, -} - -impl<'a> ImplTraitProjectionVisitor<'a> { - fn with_ban(&mut self, f: F) - where F: FnOnce(&mut ImplTraitProjectionVisitor<'a>) - { - let old_is_banned = self.is_banned; - self.is_banned = true; - f(self); - self.is_banned = old_is_banned; - } -} -impl<'a> Visitor<'a> for ImplTraitProjectionVisitor<'a> { - fn visit_ty(&mut self, t: &'a Ty) { - match t.node { - TyKind::ImplTrait(..) => { - if self.is_banned { - struct_span_err!(self.session, t.span, E0667, - "`impl Trait` is not allowed in path parameters").emit(); - } - } - TyKind::Path(ref qself, ref path) => { - // We allow these: - // - `Option` - // - `option::Option` - // - `option::Option::Foo - // - // But not these: - // - `::Foo` - // - `option::Option::Foo`. - // - // To implement this, we disallow `impl Trait` from `qself` - // (for cases like `::Foo>`) - // but we allow `impl Trait` in `GenericArgs` - // iff there are no more PathSegments. - if let Some(ref qself) = *qself { - // `impl Trait` in `qself` is always illegal - self.with_ban(|this| this.visit_ty(&qself.ty)); - } - - for (i, segment) in path.segments.iter().enumerate() { - // Allow `impl Trait` iff we're on the final path segment - if i == path.segments.len() - 1 { - visit::walk_path_segment(self, path.span, segment); - } else { - self.with_ban(|this| - visit::walk_path_segment(this, path.span, segment)); - } - } - } - _ => visit::walk_ty(self, t), + fn visit_fn_header(&mut self, header: &'a FnHeader) { + if header.asyncness.node.is_async() && self.session.rust_2015() { + struct_span_err!(self.session, header.asyncness.span, E0670, + "`async fn` is not permitted in the 2015 edition").emit(); } } - - fn visit_mac(&mut self, _mac: &Spanned) { - // covered in AstValidator - } } -pub fn check_crate(session: &Session, krate: &Crate) { - visit::walk_crate( - &mut NestedImplTraitVisitor { - session, - outer_impl_trait: None, - }, krate); - - visit::walk_crate( - &mut ImplTraitProjectionVisitor { - session, - is_banned: false, - }, krate); - - visit::walk_crate(&mut AstValidator { session }, krate) +pub fn check_crate(session: &Session, krate: &Crate) -> (bool, bool) { + let mut validator = AstValidator { + session, + has_proc_macro_decls: false, + has_global_allocator: false, + outer_impl_trait: None, + is_impl_trait_banned: false, + warning_period_57979_didnt_record_next_impl_trait: false, + warning_period_57979_impl_trait_in_proj: false, + }; + visit::walk_crate(&mut validator, krate); + + (validator.has_proc_macro_decls, validator.has_global_allocator) } diff --git a/src/librustc_passes/diagnostics.rs b/src/librustc_passes/diagnostics.rs index f1d0a4fee341e..e3c6b16703a4a 100644 --- a/src/librustc_passes/diagnostics.rs +++ b/src/librustc_passes/diagnostics.rs @@ -1,15 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- #![allow(non_snake_case)] +use syntax::{register_diagnostic, register_diagnostics, register_long_diagnostics}; + register_long_diagnostics! { /* E0014: r##" @@ -318,6 +310,18 @@ loop { break; } ``` +"##, + +E0670: r##" +Rust 2015 does not permit the use of `async fn`. + +Example of erroneous code: + +```compile_fail,E0670 +async fn foo() {} +``` + +Switch to the Rust 2018 edition to use `async fn`. "## } diff --git a/src/librustc_passes/hir_stats.rs b/src/librustc_passes/hir_stats.rs index 019fb9565f413..c74314ce0c4b5 100644 --- a/src/librustc_passes/hir_stats.rs +++ b/src/librustc_passes/hir_stats.rs @@ -1,18 +1,8 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // The visitors in this module collect sizes and counts of the most important // pieces of AST and HIR. The resulting numbers are good approximations but not // completely accurate (some things might be counted twice, others missed). -use rustc::hir; +use rustc::hir::{self, HirId}; use rustc::hir::intravisit as hir_visit; use rustc::util::common::to_readable_str; use rustc::util::nodemap::{FxHashMap, FxHashSet}; @@ -22,7 +12,7 @@ use syntax_pos::Span; #[derive(Copy, Clone, PartialEq, Eq, Hash)] enum Id { - Node(NodeId), + Node(HirId), Attr(AttrId), None, } @@ -71,7 +61,7 @@ impl<'k> StatCollector<'k> { }); entry.count += 1; - entry.size = ::std::mem::size_of_val(node); + entry.size = std::mem::size_of_val(node); } fn print(&self, title: &str) { @@ -129,32 +119,32 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_item(&mut self, i: &'v hir::Item) { - self.record("Item", Id::Node(i.id), i); + self.record("Item", Id::Node(i.hir_id), i); hir_visit::walk_item(self, i) } - fn visit_mod(&mut self, m: &'v hir::Mod, _s: Span, n: NodeId) { + fn visit_mod(&mut self, m: &'v hir::Mod, _s: Span, n: hir::HirId) { self.record("Mod", Id::None, m); hir_visit::walk_mod(self, m, n) } fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem) { - self.record("ForeignItem", Id::Node(i.id), i); + self.record("ForeignItem", Id::Node(i.hir_id), i); hir_visit::walk_foreign_item(self, i) } fn visit_local(&mut self, l: &'v hir::Local) { - self.record("Local", Id::Node(l.id), l); + self.record("Local", Id::Node(l.hir_id), l); hir_visit::walk_local(self, l) } fn visit_block(&mut self, b: &'v hir::Block) { - self.record("Block", Id::Node(b.id), b); + self.record("Block", Id::Node(b.hir_id), b); hir_visit::walk_block(self, b) } fn visit_stmt(&mut self, s: &'v hir::Stmt) { - self.record("Stmt", Id::Node(s.node.id()), s); + self.record("Stmt", Id::Node(s.hir_id), s); hir_visit::walk_stmt(self, s) } @@ -164,22 +154,17 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_pat(&mut self, p: &'v hir::Pat) { - self.record("Pat", Id::Node(p.id), p); + self.record("Pat", Id::Node(p.hir_id), p); hir_visit::walk_pat(self, p) } - fn visit_decl(&mut self, d: &'v hir::Decl) { - self.record("Decl", Id::None, d); - hir_visit::walk_decl(self, d) - } - fn visit_expr(&mut self, ex: &'v hir::Expr) { - self.record("Expr", Id::Node(ex.id), ex); + self.record("Expr", Id::Node(ex.hir_id), ex); hir_visit::walk_expr(self, ex) } fn visit_ty(&mut self, t: &'v hir::Ty) { - self.record("Ty", Id::Node(t.id), t); + self.record("Ty", 
Id::Node(t.hir_id), t); hir_visit::walk_ty(self, t) } @@ -188,7 +173,7 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { fd: &'v hir::FnDecl, b: hir::BodyId, s: Span, - id: NodeId) { + id: hir::HirId) { self.record("FnDecl", Id::None, fd); hir_visit::walk_fn(self, fk, fd, b, s, id) } @@ -199,12 +184,12 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_trait_item(&mut self, ti: &'v hir::TraitItem) { - self.record("TraitItem", Id::Node(ti.id), ti); + self.record("TraitItem", Id::Node(ti.hir_id), ti); hir_visit::walk_trait_item(self, ti) } fn visit_impl_item(&mut self, ii: &'v hir::ImplItem) { - self.record("ImplItem", Id::Node(ii.id), ii); + self.record("ImplItem", Id::Node(ii.hir_id), ii); hir_visit::walk_impl_item(self, ii) } @@ -214,20 +199,20 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_struct_field(&mut self, s: &'v hir::StructField) { - self.record("StructField", Id::Node(s.id), s); + self.record("StructField", Id::Node(s.hir_id), s); hir_visit::walk_struct_field(self, s) } fn visit_variant(&mut self, v: &'v hir::Variant, g: &'v hir::Generics, - item_id: NodeId) { + item_id: hir::HirId) { self.record("Variant", Id::None, v); hir_visit::walk_variant(self, v, g, item_id) } fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) { - self.record("Lifetime", Id::Node(lifetime.id), lifetime); + self.record("Lifetime", Id::Node(lifetime.hir_id), lifetime); hir_visit::walk_lifetime(self, lifetime) } @@ -249,7 +234,7 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_assoc_type_binding(&mut self, type_binding: &'v hir::TypeBinding) { - self.record("TypeBinding", Id::Node(type_binding.id), type_binding); + self.record("TypeBinding", Id::Node(type_binding.hir_id), type_binding); hir_visit::walk_assoc_type_binding(self, type_binding) } @@ -258,7 +243,7 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { } fn visit_macro_def(&mut self, macro_def: &'v hir::MacroDef) { - self.record("MacroDef", Id::Node(macro_def.id), macro_def); + self.record("MacroDef", Id::Node(macro_def.hir_id), macro_def); hir_visit::walk_macro_def(self, macro_def) } } diff --git a/src/librustc_passes/layout_test.rs b/src/librustc_passes/layout_test.rs new file mode 100644 index 0000000000000..6940f8f442ee9 --- /dev/null +++ b/src/librustc_passes/layout_test.rs @@ -0,0 +1,129 @@ +use rustc::hir; +use rustc::hir::def_id::DefId; +use rustc::hir::itemlikevisit::ItemLikeVisitor; +use rustc::hir::ItemKind; +use rustc::ty::layout::HasDataLayout; +use rustc::ty::layout::HasTyCtxt; +use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::TargetDataLayout; +use rustc::ty::layout::TyLayout; +use rustc::ty::ParamEnv; +use rustc::ty::Ty; +use rustc::ty::TyCtxt; +use syntax::ast::Attribute; + +pub fn test_layout<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { + if tcx.features().rustc_attrs { + // if the `rustc_attrs` feature is not enabled, don't bother testing layout + tcx.hir() + .krate() + .visit_all_item_likes(&mut VarianceTest { tcx }); + } +} + +struct VarianceTest<'a, 'tcx: 'a> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, +} + +impl<'a, 'tcx> ItemLikeVisitor<'tcx> for VarianceTest<'a, 'tcx> { + fn visit_item(&mut self, item: &'tcx hir::Item) { + let item_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); + + if let ItemKind::Ty(..) 
= item.node { + for attr in self.tcx.get_attrs(item_def_id).iter() { + if attr.check_name("rustc_layout") { + self.dump_layout_of(item_def_id, item, attr); + } + } + } + } + + fn visit_trait_item(&mut self, _: &'tcx hir::TraitItem) {} + fn visit_impl_item(&mut self, _: &'tcx hir::ImplItem) {} +} + +impl<'a, 'tcx> VarianceTest<'a, 'tcx> { + fn dump_layout_of(&self, item_def_id: DefId, item: &hir::Item, attr: &Attribute) { + let tcx = self.tcx; + let param_env = self.tcx.param_env(item_def_id); + let ty = self.tcx.type_of(item_def_id); + match self.tcx.layout_of(param_env.and(ty)) { + Ok(ty_layout) => { + // Check out the `#[rustc_layout(..)]` attribute to tell what to dump. + // The `..` are the names of fields to dump. + let meta_items = attr.meta_item_list().unwrap_or_default(); + for meta_item in meta_items { + match meta_item.name_or_empty().get() { + "abi" => { + self.tcx + .sess + .span_err(item.span, &format!("abi: {:?}", ty_layout.abi)); + } + + "align" => { + self.tcx + .sess + .span_err(item.span, &format!("align: {:?}", ty_layout.align)); + } + + "size" => { + self.tcx + .sess + .span_err(item.span, &format!("size: {:?}", ty_layout.size)); + } + + "homogeneous_aggregate" => { + self.tcx.sess.span_err( + item.span, + &format!( + "homogeneous_aggregate: {:?}", + ty_layout + .homogeneous_aggregate(&UnwrapLayoutCx { tcx, param_env }), + ), + ); + } + + name => { + self.tcx.sess.span_err( + meta_item.span(), + &format!("unrecognized field name `{}`", name), + ); + } + } + } + } + + Err(layout_error) => { + self.tcx + .sess + .span_err(item.span, &format!("layout error: {:?}", layout_error)); + } + } + } +} + +struct UnwrapLayoutCx<'me, 'tcx> { + tcx: TyCtxt<'me, 'tcx, 'tcx>, + param_env: ParamEnv<'tcx>, +} + +impl<'me, 'tcx> LayoutOf for UnwrapLayoutCx<'me, 'tcx> { + type Ty = Ty<'tcx>; + type TyLayout = TyLayout<'tcx>; + + fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout { + self.tcx.layout_of(self.param_env.and(ty)).unwrap() + } +} + +impl<'me, 'tcx> HasTyCtxt<'tcx> for UnwrapLayoutCx<'me, 'tcx> { + fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx + } +} + +impl<'me, 'tcx> HasDataLayout for UnwrapLayoutCx<'me, 'tcx> { + fn data_layout(&self) -> &TargetDataLayout { + self.tcx.data_layout() + } +} diff --git a/src/librustc_passes/lib.rs b/src/librustc_passes/lib.rs index a5d2edbc5d439..20442a4a566ec 100644 --- a/src/librustc_passes/lib.rs +++ b/src/librustc_passes/lib.rs @@ -1,37 +1,21 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Various checks //! //! # Note //! //! This API is completely unstable and subject to change. 
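layout_test.rs is driven entirely by the internal `#[rustc_layout(...)]` attribute on type aliases, with `abi`, `align`, `size`, and `homogeneous_aggregate` as the recognized field names; everything it reports comes out as `span_err` on the item. A sketch of how such a test might be written, assuming the compiler's own UI-test setup rather than ordinary user code:

```rust
// Only inspected when the `rustc_attrs` feature gate is on; without it the
// whole pass is skipped (see `test_layout` above).
#![feature(rustc_attrs)]

// Each listed name is dumped as an error on this item's span, e.g.
// "size: ..." and "align: ...". An unknown name produces
// "unrecognized field name `...`".
#[rustc_layout(size, align, abi)]
type PairOfU16 = (u16, u16);

fn main() {}
```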
-#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(nll)] #![feature(rustc_diagnostic_macros)] -#[macro_use] -extern crate rustc; -extern crate rustc_mir; -extern crate rustc_data_structures; +#![recursion_limit="256"] + +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] #[macro_use] -extern crate log; -#[macro_use] -extern crate syntax; -extern crate syntax_pos; -extern crate rustc_errors as errors; +extern crate rustc; use rustc::ty::query::Providers; @@ -40,10 +24,12 @@ mod diagnostics; pub mod ast_validation; pub mod rvalue_promotion; pub mod hir_stats; +pub mod layout_test; pub mod loops; __build_diagnostic_array! { librustc_passes, DIAGNOSTICS } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { rvalue_promotion::provide(providers); + loops::provide(providers); } diff --git a/src/librustc_passes/loops.rs b/src/librustc_passes/loops.rs index a87e86aee0cf0..97bbb0adb2d79 100644 --- a/src/librustc_passes/loops.rs +++ b/src/librustc_passes/loops.rs @@ -1,20 +1,14 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. -use self::Context::*; +use Context::*; use rustc::session::Session; +use rustc::ty::query::Providers; +use rustc::ty::TyCtxt; +use rustc::hir::def_id::DefId; use rustc::hir::map::Map; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::{self, Node, Destination}; -use syntax::ast; +use syntax::struct_span_err; use syntax_pos::Span; use errors::Applicability; @@ -51,28 +45,26 @@ struct CheckLoopVisitor<'a, 'hir: 'a> { cx: Context, } -pub fn check_crate(sess: &Session, map: &Map) { - let krate = map.krate(); - krate.visit_all_item_likes(&mut CheckLoopVisitor { - sess, - hir_map: map, +fn check_mod_loops<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + tcx.hir().visit_item_likes_in_module(module_def_id, &mut CheckLoopVisitor { + sess: &tcx.sess, + hir_map: &tcx.hir(), cx: Normal, }.as_deep_visitor()); } +pub(crate) fn provide(providers: &mut Providers<'_>) { + *providers = Providers { + check_mod_loops, + ..*providers + }; +} + impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> { fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'hir> { NestedVisitorMap::OnlyBodies(&self.hir_map) } - fn visit_item(&mut self, i: &'hir hir::Item) { - self.with_context(Normal, |v| intravisit::walk_item(v, i)); - } - - fn visit_impl_item(&mut self, i: &'hir hir::ImplItem) { - self.with_context(Normal, |v| intravisit::walk_impl_item(v, i)); - } - fn visit_anon_const(&mut self, c: &'hir hir::AnonConst) { self.with_context(AnonConst, |v| intravisit::walk_anon_const(v, c)); } @@ -106,25 +98,25 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> { let loop_id = match label.target_id.into() { Ok(loop_id) => loop_id, - Err(hir::LoopIdError::OutsideLoopScope) => ast::DUMMY_NODE_ID, + Err(hir::LoopIdError::OutsideLoopScope) => hir::DUMMY_HIR_ID, Err(hir::LoopIdError::UnlabeledCfInWhileCondition) => { 
self.emit_unlabled_cf_in_while_condition(e.span, "break"); - ast::DUMMY_NODE_ID + hir::DUMMY_HIR_ID }, - Err(hir::LoopIdError::UnresolvedLabel) => ast::DUMMY_NODE_ID, + Err(hir::LoopIdError::UnresolvedLabel) => hir::DUMMY_HIR_ID, }; - if loop_id != ast::DUMMY_NODE_ID { - if let Node::Block(_) = self.hir_map.find(loop_id).unwrap() { + if loop_id != hir::DUMMY_HIR_ID { + if let Node::Block(_) = self.hir_map.find_by_hir_id(loop_id).unwrap() { return } } if opt_expr.is_some() { - let loop_kind = if loop_id == ast::DUMMY_NODE_ID { + let loop_kind = if loop_id == hir::DUMMY_HIR_ID { None } else { - Some(match self.hir_map.expect_expr(loop_id).node { + Some(match self.hir_map.expect_expr_by_hir_id(loop_id).node { hir::ExprKind::While(..) => LoopKind::WhileLoop, hir::ExprKind::Loop(_, _, source) => LoopKind::Loop(source), ref r => span_bug!(e.span, @@ -141,7 +133,7 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> { .span_label(e.span, "can only break with a value inside \ `loop` or breakable block") - .span_suggestion_with_applicability( + .span_suggestion( e.span, &format!( "instead, use `break` on its own \ @@ -163,7 +155,7 @@ impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> { match destination.target_id { Ok(loop_id) => { - if let Node::Block(block) = self.hir_map.find(loop_id).unwrap() { + if let Node::Block(block) = self.hir_map.find_by_hir_id(loop_id).unwrap() { struct_span_err!(self.sess, e.span, E0696, "`continue` pointing to a labeled block") .span_label(e.span, diff --git a/src/librustc_passes/rvalue_promotion.rs b/src/librustc_passes/rvalue_promotion.rs index bfe8b677a5e80..7c37c38f2d741 100644 --- a/src/librustc_passes/rvalue_promotion.rs +++ b/src/librustc_passes/rvalue_promotion.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Verifies that the types and values of const and static items // are safe. The rules enforced by this module are: // @@ -24,7 +14,7 @@ // - It's not possible to take the address of a static item with unsafe interior. 
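The `break`-with-value logic being ported to `HirId` here is the check that only `loop` (and breakable blocks) may yield a value through `break`; the label text below is taken from the hunk, while the rest of the snippet is just an illustration:

```rust
fn main() {
    // Fine: `loop` is an expression, so `break` may carry a value out of it.
    let answer = loop {
        break 42;
    };
    assert_eq!(answer, 42);

    // Rejected: a `while` loop cannot yield a value through `break`;
    // the error is labeled "can only break with a value inside `loop`
    // or breakable block" and suggests using plain `break` instead.
    // while answer > 0 {
    //     break 42;
    // }
}
```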
This is enforced // by borrowck::gather_loans -use rustc::ty::cast::CastKind; +use rustc::ty::cast::CastTy; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::def_id::DefId; use rustc::middle::expr_use_visitor as euv; @@ -32,16 +22,16 @@ use rustc::middle::mem_categorization as mc; use rustc::middle::mem_categorization::Categorization; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::query::Providers; -use rustc::ty::subst::Substs; -use rustc::util::nodemap::{ItemLocalSet, NodeSet}; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; +use rustc::util::nodemap::{ItemLocalSet, HirIdSet}; use rustc::hir; use rustc_data_structures::sync::Lrc; -use syntax::ast; use syntax_pos::{Span, DUMMY_SP}; -use self::Promotability::*; +use log::debug; +use Promotability::*; use std::ops::{BitAnd, BitAndAssign, BitOr}; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { rvalue_promotable_map, const_is_rvalue_promotable_to_static, @@ -49,25 +39,16 @@ pub fn provide(providers: &mut Providers) { }; } -pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - for &body_id in &tcx.hir().krate().body_ids { - let def_id = tcx.hir().body_owner_def_id(body_id); - tcx.const_is_rvalue_promotable_to_static(def_id); - } - tcx.sess.abort_if_errors(); -} - fn const_is_rvalue_promotable_to_static<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool { assert!(def_id.is_local()); - let node_id = tcx.hir().as_local_node_id(def_id) + let hir_id = tcx.hir().as_local_hir_id(def_id) .expect("rvalue_promotable_map invoked with non-local def-id"); - let body_id = tcx.hir().body_owned_by(node_id); - let body_hir_id = tcx.hir().node_to_hir_id(body_id.node_id); - tcx.rvalue_promotable_map(def_id).contains(&body_hir_id.local_id) + let body_id = tcx.hir().body_owned_by(hir_id); + tcx.rvalue_promotable_map(def_id).contains(&body_id.hir_id.local_id) } fn rvalue_promotable_map<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -86,14 +67,14 @@ fn rvalue_promotable_map<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, in_static: false, mut_rvalue_borrows: Default::default(), param_env: ty::ParamEnv::empty(), - identity_substs: Substs::empty(), + identity_substs: InternalSubsts::empty(), result: ItemLocalSet::default(), }; // `def_id` should be a `Body` owner - let node_id = tcx.hir().as_local_node_id(def_id) + let hir_id = tcx.hir().as_local_hir_id(def_id) .expect("rvalue_promotable_map invoked with non-local def-id"); - let body_id = tcx.hir().body_owned_by(node_id); + let body_id = tcx.hir().body_owned_by(hir_id); let _ = visitor.check_nested_body(body_id); Lrc::new(visitor.result) @@ -103,9 +84,9 @@ struct CheckCrateVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, in_fn: bool, in_static: bool, - mut_rvalue_borrows: NodeSet, + mut_rvalue_borrows: HirIdSet, param_env: ty::ParamEnv<'tcx>, - identity_substs: &'tcx Substs<'tcx>, + identity_substs: SubstsRef<'tcx>, tables: &'a ty::TypeckTables<'tcx>, result: ItemLocalSet, } @@ -170,17 +151,17 @@ impl<'a, 'gcx> CheckCrateVisitor<'a, 'gcx> { } /// While the `ExprUseVisitor` walks, we will identify which - /// expressions are borrowed, and insert their ids into this + /// expressions are borrowed, and insert their IDs into this /// table. Actually, we insert the "borrow-id", which is normally - /// the id of the expression being borrowed: but in the case of + /// the ID of the expression being borrowed: but in the case of /// `ref mut` borrows, the `id` of the pattern is - /// inserted. 
Therefore later we remove that entry from the table + /// inserted. Therefore, later we remove that entry from the table /// and transfer it over to the value being matched. This will /// then prevent said value from being promoted. fn remove_mut_rvalue_borrow(&mut self, pat: &hir::Pat) -> bool { let mut any_removed = false; pat.walk(|p| { - any_removed |= self.mut_rvalue_borrows.remove(&p.id); + any_removed |= self.mut_rvalue_borrows.remove(&p.hir_id); true }); any_removed @@ -201,6 +182,7 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> { self.in_static = false; match self.tcx.hir().body_owner_kind(item_id) { + hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => self.in_fn = true, hir::BodyOwnerKind::Static(_) => self.in_static = true, _ => {} @@ -209,7 +191,7 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> { self.tables = self.tcx.typeck_tables_of(item_def_id); self.param_env = self.tcx.param_env(item_def_id); - self.identity_substs = Substs::identity_for_item(self.tcx, item_def_id); + self.identity_substs = InternalSubsts::identity_for_item(self.tcx, item_def_id); let body = self.tcx.hir().body(body_id); @@ -230,26 +212,22 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> { fn check_stmt(&mut self, stmt: &'tcx hir::Stmt) -> Promotability { match stmt.node { - hir::StmtKind::Decl(ref decl, _node_id) => { - match &decl.node { - hir::DeclKind::Local(local) => { - if self.remove_mut_rvalue_borrow(&local.pat) { - if let Some(init) = &local.init { - self.mut_rvalue_borrows.insert(init.id); - } - } - - if let Some(ref expr) = local.init { - let _ = self.check_expr(&expr); - } - NotPromotable + hir::StmtKind::Local(ref local) => { + if self.remove_mut_rvalue_borrow(&local.pat) { + if let Some(init) = &local.init { + self.mut_rvalue_borrows.insert(init.hir_id); } - // Item statements are allowed - hir::DeclKind::Item(_) => Promotable } + + if let Some(ref expr) = local.init { + let _ = self.check_expr(&expr); + } + NotPromotable } - hir::StmtKind::Expr(ref box_expr, _node_id) | - hir::StmtKind::Semi(ref box_expr, _node_id) => { + // Item statements are allowed + hir::StmtKind::Item(..) => Promotable, + hir::StmtKind::Expr(ref box_expr) | + hir::StmtKind::Semi(ref box_expr) => { let _ = self.check_expr(box_expr); NotPromotable } @@ -257,12 +235,12 @@ impl<'a, 'tcx> CheckCrateVisitor<'a, 'tcx> { } fn check_expr(&mut self, ex: &'tcx hir::Expr) -> Promotability { - let node_ty = self.tables.node_id_to_type(ex.hir_id); + let node_ty = self.tables.node_type(ex.hir_id); let mut outer = check_expr_kind(self, ex, node_ty); outer &= check_adjustments(self, ex); // Handle borrows on (or inside the autorefs of) this expression. - if self.mut_rvalue_borrows.remove(&ex.id) { + if self.mut_rvalue_borrows.remove(&ex.hir_id) { outer = NotPromotable } @@ -319,7 +297,7 @@ fn check_expr_kind<'a, 'tcx>( if v.tables.is_method_call(e) { return NotPromotable; } - match v.tables.node_id_to_type(lhs.hir_id).sty { + match v.tables.node_type(lhs.hir_id).sty { ty::RawPtr(_) | ty::FnPtr(..) 
=> { assert!(op.node == hir::BinOpKind::Eq || op.node == hir::BinOpKind::Ne || op.node == hir::BinOpKind::Le || op.node == hir::BinOpKind::Lt || @@ -332,23 +310,20 @@ fn check_expr_kind<'a, 'tcx>( } hir::ExprKind::Cast(ref from, _) => { let expr_promotability = v.check_expr(from); - debug!("Checking const cast(id={})", from.id); - match v.tables.cast_kinds().get(from.hir_id) { - None => { - v.tcx.sess.delay_span_bug(e.span, "no kind for cast"); - NotPromotable - }, - Some(&CastKind::PtrAddrCast) | Some(&CastKind::FnPtrAddrCast) => { - NotPromotable - } - _ => expr_promotability + debug!("Checking const cast(id={})", from.hir_id); + let cast_in = CastTy::from_ty(v.tables.expr_ty(from)); + let cast_out = CastTy::from_ty(v.tables.expr_ty(e)); + match (cast_in, cast_out) { + (Some(CastTy::FnPtr), Some(CastTy::Int(_))) | + (Some(CastTy::Ptr(_)), Some(CastTy::Int(_))) => NotPromotable, + (_, _) => expr_promotability } } hir::ExprKind::Path(ref qpath) => { let def = v.tables.qpath_def(qpath, e.hir_id); match def { - Def::VariantCtor(..) | Def::StructCtor(..) | - Def::Fn(..) | Def::Method(..) | Def::SelfCtor(..) => Promotable, + Def::Ctor(..) | Def::Fn(..) | Def::Method(..) | Def::SelfCtor(..) => + Promotable, // References to a static that are themselves within a static // are inherently promotable with the exception @@ -412,8 +387,7 @@ fn check_expr_kind<'a, 'tcx>( Def::Err }; let def_result = match def { - Def::StructCtor(_, CtorKind::Fn) | - Def::VariantCtor(_, CtorKind::Fn) | + Def::Ctor(_, _, CtorKind::Fn) | Def::SelfCtor(..) => Promotable, Def::Fn(did) => v.handle_const_fn_call(did), Def::Method(did) => { @@ -431,8 +405,7 @@ fn check_expr_kind<'a, 'tcx>( for index in hirvec.iter() { method_call_result &= v.check_expr(index); } - if let Some(def) = v.tables.type_dependent_defs().get(e.hir_id) { - let def_id = def.def_id(); + if let Some(def_id) = v.tables.type_dependent_def_id(e.hir_id) { match v.tcx.associated_item(def_id).container { ty::ImplContainer(_) => method_call_result & v.handle_const_fn_call(def_id), ty::TraitContainer(_) => NotPromotable, @@ -459,7 +432,8 @@ fn check_expr_kind<'a, 'tcx>( struct_result } - hir::ExprKind::Lit(_) => Promotable, + hir::ExprKind::Lit(_) | + hir::ExprKind::Err => Promotable, hir::ExprKind::AddrOf(_, ref expr) | hir::ExprKind::Repeat(ref expr, _) => { @@ -471,7 +445,7 @@ fn check_expr_kind<'a, 'tcx>( let nested_body_promotable = v.check_nested_body(body_id); // Paths in constant contexts cannot refer to local variables, // as there are none, and thus closures can't have upvars there. - if v.tcx.with_freevars(e.id, |fv| !fv.is_empty()) { + if v.tcx.with_freevars(e.hir_id, |fv| !fv.is_empty()) { NotPromotable } else { nested_body_promotable @@ -531,7 +505,7 @@ fn check_expr_kind<'a, 'tcx>( mut_borrow = v.remove_mut_rvalue_borrow(pat); } if mut_borrow { - v.mut_rvalue_borrows.insert(expr.id); + v.mut_rvalue_borrows.insert(expr.hir_id); } let _ = v.check_expr(expr); @@ -600,7 +574,7 @@ fn check_expr_kind<'a, 'tcx>( ty_result & node_result } -/// Check the adjustments of an expression +/// Checks the adjustments of an expression. 
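The switch to `CastTy` above encodes the promotion rule directly: a cast from a raw pointer or a function pointer to an integer makes the expression non-promotable, and everything else inherits the operand's promotability. One observable consequence is sketched below; the exact borrow-check message is an assumption, not something stated in this PR:

```rust
fn main() {
    // Promotable: the literal behind the reference can be promoted, so a
    // `'static` borrow of it is fine.
    let promoted: &'static i32 = &42;
    assert_eq!(*promoted, 42);

    // Not promotable: the expression contains a fn-pointer-to-integer cast,
    // which `check_expr_kind` now classifies as NotPromotable, so the
    // temporary cannot be given `'static` lifetime (expected to fail with
    // something like "temporary value dropped while borrowed").
    // let not_promoted: &'static usize = &(main as fn() as usize);
}
```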
fn check_adjustments<'a, 'tcx>( v: &mut CheckCrateVisitor<'a, 'tcx>, e: &hir::Expr) -> Promotability { @@ -612,7 +586,7 @@ fn check_adjustments<'a, 'tcx>( Adjust::NeverToAny | Adjust::ReifyFnPointer | Adjust::UnsafeFnPointer | - Adjust::ClosureFnPointer | + Adjust::ClosureFnPointer(_) | Adjust::MutToConstPointer | Adjust::Borrow(_) | Adjust::Unsize => {} @@ -632,13 +606,13 @@ fn check_adjustments<'a, 'tcx>( impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { fn consume(&mut self, - _consume_id: ast::NodeId, + _consume_id: hir::HirId, _consume_span: Span, - _cmt: &mc::cmt_, + _cmt: &mc::cmt_<'_>, _mode: euv::ConsumeMode) {} fn borrow(&mut self, - borrow_id: ast::NodeId, + borrow_id: hir::HirId, _borrow_span: Span, cmt: &mc::cmt_<'tcx>, _loan_region: ty::Region<'tcx>, @@ -689,15 +663,18 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for CheckCrateVisitor<'a, 'gcx> { } } - fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) {} + fn decl_without_init(&mut self, _id: hir::HirId, _span: Span) {} fn mutate(&mut self, - _assignment_id: ast::NodeId, + _assignment_id: hir::HirId, _assignment_span: Span, - _assignee_cmt: &mc::cmt_, + _assignee_cmt: &mc::cmt_<'_>, _mode: euv::MutateMode) { } - fn matched_pat(&mut self, _: &hir::Pat, _: &mc::cmt_, _: euv::MatchMode) {} + fn matched_pat(&mut self, _: &hir::Pat, _: &mc::cmt_<'_>, _: euv::MatchMode) {} - fn consume_pat(&mut self, _consume_pat: &hir::Pat, _cmt: &mc::cmt_, _mode: euv::ConsumeMode) {} + fn consume_pat(&mut self, + _consume_pat: &hir::Pat, + _cmt: &mc::cmt_<'_>, + _mode: euv::ConsumeMode) {} } diff --git a/src/librustc_platform_intrinsics/Cargo.toml b/src/librustc_platform_intrinsics/Cargo.toml deleted file mode 100644 index 92f37f974efdc..0000000000000 --- a/src/librustc_platform_intrinsics/Cargo.toml +++ /dev/null @@ -1,9 +0,0 @@ -[package] -authors = ["The Rust Project Developers"] -name = "rustc_platform_intrinsics" -version = "0.0.0" - -[lib] -name = "rustc_platform_intrinsics" -path = "lib.rs" -crate-type = ["dylib"] diff --git a/src/librustc_platform_intrinsics/aarch64.rs b/src/librustc_platform_intrinsics/aarch64.rs deleted file mode 100644 index 14925cd6d4dac..0000000000000 --- a/src/librustc_platform_intrinsics/aarch64.rs +++ /dev/null @@ -1,3414 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option { - if !name.starts_with("aarch64_v") { return None } - Some(match &name["aarch64_v".len()..] 
{ - "hadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.shadd.v8i8") - }, - "hadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uhadd.v8i8") - }, - "hadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.shadd.v4i16") - }, - "hadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uhadd.v4i16") - }, - "hadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.shadd.v2i32") - }, - "hadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uhadd.v2i32") - }, - "haddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.shadd.v16i8") - }, - "haddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uhadd.v16i8") - }, - "haddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.shadd.v8i16") - }, - "haddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uhadd.v8i16") - }, - "haddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.shadd.v4i32") - }, - "haddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uhadd.v4i32") - }, - "rhadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.srhadd.v8i8") - }, - "rhadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.urhadd.v8i8") - }, - "rhadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.srhadd.v4i16") - }, - "rhadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.urhadd.v4i16") - }, - "rhadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.srhadd.v2i32") - }, - "rhadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.urhadd.v2i32") - }, - "rhaddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.srhadd.v16i8") - }, - 
"rhaddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.urhadd.v16i8") - }, - "rhaddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.srhadd.v8i16") - }, - "rhaddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.urhadd.v8i16") - }, - "rhaddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.srhadd.v4i32") - }, - "rhaddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.urhadd.v4i32") - }, - "qadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqadd.v8i8") - }, - "qadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqadd.v8i8") - }, - "qadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqadd.v4i16") - }, - "qadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqadd.v4i16") - }, - "qadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqadd.v2i32") - }, - "qadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqadd.v2i32") - }, - "qadd_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqadd.v1i64") - }, - "qadd_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.uqadd.v1i64") - }, - "qaddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqadd.v16i8") - }, - "qaddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uqadd.v16i8") - }, - "qaddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqadd.v8i16") - }, - "qaddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uqadd.v8i16") - }, - "qaddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqadd.v4i32") - }, - "qaddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uqadd.v4i32") 
- }, - "qaddq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqadd.v2i64") - }, - "qaddq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uqadd.v2i64") - }, - "uqadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.suqadd.v16i8") - }, - "uqadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.suqadd.v8i16") - }, - "uqadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.suqadd.v4i32") - }, - "uqadd_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.suqadd.v2i64") - }, - "sqadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.usqadd.v16i8") - }, - "sqadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.usqadd.v8i16") - }, - "sqadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.usqadd.v4i32") - }, - "sqadd_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.usqadd.v2i64") - }, - "raddhn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.raddhn.v8i8") - }, - "raddhn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.raddhn.v8i8") - }, - "raddhn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.raddhn.v4i16") - }, - "raddhn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.raddhn.v4i16") - }, - "raddhn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.raddhn.v2i32") - }, - "raddhn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.raddhn.v2i32") - }, - "fmulx_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmulx.v2f32") - }, - "fmulx_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fmulx.v1f64") - }, - "fmulxq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: 
Named("llvm.aarch64.neon.fmulx.v4f32") - }, - "fmulxq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmulx.v2f64") - }, - "fma_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.fma.v2f32") - }, - "fma_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.fma.v1f64") - }, - "fmaq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.fma.v4f32") - }, - "fmaq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.fma.v2f64") - }, - "qdmulh_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqdmulh.v4i16") - }, - "qdmulh_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqdmulh.v2i32") - }, - "qdmulhq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqdmulh.v8i16") - }, - "qdmulhq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqdmulh.v4i32") - }, - "qrdmulh_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqrdmulh.v4i16") - }, - "qrdmulh_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqrdmulh.v2i32") - }, - "qrdmulhq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqrdmulh.v8i16") - }, - "qrdmulhq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqrdmulh.v4i32") - }, - "mull_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.smull.v8i16") - }, - "mull_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.umull.v8i16") - }, - "mull_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.smull.v4i32") - }, - "mull_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.umull.v4i32") - }, - "mull_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.smull.v2i64") - }, - "mull_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U64x2, - definition: 
Named("llvm.aarch64.neon.umull.v2i64") - }, - "qdmullq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqdmull.v8i16") - }, - "qdmullq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqdmull.v4i32") - }, - "hsub_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.shsub.v8i8") - }, - "hsub_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uhsub.v8i8") - }, - "hsub_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.shsub.v4i16") - }, - "hsub_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uhsub.v4i16") - }, - "hsub_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.shsub.v2i32") - }, - "hsub_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uhsub.v2i32") - }, - "hsubq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.shsub.v16i8") - }, - "hsubq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uhsub.v16i8") - }, - "hsubq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.shsub.v8i16") - }, - "hsubq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uhsub.v8i16") - }, - "hsubq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.shsub.v4i32") - }, - "hsubq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uhsub.v4i32") - }, - "qsub_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqsub.v8i8") - }, - "qsub_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqsub.v8i8") - }, - "qsub_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqsub.v4i16") - }, - "qsub_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqsub.v4i16") - }, - "qsub_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: 
Named("llvm.aarch64.neon.sqsub.v2i32") - }, - "qsub_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqsub.v2i32") - }, - "qsub_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqsub.v1i64") - }, - "qsub_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.uqsub.v1i64") - }, - "qsubq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqsub.v16i8") - }, - "qsubq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uqsub.v16i8") - }, - "qsubq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqsub.v8i16") - }, - "qsubq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uqsub.v8i16") - }, - "qsubq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqsub.v4i32") - }, - "qsubq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uqsub.v4i32") - }, - "qsubq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqsub.v2i64") - }, - "qsubq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uqsub.v2i64") - }, - "rsubhn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.rsubhn.v8i8") - }, - "rsubhn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.rsubhn.v8i8") - }, - "rsubhn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.rsubhn.v4i16") - }, - "rsubhn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.rsubhn.v4i16") - }, - "rsubhn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.rsubhn.v2i32") - }, - "rsubhn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.rsubhn.v2i32") - }, - "abd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sabd.v8i8") - }, - "abd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - 
definition: Named("llvm.aarch64.neon.uabd.v8i8") - }, - "abd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sabd.v4i16") - }, - "abd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uabd.v4i16") - }, - "abd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sabd.v2i32") - }, - "abd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uabd.v2i32") - }, - "abd_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fabd.v2f32") - }, - "abd_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fabd.v1f64") - }, - "abdq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sabd.v16i8") - }, - "abdq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uabd.v16i8") - }, - "abdq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sabd.v8i16") - }, - "abdq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uabd.v8i16") - }, - "abdq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sabd.v4i32") - }, - "abdq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uabd.v4i32") - }, - "abdq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fabd.v4f32") - }, - "abdq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fabd.v2f64") - }, - "max_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.smax.v8i8") - }, - "max_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.umax.v8i8") - }, - "max_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.smax.v4i16") - }, - "max_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.umax.v4i16") - }, - "max_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: 
Named("llvm.aarch64.neon.smax.v2i32") - }, - "max_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.umax.v2i32") - }, - "max_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmax.v2f32") - }, - "max_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fmax.v1f64") - }, - "maxq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.smax.v16i8") - }, - "maxq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.umax.v16i8") - }, - "maxq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.smax.v8i16") - }, - "maxq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.umax.v8i16") - }, - "maxq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.smax.v4i32") - }, - "maxq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.umax.v4i32") - }, - "maxq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fmax.v4f32") - }, - "maxq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmax.v2f64") - }, - "min_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.smin.v8i8") - }, - "min_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.umin.v8i8") - }, - "min_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.smin.v4i16") - }, - "min_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.umin.v4i16") - }, - "min_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.smin.v2i32") - }, - "min_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.umin.v2i32") - }, - "min_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmin.v2f32") - }, - "min_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fmin.v1f64") - }, 
- "minq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.smin.v16i8") - }, - "minq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.umin.v16i8") - }, - "minq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.smin.v8i16") - }, - "minq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.umin.v8i16") - }, - "minq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.smin.v4i32") - }, - "minq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.umin.v4i32") - }, - "minq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fmin.v4f32") - }, - "minq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmin.v2f64") - }, - "maxnm_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmaxnm.v2f32") - }, - "maxnm_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fmaxnm.v1f64") - }, - "maxnmq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fmaxnm.v4f32") - }, - "maxnmq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmaxnm.v2f64") - }, - "minnm_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fminnm.v2f32") - }, - "minnm_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.fminnm.v1f64") - }, - "minnmq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fminnm.v4f32") - }, - "minnmq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fminnm.v2f64") - }, - "shl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sshl.v8i8") - }, - "shl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.ushl.v8i8") - }, - "shl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sshl.v4i16") - }, - 
"shl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.ushl.v4i16") - }, - "shl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sshl.v2i32") - }, - "shl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.ushl.v2i32") - }, - "shl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sshl.v1i64") - }, - "shl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.ushl.v1i64") - }, - "shlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sshl.v16i8") - }, - "shlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.ushl.v16i8") - }, - "shlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sshl.v8i16") - }, - "shlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.ushl.v8i16") - }, - "shlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sshl.v4i32") - }, - "shlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.ushl.v4i32") - }, - "shlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sshl.v2i64") - }, - "shlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.ushl.v2i64") - }, - "qshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqshl.v8i8") - }, - "qshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqshl.v8i8") - }, - "qshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqshl.v4i16") - }, - "qshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqshl.v4i16") - }, - "qshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqshl.v2i32") - }, - "qshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqshl.v2i32") - }, - "qshl_s64" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqshl.v1i64") - }, - "qshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.uqshl.v1i64") - }, - "qshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqshl.v16i8") - }, - "qshlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uqshl.v16i8") - }, - "qshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqshl.v8i16") - }, - "qshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uqshl.v8i16") - }, - "qshlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqshl.v4i32") - }, - "qshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uqshl.v4i32") - }, - "qshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqshl.v2i64") - }, - "qshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uqshl.v2i64") - }, - "rshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.srshl.v8i8") - }, - "rshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.urshl.v8i8") - }, - "rshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.srshl.v4i16") - }, - "rshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.urshl.v4i16") - }, - "rshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.srshl.v2i32") - }, - "rshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.urshl.v2i32") - }, - "rshl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.srshl.v1i64") - }, - "rshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.urshl.v1i64") - }, - "rshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.srshl.v16i8") - }, - "rshlq_u8" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.urshl.v16i8") - }, - "rshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.srshl.v8i16") - }, - "rshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.urshl.v8i16") - }, - "rshlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.srshl.v4i32") - }, - "rshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.urshl.v4i32") - }, - "rshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.srshl.v2i64") - }, - "rshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.urshl.v2i64") - }, - "qrshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqrshl.v8i8") - }, - "qrshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqrshl.v8i8") - }, - "qrshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqrshl.v4i16") - }, - "qrshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqrshl.v4i16") - }, - "qrshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqrshl.v2i32") - }, - "qrshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqrshl.v2i32") - }, - "qrshl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqrshl.v1i64") - }, - "qrshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.uqrshl.v1i64") - }, - "qrshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqrshl.v16i8") - }, - "qrshlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uqrshl.v16i8") - }, - "qrshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqrshl.v8i16") - }, - "qrshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uqrshl.v8i16") - }, - 
"qrshlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqrshl.v4i32") - }, - "qrshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uqrshl.v4i32") - }, - "qrshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqrshl.v2i64") - }, - "qrshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uqrshl.v2i64") - }, - "qshrun_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqshrun.v8i8") - }, - "qshrun_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqshrun.v4i16") - }, - "qshrun_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqshrun.v2i32") - }, - "qrshrun_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqrshrun.v8i8") - }, - "qrshrun_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqrshrun.v4i16") - }, - "qrshrun_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqrshrun.v2i32") - }, - "qshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqshrn.v8i8") - }, - "qshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqshrn.v8i8") - }, - "qshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqshrn.v4i16") - }, - "qshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqshrn.v4i16") - }, - "qshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqshrn.v2i32") - }, - "qshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqshrn.v2i32") - }, - "rshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.rshrn.v8i8") - }, - "rshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.rshrn.v8i8") - }, - "rshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: 
Named("llvm.aarch64.neon.rshrn.v4i16") - }, - "rshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.rshrn.v4i16") - }, - "rshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.rshrn.v2i32") - }, - "rshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.rshrn.v2i32") - }, - "qrshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqrshrn.v8i8") - }, - "qrshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqrshrn.v8i8") - }, - "qrshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqrshrn.v4i16") - }, - "qrshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqrshrn.v4i16") - }, - "qrshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqrshrn.v2i32") - }, - "qrshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqrshrn.v2i32") - }, - "sri_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.vsri.v8i8") - }, - "sri_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.vsri.v8i8") - }, - "sri_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.vsri.v4i16") - }, - "sri_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.vsri.v4i16") - }, - "sri_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.vsri.v2i32") - }, - "sri_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.vsri.v2i32") - }, - "sri_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.vsri.v1i64") - }, - "sri_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.vsri.v1i64") - }, - "sriq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.vsri.v16i8") - }, - "sriq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: 
Named("llvm.aarch64.neon.vsri.v16i8") - }, - "sriq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.vsri.v8i16") - }, - "sriq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.vsri.v8i16") - }, - "sriq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.vsri.v4i32") - }, - "sriq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.vsri.v4i32") - }, - "sriq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.vsri.v2i64") - }, - "sriq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.vsri.v2i64") - }, - "sli_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.vsli.v8i8") - }, - "sli_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.vsli.v8i8") - }, - "sli_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.vsli.v4i16") - }, - "sli_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.vsli.v4i16") - }, - "sli_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.vsli.v2i32") - }, - "sli_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.vsli.v2i32") - }, - "sli_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.vsli.v1i64") - }, - "sli_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.vsli.v1i64") - }, - "sliq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.vsli.v16i8") - }, - "sliq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.vsli.v16i8") - }, - "sliq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.vsli.v8i16") - }, - "sliq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.vsli.v8i16") - }, - "sliq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.vsli.v4i32") - 
}, - "sliq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.vsli.v4i32") - }, - "sliq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.vsli.v2i64") - }, - "sliq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.vsli.v2i64") - }, - "vqmovn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqxtn.v8i8") - }, - "vqmovn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uqxtn.v8i8") - }, - "vqmovn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqxtn.v4i16") - }, - "vqmovn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uqxtn.v4i16") - }, - "vqmovn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqxtn.v2i32") - }, - "vqmovn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uqxtn.v2i32") - }, - "abs_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.abs.v8i8") - }, - "abs_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.abs.v4i16") - }, - "abs_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.abs.v2i32") - }, - "abs_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.abs.v1i64") - }, - "absq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.abs.v16i8") - }, - "absq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.abs.v8i16") - }, - "absq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.abs.v4i32") - }, - "absq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.abs.v2i64") - }, - "abs_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.fabs.v2f32") - }, - "abs_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.fabs.v1f64") - }, - "absq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.fabs.v4f32") - }, - "absq_f64" => Intrinsic { - inputs: { 
static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.fabs.v2f64") - }, - "qabs_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqabs.v8i8") - }, - "qabs_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqabs.v4i16") - }, - "qabs_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqabs.v2i32") - }, - "qabs_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqabs.v1i64") - }, - "qabsq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqabs.v16i8") - }, - "qabsq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqabs.v8i16") - }, - "qabsq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqabs.v4i32") - }, - "qabsq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqabs.v2i64") - }, - "qneg_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sqneg.v8i8") - }, - "qneg_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sqneg.v4i16") - }, - "qneg_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sqneg.v2i32") - }, - "qneg_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.aarch64.neon.sqneg.v1i64") - }, - "qnegq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sqneg.v16i8") - }, - "qnegq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sqneg.v8i16") - }, - "qnegq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sqneg.v4i32") - }, - "qnegq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sqneg.v2i64") - }, - "clz_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.ctlz.v8i8") - }, - "clz_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.ctlz.v8i8") - }, - "clz_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.ctlz.v4i16") - }, - "clz_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16x4, - 
definition: Named("llvm.ctlz.v4i16") - }, - "clz_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.ctlz.v2i32") - }, - "clz_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.ctlz.v2i32") - }, - "clzq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ctlz.v16i8") - }, - "clzq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ctlz.v16i8") - }, - "clzq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ctlz.v8i16") - }, - "clzq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ctlz.v8i16") - }, - "clzq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ctlz.v4i32") - }, - "clzq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ctlz.v4i32") - }, - "cls_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.cls.v8i8") - }, - "cls_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.cls.v8i8") - }, - "cls_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.cls.v4i16") - }, - "cls_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.cls.v4i16") - }, - "cls_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.cls.v2i32") - }, - "cls_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.cls.v2i32") - }, - "clsq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.cls.v16i8") - }, - "clsq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.cls.v16i8") - }, - "clsq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.cls.v8i16") - }, - "clsq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.cls.v8i16") - }, - "clsq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.cls.v4i32") - }, - "clsq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.cls.v4i32") - }, - "cnt_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: 
Named("llvm.ctpop.v8i8") - }, - "cnt_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.ctpop.v8i8") - }, - "cntq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ctpop.v16i8") - }, - "cntq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ctpop.v16i8") - }, - "recpe_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.urecpe.v2i32") - }, - "recpe_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.frecpe.v2f32") - }, - "recpe_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.frecpe.v1f64") - }, - "recpeq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.urecpe.v4i32") - }, - "recpeq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.frecpe.v4f32") - }, - "recpeq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.frecpe.v2f64") - }, - "recps_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.frecps.v2f32") - }, - "recps_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.frecps.v1f64") - }, - "recpsq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.frecps.v4f32") - }, - "recpsq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.frecps.v2f64") - }, - "sqrt_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.sqrt.v2f32") - }, - "sqrt_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.sqrt.v1f64") - }, - "sqrtq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.sqrt.v4f32") - }, - "sqrtq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.sqrt.v2f64") - }, - "rsqrte_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.ursqrte.v2i32") - }, - "rsqrte_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.frsqrte.v2f32") - }, - "rsqrte_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.frsqrte.v1f64") - }, - 
"rsqrteq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.ursqrte.v4i32") - }, - "rsqrteq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.frsqrte.v4f32") - }, - "rsqrteq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.frsqrte.v2f64") - }, - "rsqrts_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.frsqrts.v2f32") - }, - "rsqrts_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &INPUTS }, - output: &::F64x1, - definition: Named("llvm.aarch64.neon.frsqrts.v1f64") - }, - "rsqrtsq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.frsqrts.v4f32") - }, - "rsqrtsq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.frsqrts.v2f64") - }, - "rbit_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.rbit.v8i8") - }, - "rbit_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.rbit.v8i8") - }, - "rbitq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.rbit.v16i8") - }, - "rbitq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.rbit.v16i8") - }, - "ld2_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8") - }, - "ld2_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i8.p0v8i8") - }, - "ld2_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16") - }, - "ld2_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i16.p0v4i16") - }, - "ld2_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, 
Some(&::I32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32") - }, - "ld2_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i32.p0v2i32") - }, - "ld2_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64") - }, - "ld2_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1i64.p0v1i64") - }, - "ld2_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2f32.p0v2f32") - }, - "ld2_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1f64.p0v1f64") - }, - "ld2q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8") - }, - "ld2q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v16i8.p0v16i8") - }, - "ld2q_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16") - }, - "ld2q_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i16.p0v8i16") - }, - "ld2q_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: 
Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32") - }, - "ld2q_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i32.p0v4i32") - }, - "ld2q_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64") - }, - "ld2q_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i64.p0v2i64") - }, - "ld2q_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4f32.p0v4f32") - }, - "ld2q_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2f64.p0v2f64") - }, - "ld3_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8") - }, - "ld3_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i8.p0v8i8") - }, - "ld3_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16") - }, - "ld3_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i16.p0v4i16") - }, - "ld3_s32" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32") - }, - "ld3_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i32.p0v2i32") - }, - "ld3_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64") - }, - "ld3_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1i64.p0v1i64") - }, - "ld3_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2f32.p0v2f32") - }, - "ld3_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1f64.p0v1f64") - }, - "ld3q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8") - }, - "ld3q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v16i8.p0v16i8") - }, - "ld3q_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16") - }, - "ld3q_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, 
&::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i16.p0v8i16") - }, - "ld3q_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32") - }, - "ld3q_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i32.p0v4i32") - }, - "ld3q_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64") - }, - "ld3q_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i64.p0v2i64") - }, - "ld3q_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4f32.p0v4f32") - }, - "ld3q_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2f64.p0v2f64") - }, - "ld4_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8") - }, - "ld4_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i8.p0v8i8") - }, - "ld4_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16") - }, - "ld4_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, 
Some(&::U16x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i16.p0v4i16") - }, - "ld4_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32") - }, - "ld4_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i32.p0v2i32") - }, - "ld4_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64") - }, - "ld4_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1i64.p0v1i64") - }, - "ld4_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2f32.p0v2f32") - }, - "ld4_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x1), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1f64.p0v1f64") - }, - "ld4q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, Some(&::I8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8") - }, - "ld4q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, Some(&::U8x16), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v16i8.p0v16i8") - }, - "ld4q_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, Some(&::I16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, 
&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16") - }, - "ld4q_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, Some(&::U16x8), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i16.p0v8i16") - }, - "ld4q_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32") - }, - "ld4q_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, Some(&::U32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i32.p0v4i32") - }, - "ld4q_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64") - }, - "ld4q_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, Some(&::U64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i64.p0v2i64") - }, - "ld4q_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::F32x4), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4f32.p0v4f32") - }, - "ld4q_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::F64x2), true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2f64.p0v2f64") - }, - "ld2_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8") - }, - "ld2_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i8.p0i8") - }, - "ld2_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type 
= Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16") - }, - "ld2_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i16.p0i16") - }, - "ld2_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32") - }, - "ld2_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i32.p0i32") - }, - "ld2_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64") - }, - "ld2_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1i64.p0i64") - }, - "ld2_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2f32.p0f32") - }, - "ld2_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v1f64.p0f64") - }, - "ld2q_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8") - }, - "ld2q_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v16i8.p0i8") - }, - "ld2q_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, 
- output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16") - }, - "ld2q_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v8i16.p0i16") - }, - "ld2q_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i32.p0i32") - }, - "ld2q_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4i32.p0i32") - }, - "ld2q_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64") - }, - "ld2q_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2i64.p0i64") - }, - "ld2q_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v4f32.p0f32") - }, - "ld2q_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld2.v2f64.p0f64") - }, - "ld3_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8") - }, - "ld3_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i8.p0i8") - }, - "ld3_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = 
Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16") - }, - "ld3_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i16.p0i16") - }, - "ld3_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32") - }, - "ld3_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i32.p0i32") - }, - "ld3_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64") - }, - "ld3_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1i64.p0i64") - }, - "ld3_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2f32.p0f32") - }, - "ld3_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v1f64.p0f64") - }, - "ld3q_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8") - }, - "ld3q_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v16i8.p0i8") - }, - "ld3q_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; 
&INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16") - }, - "ld3q_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v8i16.p0i16") - }, - "ld3q_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32") - }, - "ld3q_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4i32.p0i32") - }, - "ld3q_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i64.p0i64") - }, - "ld3q_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2i64.p0i64") - }, - "ld3q_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v4f32.p0f32") - }, - "ld3q_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld3.v2f64.p0f64") - }, - "ld4_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8") - }, - "ld4_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i8.p0i8") - }, - "ld4_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static 
PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x4, &::I16x4, &::I16x4, &::I16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16") - }, - "ld4_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x4, &::U16x4, &::U16x4, &::U16x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i16.p0i16") - }, - "ld4_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x2, &::I32x2, &::I32x2, &::I32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32") - }, - "ld4_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x2, &::U32x2, &::U32x2, &::U32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i32.p0i32") - }, - "ld4_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x1, &::I64x1, &::I64x1, &::I64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64") - }, - "ld4_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x1, &::U64x1, &::U64x1, &::U64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1i64.p0i64") - }, - "ld4_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x2, &::F32x2, &::F32x2, &::F32x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2f32.p0f32") - }, - "ld4_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x1, &::F64x1, &::F64x1, &::F64x1]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v1f64.p0f64") - }, - "ld4q_dup_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8") - }, - "ld4q_dup_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, - 
definition: Named("llvm.aarch64.neon.ld4.v16i8.p0i8") - }, - "ld4q_dup_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I16x8, &::I16x8, &::I16x8, &::I16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16") - }, - "ld4q_dup_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U16, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U16x8, &::U16x8, &::U16x8, &::U16x8]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v8i16.p0i16") - }, - "ld4q_dup_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I32x4, &::I32x4, &::I32x4, &::I32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32") - }, - "ld4q_dup_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U32x4, &::U32x4, &::U32x4, &::U32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4i32.p0i32") - }, - "ld4q_dup_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::I64x2, &::I64x2, &::I64x2, &::I64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64") - }, - "ld4q_dup_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::U64x2, &::U64x2, &::U64x2, &::U64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2i64.p0i64") - }, - "ld4q_dup_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F32, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F32x4, &::F32x4, &::F32x4, &::F32x4]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v4f32.p0f32") - }, - "ld4q_dup_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::F64, None, true); &PTR }]; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 4] = [&::F64x2, &::F64x2, &::F64x2, &::F64x2]; &PARTS }); &AGG }, - definition: Named("llvm.aarch64.neon.ld4.v2f64.p0f64") - }, - "padd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.addp.v8i8") - }, - "padd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.addp.v8i8") - }, - "padd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: 
Named("llvm.aarch64.neon.addp.v4i16") - }, - "padd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.addp.v4i16") - }, - "padd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.addp.v2i32") - }, - "padd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.addp.v2i32") - }, - "padd_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.addp.v2f32") - }, - "paddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.addp.v16i8") - }, - "paddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.addp.v16i8") - }, - "paddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.addp.v8i16") - }, - "paddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.addp.v8i16") - }, - "paddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.addp.v4i32") - }, - "paddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.addp.v4i32") - }, - "paddq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.addp.v4f32") - }, - "paddq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.addp.v2i64") - }, - "paddq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.addp.v2i64") - }, - "paddq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.addp.v2f64") - }, - "paddl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.saddlp.v4i16.v8i8") - }, - "paddl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uaddlp.v4i16.v8i8") - }, - "paddl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.saddlp.v2i32.v4i16") - }, - "paddl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uaddlp.v2i32.v4i16") - }, - "paddl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I64x1, - definition: 
Named("llvm.aarch64.neon.saddlp.v1i64.v2i32") - }, - "paddl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.aarch64.neon.uaddlp.v1i64.v2i32") - }, - "paddlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.saddlp.v8i16.v16i8") - }, - "paddlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uaddlp.v8i16.v16i8") - }, - "paddlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.saddlp.v4i32.v8i16") - }, - "paddlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uaddlp.v4i32.v8i16") - }, - "paddlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.saddlp.v2i64.v4i32") - }, - "paddlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uaddlp.v2i64.v4i32") - }, - "pmax_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.smaxp.v8i8") - }, - "pmax_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.umaxp.v8i8") - }, - "pmax_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.smaxp.v4i16") - }, - "pmax_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.umaxp.v4i16") - }, - "pmax_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.smaxp.v2i32") - }, - "pmax_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.umaxp.v2i32") - }, - "pmax_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmaxp.v2f32") - }, - "pmaxq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.smaxp.v16i8") - }, - "pmaxq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.umaxp.v16i8") - }, - "pmaxq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.smaxp.v8i16") - }, - "pmaxq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.umaxp.v8i16") - }, - "pmaxq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: 
Named("llvm.aarch64.neon.smaxp.v4i32") - }, - "pmaxq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.umaxp.v4i32") - }, - "pmaxq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fmaxp.v4f32") - }, - "pmaxq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.smaxp.v2i64") - }, - "pmaxq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.umaxp.v2i64") - }, - "pmaxq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmaxp.v2f64") - }, - "pmin_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.sminp.v8i8") - }, - "pmin_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.uminp.v8i8") - }, - "pmin_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.sminp.v4i16") - }, - "pmin_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.uminp.v4i16") - }, - "pmin_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.sminp.v2i32") - }, - "pmin_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.uminp.v2i32") - }, - "pmin_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fminp.v2f32") - }, - "pminq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.sminp.v16i8") - }, - "pminq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.uminp.v16i8") - }, - "pminq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.sminp.v8i16") - }, - "pminq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.uminp.v8i16") - }, - "pminq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.sminp.v4i32") - }, - "pminq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.uminp.v4i32") - }, - "pminq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: 
Named("llvm.aarch64.neon.fminp.v4f32") - }, - "pminq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.sminp.v2i64") - }, - "pminq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.uminp.v2i64") - }, - "pminq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fminp.v2f64") - }, - "pmaxnm_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.smaxnmp.v8i8") - }, - "pmaxnm_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.umaxnmp.v8i8") - }, - "pmaxnm_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.aarch64.neon.smaxnmp.v4i16") - }, - "pmaxnm_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.aarch64.neon.umaxnmp.v4i16") - }, - "pmaxnm_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.aarch64.neon.smaxnmp.v2i32") - }, - "pmaxnm_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.aarch64.neon.umaxnmp.v2i32") - }, - "pmaxnm_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fmaxnmp.v2f32") - }, - "pmaxnmq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.smaxnmp.v16i8") - }, - "pmaxnmq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.umaxnmp.v16i8") - }, - "pmaxnmq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.aarch64.neon.smaxnmp.v8i16") - }, - "pmaxnmq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.aarch64.neon.umaxnmp.v8i16") - }, - "pmaxnmq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.aarch64.neon.smaxnmp.v4i32") - }, - "pmaxnmq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.aarch64.neon.umaxnmp.v4i32") - }, - "pmaxnmq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fmaxnmp.v4f32") - }, - "pmaxnmq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.aarch64.neon.smaxnmp.v2i64") - }, - "pmaxnmq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.aarch64.neon.umaxnmp.v2i64") - }, - "pmaxnmq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fmaxnmp.v2f64") - }, - "pminnm_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.aarch64.neon.fminnmp.v2f32") - }, - "pminnmq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.aarch64.neon.fminnmp.v4f32") - }, - "pminnmq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.aarch64.neon.fminnmp.v2f64") - }, - "addv_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.saddv.i8.v8i8") - }, - "addv_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.uaddv.i8.v8i8") - }, - "addv_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.saddv.i16.v4i16") - }, - "addv_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.uaddv.i16.v4i16") - }, - "addv_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.saddv.i32.v2i32") - }, - "addv_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uaddv.i32.v2i32") - }, - "addv_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.faddv.f32.v2f32") - }, - "addvq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.saddv.i8.v16i8") - }, - "addvq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.uaddv.i8.v16i8") - }, - "addvq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.saddv.i16.v8i16") - }, - "addvq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.uaddv.i16.v8i16") - }, - "addvq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.saddv.i32.v4i32") - }, - "addvq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uaddv.i32.v4i32") - }, - "addvq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.faddv.f32.v4f32") - }, - "addvq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I64, - definition: Named("llvm.aarch64.neon.saddv.i64.v2i64") - 
}, - "addvq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, - output: &::U64, - definition: Named("llvm.aarch64.neon.uaddv.i64.v2i64") - }, - "addvq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64, - definition: Named("llvm.aarch64.neon.faddv.f64.v2f64") - }, - "addlv_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.saddlv.i16.v8i8") - }, - "addlv_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.uaddlv.i16.v8i8") - }, - "addlv_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.saddlv.i32.v4i16") - }, - "addlv_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uaddlv.i32.v4i16") - }, - "addlv_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I64, - definition: Named("llvm.aarch64.neon.saddlv.i64.v2i32") - }, - "addlv_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U64, - definition: Named("llvm.aarch64.neon.uaddlv.i64.v2i32") - }, - "addlvq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.saddlv.i16.v16i8") - }, - "addlvq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.uaddlv.i16.v16i8") - }, - "addlvq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.saddlv.i32.v8i16") - }, - "addlvq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uaddlv.i32.v8i16") - }, - "addlvq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I64, - definition: Named("llvm.aarch64.neon.saddlv.i64.v4i32") - }, - "addlvq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U64, - definition: Named("llvm.aarch64.neon.uaddlv.i64.v4i32") - }, - "maxv_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.smaxv.i8.v8i8") - }, - "maxv_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.umaxv.i8.v8i8") - }, - "maxv_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.smaxv.i16.v4i16") - }, - "maxv_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.umaxv.i16.v4i16") - }, - "maxv_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.smaxv.i32.v2i32") - }, - "maxv_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32, - definition: 
Named("llvm.aarch64.neon.umaxv.i32.v2i32") - }, - "maxv_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fmaxv.f32.v2f32") - }, - "maxvq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.smaxv.i8.v16i8") - }, - "maxvq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.umaxv.i8.v16i8") - }, - "maxvq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.smaxv.i16.v8i16") - }, - "maxvq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.umaxv.i16.v8i16") - }, - "maxvq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.smaxv.i32.v4i32") - }, - "maxvq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.umaxv.i32.v4i32") - }, - "maxvq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fmaxv.f32.v4f32") - }, - "maxvq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64, - definition: Named("llvm.aarch64.neon.fmaxv.f64.v2f64") - }, - "minv_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.sminv.i8.v8i8") - }, - "minv_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.uminv.i8.v8i8") - }, - "minv_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.sminv.i16.v4i16") - }, - "minv_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16, - definition: Named("llvm.aarch64.neon.uminv.i16.v4i16") - }, - "minv_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.sminv.i32.v2i32") - }, - "minv_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uminv.i32.v2i32") - }, - "minv_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fminv.f32.v2f32") - }, - "minvq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8, - definition: Named("llvm.aarch64.neon.sminv.i8.v16i8") - }, - "minvq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8, - definition: Named("llvm.aarch64.neon.uminv.i8.v16i8") - }, - "minvq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16, - definition: Named("llvm.aarch64.neon.sminv.i16.v8i16") - }, - "minvq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: 
&::U16, - definition: Named("llvm.aarch64.neon.uminv.i16.v8i16") - }, - "minvq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.aarch64.neon.sminv.i32.v4i32") - }, - "minvq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32, - definition: Named("llvm.aarch64.neon.uminv.i32.v4i32") - }, - "minvq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fminv.f32.v4f32") - }, - "minvq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64, - definition: Named("llvm.aarch64.neon.fminv.f64.v2f64") - }, - "maxnmv_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v2f32") - }, - "maxnmvq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fmaxnmv.f32.v4f32") - }, - "maxnmvq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64, - definition: Named("llvm.aarch64.neon.fmaxnmv.f64.v2f64") - }, - "minnmv_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fminnmv.f32.v2f32") - }, - "minnmvq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32, - definition: Named("llvm.aarch64.neon.fminnmv.f32.v4f32") - }, - "minnmvq_f64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64, - definition: Named("llvm.aarch64.neon.fminnmv.f64.v2f64") - }, - "qtbl1_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbl1.v8i8") - }, - "qtbl1_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbl1.v8i8") - }, - "qtbl1q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbl1.v16i8") - }, - "qtbl1q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbl1.v16i8") - }, - "qtbx1_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x16, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbx1.v8i8") - }, - "qtbx1_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x16, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbx1.v8i8") - }, - "qtbx1q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbx1.v16i8") - }, - "qtbx1q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbx1.v16i8") - }, - "qtbl2_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: 
[&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbl2.v8i8") - }, - "qtbl2_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbl2.v8i8") - }, - "qtbl2q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbl2.v16i8") - }, - "qtbl2q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbl2.v16i8") - }, - "qtbx2_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbx2.v8i8") - }, - "qtbx2_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbx2.v8i8") - }, - "qtbx2q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbx2.v16i8") - }, - "qtbx2q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbx2.v16i8") - }, - "qtbl3_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbl3.v8i8") - }, - "qtbl3_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbl3.v8i8") - }, - "qtbl3q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbl3.v16i8") - }, - "qtbl3q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbl3.v16i8") - }, - "qtbx3_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static 
AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbx3.v8i8") - }, - "qtbx3_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbx3.v8i8") - }, - "qtbx3q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbx3.v16i8") - }, - "qtbx3q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbx3.v16i8") - }, - "qtbl4_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbl4.v8i8") - }, - "qtbl4_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbl4.v8i8") - }, - "qtbl4q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbl4.v16i8") - }, - "qtbl4q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbl4.v16i8") - }, - "qtbx4_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.aarch64.neon.tbx4.v8i8") - }, - "qtbx4_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.aarch64.neon.tbx4.v8i8") - }, - "qtbx4q_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x16, &::I8x16, &::I8x16, &::I8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.aarch64.neon.tbx4.v16i8") - }, - "qtbx4q_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, { static AGG: Type = Type::Aggregate(true, { static PARTS: 
[&'static Type; 4] = [&::U8x16, &::U8x16, &::U8x16, &::U8x16]; &PARTS }); &AGG }, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.aarch64.neon.tbx4.v16i8") - }, - _ => return None, - }) -} diff --git a/src/librustc_platform_intrinsics/arm.rs b/src/librustc_platform_intrinsics/arm.rs deleted file mode 100644 index 8b320d90fac2c..0000000000000 --- a/src/librustc_platform_intrinsics/arm.rs +++ /dev/null @@ -1,2084 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option { - if !name.starts_with("arm_v") { return None } - Some(match &name["arm_v".len()..] { - "hadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vhadds.v8i8") - }, - "hadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vhaddu.v8i8") - }, - "hadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vhadds.v4i16") - }, - "hadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vhaddu.v4i16") - }, - "hadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vhadds.v2i32") - }, - "hadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vhaddu.v2i32") - }, - "haddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vhadds.v16i8") - }, - "haddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vhaddu.v16i8") - }, - "haddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vhadds.v8i16") - }, - "haddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vhaddu.v8i16") - }, - "haddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vhadds.v4i32") - }, - "haddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vhaddu.v4i32") - }, - "rhadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vrhadds.v8i8") - }, - "rhadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static 
Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vrhaddu.v8i8") - }, - "rhadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vrhadds.v4i16") - }, - "rhadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vrhaddu.v4i16") - }, - "rhadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vrhadds.v2i32") - }, - "rhadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrhaddu.v2i32") - }, - "rhaddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vrhadds.v16i8") - }, - "rhaddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vrhaddu.v16i8") - }, - "rhaddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vrhadds.v8i16") - }, - "rhaddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vrhaddu.v8i16") - }, - "rhaddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vrhadds.v4i32") - }, - "rhaddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vrhaddu.v4i32") - }, - "qadd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqadds.v8i8") - }, - "qadd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqaddu.v8i8") - }, - "qadd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqadds.v4i16") - }, - "qadd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqaddu.v4i16") - }, - "qadd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqadds.v2i32") - }, - "qadd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqaddu.v2i32") - }, - "qadd_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vqadds.v1i64") - }, - "qadd_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vqaddu.v1i64") - }, - "qaddq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - 
output: &::I8x16, - definition: Named("llvm.arm.neon.vqadds.v16i8") - }, - "qaddq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vqaddu.v16i8") - }, - "qaddq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vqadds.v8i16") - }, - "qaddq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vqaddu.v8i16") - }, - "qaddq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vqadds.v4i32") - }, - "qaddq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vqaddu.v4i32") - }, - "qaddq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vqadds.v2i64") - }, - "qaddq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vqaddu.v2i64") - }, - "raddhn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vraddhn.v8i8") - }, - "raddhn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vraddhn.v8i8") - }, - "raddhn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vraddhn.v4i16") - }, - "raddhn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vraddhn.v4i16") - }, - "raddhn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vraddhn.v2i32") - }, - "raddhn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vraddhn.v2i32") - }, - "fma_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.fma.v2f32") - }, - "fmaq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.fma.v4f32") - }, - "qdmulh_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqdmulh.v4i16") - }, - "qdmulh_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vsqdmulh.v2i32") - }, - "qdmulhq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vsqdmulh.v8i16") - }, - "qdmulhq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: 
Named("llvm.arm.neon.vsqdmulh.v4i32") - }, - "qrdmulh_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqrdmulh.v4i16") - }, - "qrdmulh_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vsqrdmulh.v2i32") - }, - "qrdmulhq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vsqrdmulh.v8i16") - }, - "qrdmulhq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vsqrdmulh.v4i32") - }, - "mull_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vmulls.v8i16") - }, - "mull_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vmullu.v8i16") - }, - "mull_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vmulls.v4i32") - }, - "mull_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vmullu.v4i32") - }, - "mull_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vmulls.v2i64") - }, - "mull_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vmullu.v2i64") - }, - "qdmullq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vsqdmull.v8i16") - }, - "qdmullq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vsqdmull.v4i32") - }, - "hsub_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vhsubs.v8i8") - }, - "hsub_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vhsubu.v8i8") - }, - "hsub_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vhsubs.v4i16") - }, - "hsub_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vhsubu.v4i16") - }, - "hsub_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vhsubs.v2i32") - }, - "hsub_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vhsubu.v2i32") - }, - "hsubq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vhsubs.v16i8") - 
}, - "hsubq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vhsubu.v16i8") - }, - "hsubq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vhsubs.v8i16") - }, - "hsubq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vhsubu.v8i16") - }, - "hsubq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vhsubs.v4i32") - }, - "hsubq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vhsubu.v4i32") - }, - "qsub_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqsubs.v8i8") - }, - "qsub_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqsubu.v8i8") - }, - "qsub_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqsubs.v4i16") - }, - "qsub_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqsubu.v4i16") - }, - "qsub_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqsubs.v2i32") - }, - "qsub_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqsubu.v2i32") - }, - "qsub_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vqsubs.v1i64") - }, - "qsub_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vqsubu.v1i64") - }, - "qsubq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vqsubs.v16i8") - }, - "qsubq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vqsubu.v16i8") - }, - "qsubq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vqsubs.v8i16") - }, - "qsubq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vqsubu.v8i16") - }, - "qsubq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vqsubs.v4i32") - }, - "qsubq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vqsubu.v4i32") - }, - "qsubq_s64" => Intrinsic { - inputs: { static INPUTS: 
[&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vqsubs.v2i64") - }, - "qsubq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vqsubu.v2i64") - }, - "rsubhn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vrsubhn.v8i8") - }, - "rsubhn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vrsubhn.v8i8") - }, - "rsubhn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vrsubhn.v4i16") - }, - "rsubhn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vrsubhn.v4i16") - }, - "rsubhn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vrsubhn.v2i32") - }, - "rsubhn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrsubhn.v2i32") - }, - "abd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vabds.v8i8") - }, - "abd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vabdu.v8i8") - }, - "abd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vabds.v4i16") - }, - "abd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vabdu.v4i16") - }, - "abd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vabds.v2i32") - }, - "abd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vabdu.v2i32") - }, - "abd_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vabdf.v2f32") - }, - "abdq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vabds.v16i8") - }, - "abdq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vabdu.v16i8") - }, - "abdq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vabds.v8i16") - }, - "abdq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vabdu.v8i16") - }, - "abdq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, 
- definition: Named("llvm.arm.neon.vabds.v4i32") - }, - "abdq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vabdu.v4i32") - }, - "abdq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vabdf.v4f32") - }, - "max_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vmaxs.v8i8") - }, - "max_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vmaxu.v8i8") - }, - "max_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vmaxs.v4i16") - }, - "max_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vmaxu.v4i16") - }, - "max_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vmaxs.v2i32") - }, - "max_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vmaxu.v2i32") - }, - "max_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vmaxf.v2f32") - }, - "maxq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vmaxs.v16i8") - }, - "maxq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vmaxu.v16i8") - }, - "maxq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vmaxs.v8i16") - }, - "maxq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vmaxu.v8i16") - }, - "maxq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vmaxs.v4i32") - }, - "maxq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vmaxu.v4i32") - }, - "maxq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vmaxf.v4f32") - }, - "min_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vmins.v8i8") - }, - "min_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vminu.v8i8") - }, - "min_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vmins.v4i16") - }, - "min_u16" => Intrinsic { - inputs: { static INPUTS: 
[&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vminu.v4i16") - }, - "min_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vmins.v2i32") - }, - "min_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vminu.v2i32") - }, - "min_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vminf.v2f32") - }, - "minq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vmins.v16i8") - }, - "minq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vminu.v16i8") - }, - "minq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vmins.v8i16") - }, - "minq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vminu.v8i16") - }, - "minq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vmins.v4i32") - }, - "minq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vminu.v4i32") - }, - "minq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vminf.v4f32") - }, - "shl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vshls.v8i8") - }, - "shl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vshlu.v8i8") - }, - "shl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vshls.v4i16") - }, - "shl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vshlu.v4i16") - }, - "shl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vshls.v2i32") - }, - "shl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vshlu.v2i32") - }, - "shl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vshls.v1i64") - }, - "shl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vshlu.v1i64") - }, - "shlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: 
Named("llvm.arm.neon.vshls.v16i8") - }, - "shlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vshlu.v16i8") - }, - "shlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vshls.v8i16") - }, - "shlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vshlu.v8i16") - }, - "shlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vshls.v4i32") - }, - "shlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vshlu.v4i32") - }, - "shlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vshls.v2i64") - }, - "shlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vshlu.v2i64") - }, - "qshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqshls.v8i8") - }, - "qshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqshlu.v8i8") - }, - "qshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqshls.v4i16") - }, - "qshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqshlu.v4i16") - }, - "qshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqshls.v2i32") - }, - "qshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqshlu.v2i32") - }, - "qshl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vqshls.v1i64") - }, - "qshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vqshlu.v1i64") - }, - "qshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vqshls.v16i8") - }, - "qshlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vqshlu.v16i8") - }, - "qshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vqshls.v8i16") - }, - "qshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vqshlu.v8i16") - }, - "qshlq_s32" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vqshls.v4i32") - }, - "qshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vqshlu.v4i32") - }, - "qshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vqshls.v2i64") - }, - "qshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vqshlu.v2i64") - }, - "rshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vrshls.v8i8") - }, - "rshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vrshlu.v8i8") - }, - "rshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vrshls.v4i16") - }, - "rshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vrshlu.v4i16") - }, - "rshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vrshls.v2i32") - }, - "rshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrshlu.v2i32") - }, - "rshl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vrshls.v1i64") - }, - "rshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vrshlu.v1i64") - }, - "rshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vrshls.v16i8") - }, - "rshlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vrshlu.v16i8") - }, - "rshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vrshls.v8i16") - }, - "rshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vrshlu.v8i16") - }, - "rshlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vrshls.v4i32") - }, - "rshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vrshlu.v4i32") - }, - "rshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vrshls.v2i64") - }, - "rshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, 
&::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vrshlu.v2i64") - }, - "qrshl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqrshls.v8i8") - }, - "qrshl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqrshlu.v8i8") - }, - "qrshl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqrshls.v4i16") - }, - "qrshl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqrshlu.v4i16") - }, - "qrshl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqrshls.v2i32") - }, - "qrshl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqrshlu.v2i32") - }, - "qrshl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vqrshls.v1i64") - }, - "qrshl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vqrshlu.v1i64") - }, - "qrshlq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vqrshls.v16i8") - }, - "qrshlq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vqrshlu.v16i8") - }, - "qrshlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vqrshls.v8i16") - }, - "qrshlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vqrshlu.v8i16") - }, - "qrshlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vqrshls.v4i32") - }, - "qrshlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vqrshlu.v4i32") - }, - "qrshlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vqrshls.v2i64") - }, - "qrshlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vqrshlu.v2i64") - }, - "qshrun_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vsqshrun.v8i8") - }, - "qshrun_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqshrun.v4i16") - }, - "qshrun_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, 
- output: &::I32x2, - definition: Named("llvm.arm.neon.vsqshrun.v2i32") - }, - "qrshrun_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vsqrshrun.v8i8") - }, - "qrshrun_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqrshrun.v4i16") - }, - "qrshrun_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vsqrshrun.v2i32") - }, - "qshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqshrns.v8i8") - }, - "qshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqshrnu.v8i8") - }, - "qshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqshrns.v4i16") - }, - "qshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqshrnu.v4i16") - }, - "qshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqshrns.v2i32") - }, - "qshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqshrnu.v2i32") - }, - "rshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vrshrn.v8i8") - }, - "rshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vrshrn.v8i8") - }, - "rshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vrshrn.v4i16") - }, - "rshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vrshrn.v4i16") - }, - "rshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vrshrn.v2i32") - }, - "rshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrshrn.v2i32") - }, - "qrshrn_n_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::U32]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqrshrns.v8i8") - }, - "qrshrn_n_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U32]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqrshrnu.v8i8") - }, - "qrshrn_n_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::U32]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqrshrns.v4i16") - }, - "qrshrn_n_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U16x4, 
- definition: Named("llvm.arm.neon.vqrshrnu.v4i16") - }, - "qrshrn_n_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::U32]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vqrshrns.v2i32") - }, - "qrshrn_n_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqrshrnu.v2i32") - }, - "sri_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vvsri.v8i8") - }, - "sri_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vvsri.v8i8") - }, - "sri_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vvsri.v4i16") - }, - "sri_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vvsri.v4i16") - }, - "sri_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vvsri.v2i32") - }, - "sri_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vvsri.v2i32") - }, - "sri_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vvsri.v1i64") - }, - "sri_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vvsri.v1i64") - }, - "sriq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vvsri.v16i8") - }, - "sriq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vvsri.v16i8") - }, - "sriq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vvsri.v8i16") - }, - "sriq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vvsri.v8i16") - }, - "sriq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vvsri.v4i32") - }, - "sriq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vvsri.v4i32") - }, - "sriq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vvsri.v2i64") - }, - "sriq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vvsri.v2i64") - }, - "sli_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vvsli.v8i8") - }, - "sli_u8" => Intrinsic { - inputs: 
{ static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vvsli.v8i8") - }, - "sli_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vvsli.v4i16") - }, - "sli_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vvsli.v4i16") - }, - "sli_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vvsli.v2i32") - }, - "sli_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vvsli.v2i32") - }, - "sli_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vvsli.v1i64") - }, - "sli_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vvsli.v1i64") - }, - "sliq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vvsli.v16i8") - }, - "sliq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vvsli.v16i8") - }, - "sliq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vvsli.v8i16") - }, - "sliq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vvsli.v8i16") - }, - "sliq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vvsli.v4i32") - }, - "sliq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vvsli.v4i32") - }, - "sliq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vvsli.v2i64") - }, - "sliq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vvsli.v2i64") - }, - "vqmovn_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vqxtns.v8i8") - }, - "vqmovn_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vqxtnu.v8i8") - }, - "vqmovn_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vqxtns.v4i16") - }, - "vqmovn_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vqxtnu.v4i16") - }, - "vqmovn_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I64x2]; &INPUTS }, - output: &::I32x2, - definition: 
Named("llvm.arm.neon.vqxtns.v2i32") - }, - "vqmovn_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U64x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vqxtnu.v2i32") - }, - "abs_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vabs.v8i8") - }, - "abs_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vabs.v4i16") - }, - "abs_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vabs.v2i32") - }, - "absq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vabs.v16i8") - }, - "absq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vabs.v8i16") - }, - "absq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vabs.v4i32") - }, - "abs_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.fabs.v2f32") - }, - "absq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.fabs.v4f32") - }, - "qabs_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vsqabs.v8i8") - }, - "qabs_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqabs.v4i16") - }, - "qabs_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vsqabs.v2i32") - }, - "qabsq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vsqabs.v16i8") - }, - "qabsq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vsqabs.v8i16") - }, - "qabsq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vsqabs.v4i32") - }, - "qneg_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vsqneg.v8i8") - }, - "qneg_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vsqneg.v4i16") - }, - "qneg_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vsqneg.v2i32") - }, - "qnegq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vsqneg.v16i8") - }, - "qnegq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vsqneg.v8i16") - }, - "qnegq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = 
[&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vsqneg.v4i32") - }, - "clz_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.ctlz.v8i8") - }, - "clz_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.ctlz.v8i8") - }, - "clz_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.ctlz.v4i16") - }, - "clz_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.ctlz.v4i16") - }, - "clz_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.ctlz.v2i32") - }, - "clz_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.ctlz.v2i32") - }, - "clzq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ctlz.v16i8") - }, - "clzq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ctlz.v16i8") - }, - "clzq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ctlz.v8i16") - }, - "clzq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ctlz.v8i16") - }, - "clzq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ctlz.v4i32") - }, - "clzq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ctlz.v4i32") - }, - "cls_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vcls.v8i8") - }, - "cls_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vcls.v8i8") - }, - "cls_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vcls.v4i16") - }, - "cls_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vcls.v4i16") - }, - "cls_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vcls.v2i32") - }, - "cls_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vcls.v2i32") - }, - "clsq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vcls.v16i8") - }, - "clsq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vcls.v16i8") - }, - "clsq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: 
Named("llvm.arm.neon.vcls.v8i16") - }, - "clsq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vcls.v8i16") - }, - "clsq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vcls.v4i32") - }, - "clsq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vcls.v4i32") - }, - "cnt_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.ctpop.v8i8") - }, - "cnt_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.ctpop.v8i8") - }, - "cntq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ctpop.v16i8") - }, - "cntq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ctpop.v16i8") - }, - "recpe_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrecpe.v2i32") - }, - "recpe_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vrecpe.v2f32") - }, - "recpeq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vrecpe.v4i32") - }, - "recpeq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vrecpe.v4f32") - }, - "recps_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vfrecps.v2f32") - }, - "recpsq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vfrecps.v4f32") - }, - "sqrt_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.sqrt.v2f32") - }, - "sqrtq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.sqrt.v4f32") - }, - "rsqrte_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vrsqrte.v2i32") - }, - "rsqrte_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vrsqrte.v2f32") - }, - "rsqrteq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vrsqrte.v4i32") - }, - "rsqrteq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vrsqrte.v4f32") - }, - "rsqrts_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vrsqrts.v2f32") - }, - "rsqrtsq_f32" => Intrinsic { - inputs: { static 
INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vrsqrts.v4f32") - }, - "bsl_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vbsl.v8i8") - }, - "bsl_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vbsl.v8i8") - }, - "bsl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vbsl.v4i16") - }, - "bsl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vbsl.v4i16") - }, - "bsl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vbsl.v2i32") - }, - "bsl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vbsl.v2i32") - }, - "bsl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::I64x1]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vbsl.v1i64") - }, - "bsl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U64x1]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vbsl.v1i64") - }, - "bslq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vbsl.v16i8") - }, - "bslq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vbsl.v16i8") - }, - "bslq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vbsl.v8i16") - }, - "bslq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vbsl.v8i16") - }, - "bslq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vbsl.v4i32") - }, - "bslq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vbsl.v4i32") - }, - "bslq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vbsl.v2i64") - }, - "bslq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vbsl.v2i64") - }, - "padd_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vpadd.v8i8") - }, - "padd_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vpadd.v8i8") - }, - "padd_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: 
Named("llvm.arm.neon.vpadd.v4i16") - }, - "padd_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vpadd.v4i16") - }, - "padd_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vpadd.v2i32") - }, - "padd_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vpadd.v2i32") - }, - "padd_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vpadd.v2f32") - }, - "paddl_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x8]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vpaddls.v4i16.v8i8") - }, - "paddl_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x8]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vpaddlu.v4i16.v8i8") - }, - "paddl_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x4]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vpaddls.v2i32.v4i16") - }, - "paddl_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x4]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vpaddlu.v2i32.v4i16") - }, - "paddl_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x2]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vpaddls.v1i64.v2i32") - }, - "paddl_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vpaddlu.v1i64.v2i32") - }, - "paddlq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vpaddls.v8i16.v16i8") - }, - "paddlq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x16]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vpaddlu.v8i16.v16i8") - }, - "paddlq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vpaddls.v4i32.v8i16") - }, - "paddlq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vpaddlu.v4i32.v8i16") - }, - "paddlq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vpaddls.v2i64.v4i32") - }, - "paddlq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vpaddlu.v2i64.v4i32") - }, - "padal_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I8x8]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vpadals.v4i16.v4i16") - }, - "padal_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U8x8]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vpadalu.v4i16.v4i16") - }, - "padal_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I16x4]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vpadals.v2i32.v2i32") - }, - "padal_u32" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U16x4]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vpadalu.v2i32.v2i32") - }, - "padal_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x1, &::I32x2]; &INPUTS }, - output: &::I64x1, - definition: Named("llvm.arm.neon.vpadals.v1i64.v1i64") - }, - "padal_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x1, &::U32x2]; &INPUTS }, - output: &::U64x1, - definition: Named("llvm.arm.neon.vpadalu.v1i64.v1i64") - }, - "padalq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vpadals.v8i16.v8i16") - }, - "padalq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U8x16]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vpadalu.v8i16.v8i16") - }, - "padalq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vpadals.v4i32.v4i32") - }, - "padalq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U16x8]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vpadalu.v4i32.v4i32") - }, - "padalq_s64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I64x2, &::I32x4]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.arm.neon.vpadals.v2i64.v2i64") - }, - "padalq_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U32x4]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.arm.neon.vpadalu.v2i64.v2i64") - }, - "pmax_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vpmaxs.v8i8") - }, - "pmax_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vpmaxu.v8i8") - }, - "pmax_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vpmaxs.v4i16") - }, - "pmax_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vpmaxu.v4i16") - }, - "pmax_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vpmaxs.v2i32") - }, - "pmax_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vpmaxu.v2i32") - }, - "pmax_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vpmaxf.v2f32") - }, - "pmin_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vpmins.v8i8") - }, - "pmin_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vpminu.v8i8") - }, - "pmin_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x4, &::I16x4]; &INPUTS }, - output: &::I16x4, - definition: Named("llvm.arm.neon.vpmins.v4i16") - }, - "pmin_u16" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x4, &::U16x4]; &INPUTS }, - output: &::U16x4, - definition: Named("llvm.arm.neon.vpminu.v4i16") - }, - "pmin_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x2, &::I32x2]; &INPUTS }, - output: &::I32x2, - definition: Named("llvm.arm.neon.vpmins.v2i32") - }, - "pmin_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.arm.neon.vpminu.v2i32") - }, - "pmin_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x2, &::F32x2]; &INPUTS }, - output: &::F32x2, - definition: Named("llvm.arm.neon.vpminf.v2f32") - }, - "pminq_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.arm.neon.vpmins.v16i8") - }, - "pminq_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.arm.neon.vpminu.v16i8") - }, - "pminq_s16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.arm.neon.vpmins.v8i16") - }, - "pminq_u16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.arm.neon.vpminu.v8i16") - }, - "pminq_s32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.arm.neon.vpmins.v4i32") - }, - "pminq_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.arm.neon.vpminu.v4i32") - }, - "pminq_f32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.arm.neon.vpminf.v4f32") - }, - "tbl1_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x8, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbl1") - }, - "tbl1_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbl1") - }, - "tbx1_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbx1") - }, - "tbx1_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbx1") - }, - "tbl2_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbl2") - }, - "tbl2_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbl2") - }, - "tbx2_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: 
Named("llvm.arm.neon.vtbx2") - }, - "tbx2_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 2] = [&::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbx2") - }, - "tbl3_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbl3") - }, - "tbl3_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbl3") - }, - "tbx3_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbx3") - }, - "tbx3_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 3] = [&::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbx3") - }, - "tbl4_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbl4") - }, - "tbl4_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbl4") - }, - "tbx4_s8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::I8x8, &::I8x8, &::I8x8, &::I8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::I8x8, - definition: Named("llvm.arm.neon.vtbx4") - }, - "tbx4_u8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x8, { static AGG: Type = Type::Aggregate(true, { static PARTS: [&'static Type; 4] = [&::U8x8, &::U8x8, &::U8x8, &::U8x8]; &PARTS }); &AGG }, &::U8x8]; &INPUTS }, - output: &::U8x8, - definition: Named("llvm.arm.neon.vtbx4") - }, - _ => return None, - }) -} diff --git a/src/librustc_platform_intrinsics/hexagon.rs b/src/librustc_platform_intrinsics/hexagon.rs deleted file mode 100644 index 65460cfb8bd51..0000000000000 --- a/src/librustc_platform_intrinsics/hexagon.rs +++ /dev/null @@ -1,2934 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option<Intrinsic> { - if !name.starts_with("Q6_") { return None } - Some(match &name["Q6_".len()..] { - "R_vextract64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x16, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.hexagon.V6.extractw") - }, - "R_vextract128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.hexagon.V6.extractw.128B") - }, - "V_lo64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.lo") - }, - "V_lo128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x64]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.lo.128B") - }, - "V_hi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.hi") - }, - "V_hi128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x64]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.hi.128B") - }, - "V_vsplat_R64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.lvsplatuw") - }, - "V_vsplat_R128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.lvsplatuw.128B") - }, - "Q_and_QQ64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.pred.and") - }, - "Q_and_QQ128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.pred.and.128B") - }, - "Q_not_Q64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.pred.not") - }, - "Q_not_Q128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.pred.not.128B") - }, - "Q_or_QQ64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.pred.or") - }, - "Q_or_QQ128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.pred.or.128B") - }, - "Q_xor_QQ64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32x2]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.pred.xor") - }, - "Q_xor_QQ128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.pred.xor.128B") - }, - "Vub_vabsdiff_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vabsdiffub") - }, - "Vuh_vabsdiff_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: 
Named("llvm.hexagon.V6.vabsdiffuh") - }, - "Vub_vabsdiff_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vabsdiffub.128B") - }, - "Vuh_vabsdiff_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vabsdiffuh.128B") - }, - "Vuh_vabsdiff_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vabsdiffh") - }, - "Vuw_vabsdiff_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vabsdiffw") - }, - "Vuh_vabsdiff_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vabsdiffh.128B") - }, - "Vuw_vabsdiff_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vabsdiffw.128B") - }, - "Vh_vabs_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vabsh") - }, - "Vw_vabs_Vw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vabsw") - }, - "Vh_vabs_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vabsh.128B") - }, - "Vw_vabs_Vw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vabsw.128B") - }, - "Vh_vabs_Vh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vabsh.sat") - }, - "Vw_vabs_Vw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vabsw.sat") - }, - "Vh_vabs_Vh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vabsh.sat.128B") - }, - "Vw_vabs_Vw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vabsw.sat.128B") - }, - "Vb_vadd_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vaddb") - }, - "Vh_vadd_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaddh") - }, - "Vw_vadd_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaddw") - }, - "Vb_vadd_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vaddb.128B") - }, - "Vh_vadd_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddh.128B") - }, - "Vw_vadd_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddw.128B") - }, - "Vh_vadd_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaddhsat") - }, - "Vw_vadd_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaddwsat") - }, - "Vh_vadd_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddhsat.128B") - }, - "Vw_vadd_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddwsat.128B") - }, - "Vub_vadd_VubVub_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vaddubsat") - }, - "Vuh_vadd_VuhVuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vadduhsat") - }, - "Vub_vadd_VubVub_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vaddubsat.128B") - }, - "Vuh_vadd_VuhVuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vadduhsat.128B") - }, - "Wb_vadd_WbWb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vaddb.dv") - }, - "Wh_vadd_WhWh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddh.dv") - }, - "Ww_vadd_WwWw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddw.dv") - }, - "Wb_vadd_WbWb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x256, &::I8x256]; &INPUTS }, - output: &::I8x256, - definition: Named("llvm.hexagon.V6.vaddb.dv.128B") - }, - "Wh_vadd_WhWh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::I16x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vaddh.dv.128B") - }, - "Ww_vadd_WwWw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::I32x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vaddw.dv.128B") - }, - "Wh_vadd_WhWh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddhsat.dv") - }, - "Ww_vadd_WwWw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddwsat.dv") - }, - "Wh_vadd_WhWh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::I16x128, &::I16x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vaddhsat.dv.128B") - }, - "Ww_vadd_WwWw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::I32x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vaddwsat.dv.128B") - }, - "Wub_vadd_WubWub_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vaddubsat.dv") - }, - "Wuh_vadd_WuhWuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vadduhsat.dv") - }, - "Wub_vadd_WubWub_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U8x256]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vaddubsat.dv.128B") - }, - "Wuh_vadd_WuhWuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x128, &::U16x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vadduhsat.dv.128B") - }, - "V_valign_VVR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.valignb") - }, - "V_valign_VVR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.valignb.128B") - }, - "V_valign_VVI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.valignbi") - }, - "V_valign_VVI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.valignbi.128B") - }, - "V_vlalign_VVR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vlalignb") - }, - "V_vlalign_VVR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vlalignb.128B") - }, - "V_vlalign_VVI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vlalignbi") - }, - "V_vlalign_VVI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vlalignbi.128B") - }, - "V_vand_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vand") - }, - "V_vand_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vand.128B") - }, - "V_vand_QR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x2, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vandqrt") - }, - "V_vand_QR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vandqrt.128B") - }, - "V_vandor_VQR64" => Intrinsic { - inputs: { static 
INPUTS: [&'static Type; 3] = [&::U8x64, &::U32x2, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vandqrt.acc") - }, - "V_vandor_VQR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U32x4, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vandqrt.acc.128B") - }, - "Q_vand_VR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vandvrt") - }, - "Q_vand_VR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vandvrt.128B") - }, - "Q_vandor_QVR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U8x64, &::U32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vandvrt") - }, - "Q_vandor_QVR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U8x128, &::U32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vandvrt.128B") - }, - "Vh_vasl_VhR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaslh") - }, - "Vw_vasl_VwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaslw") - }, - "Vh_vasl_VhR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaslh.128B") - }, - "Vw_vasl_VwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaslw.128B") - }, - "Vh_vasl_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaslhv") - }, - "Vw_vasl_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaslwv") - }, - "Vh_vasl_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaslhv.128B") - }, - "Vw_vasl_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaslwv.128B") - }, - "Vw_vaslacc_VwVwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaslw.acc") - }, - "Vw_vaslacc_VwVwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaslw.acc.128B") - }, - "Vh_vasr_VhR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vasrh") - }, - "Vw_vasr_VwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vasrw") - }, - "Vh_vasr_VhR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I16x64, 
- definition: Named("llvm.hexagon.V6.vasrh.128B") - }, - "Vw_vasr_VwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vasrw.128B") - }, - "Vh_vasr_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vasrhv") - }, - "Vw_vasr_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vasrwv") - }, - "Vh_vasr_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vasrhv.128B") - }, - "Vw_vasr_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vasrwv.128B") - }, - "Vw_vasracc_VwVwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vasrw.acc") - }, - "Vw_vasracc_VwVwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vasrw.acc.128B") - }, - "Vh_vasr_VwVwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vasrhw") - }, - "Vh_vasr_VwVwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vasrhw.128B") - }, - "Vb_vasr_VhVhR_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::U32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vasrhbsat") - }, - "Vub_vasr_VhVhR_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vasrhbsat") - }, - "Vh_vasr_VwVwR_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vasrwhsat") - }, - "Vuh_vasr_VwVwR_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vasrwhsat") - }, - "Vb_vasr_VhVhR_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::U32]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vasrhbsat.128B") - }, - "Vub_vasr_VhVhR_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vasrhbsat.128B") - }, - "Vh_vasr_VwVwR_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vasrwhsat.128B") - }, - "Vuh_vasr_VwVwR_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vasrwhsat.128B") - }, - "Vb_vasr_VhVhR_rnd_sat64" => Intrinsic { - inputs: { 
static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::U32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vasrhbrndsat") - }, - "Vub_vasr_VhVhR_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vasrhbrndsat") - }, - "Vh_vasr_VwVwR_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vasrwhrndsat") - }, - "Vuh_vasr_VwVwR_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vasrwhrndsat") - }, - "Vb_vasr_VhVhR_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::U32]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vasrhbrndsat.128B") - }, - "Vub_vasr_VhVhR_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vasrhbrndsat.128B") - }, - "Vh_vasr_VwVwR_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vasrwhrndsat.128B") - }, - "Vuh_vasr_VwVwR_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vasrwhrndsat.128B") - }, - "V_equals_V64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x16]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vassign") - }, - "V_equals_V128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vassign.128B") - }, - "W_equals_W64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vassignp") - }, - "W_equals_W128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x64]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vassignp.128B") - }, - "Vh_vavg_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vavgh") - }, - "Vw_vavg_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vavgw") - }, - "Vh_vavg_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vavgh.128B") - }, - "Vw_vavg_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vavgw.128B") - }, - "Vub_vavg_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vavgub") - }, - "Vuh_vavg_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vavguh") - }, - 
"Vub_vavg_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vavgub.128B") - }, - "Vuh_vavg_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vavguh.128B") - }, - "Vh_vavg_VhVh_rnd64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vavgrndh") - }, - "Vw_vavg_VwVw_rnd64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vavgrndw") - }, - "Vh_vavg_VhVh_rnd128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vavgrndh.128B") - }, - "Vw_vavg_VwVw_rnd128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vavgrndw.128B") - }, - "Vub_vavg_VubVub_rnd64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vavgrndub") - }, - "Vuh_vavg_VuhVuh_rnd64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vavgrnduh") - }, - "Vub_vavg_VubVub_rnd128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vavgrndub.128B") - }, - "Vuh_vavg_VuhVuh_rnd128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vavgrnduh.128B") - }, - "Vuh_vcl0_Vuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vcl0h") - }, - "Vuw_vcl0_Vuw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x16]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vcl0w") - }, - "Vuh_vcl0_Vuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vcl0h.128B") - }, - "Vuw_vcl0_Vuw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vcl0w.128B") - }, - "W_vcombine_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vcombine") - }, - "W_vcombine_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vcombine.128B") - }, - "V_vzero64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vd0") - }, - "V_vzero128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vd0.128B") - }, - "Vb_vdeal_Vb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x64]; &INPUTS }, - output: &::I8x64, - 
definition: Named("llvm.hexagon.V6.vdealb") - }, - "Vh_vdeal_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vdealh") - }, - "Vb_vdeal_Vb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vdealb.128B") - }, - "Vh_vdeal_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vdealh.128B") - }, - "Vb_vdeale_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vdealb4w") - }, - "Vb_vdeale_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vdealb4w.128B") - }, - "W_vdeal_VVR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vdealvdd") - }, - "W_vdeal_VVR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vdealvdd.128B") - }, - "V_vdelta_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vdelta") - }, - "V_vdelta_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vdelta.128B") - }, - "Vh_vdmpy_VubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vdmpybus") - }, - "Vh_vdmpy_VubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vdmpybus.128B") - }, - "Vh_vdmpyacc_VhVubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::U8x64, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vdmpybus.acc") - }, - "Vh_vdmpyacc_VhVubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vdmpybus.acc.128B") - }, - "Wh_vdmpy_WubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vdmpybus.dv") - }, - "Wh_vdmpy_WubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vdmpybus.dv.128B") - }, - "Wh_vdmpyacc_WhWubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vdmpybus.dv.acc") - }, - "Wh_vdmpyacc_WhWubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vdmpybus.dv.acc.128B") - }, - "Vw_vdmpy_VhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: 
Named("llvm.hexagon.V6.vdmpyhb") - }, - "Vw_vdmpy_VhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhb.128B") - }, - "Vw_vdmpyacc_VwVhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhb.acc") - }, - "Vw_vdmpyacc_VwVhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhb.acc.128B") - }, - "Ww_vdmpy_WhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhb.dv") - }, - "Ww_vdmpy_WhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vdmpyhb.dv.128B") - }, - "Ww_vdmpyacc_WwWhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhb.dv.acc") - }, - "Ww_vdmpyacc_WwWhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vdmpyhb.dv.acc.128B") - }, - "Vw_vdmpy_WwRh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhisat") - }, - "Vw_vdmpy_WwRh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhisat.128B") - }, - "Vw_vdmpy_VhRh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsat") - }, - "Vw_vdmpy_VhRh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsat.128B") - }, - "Vw_vdmpy_WhRuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsuisat") - }, - "Vw_vdmpy_WhRuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsuisat.128B") - }, - "Vw_vdmpy_VhRuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsusat") - }, - "Vw_vdmpy_VhRuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsusat.128B") - }, - "Vw_vdmpy_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhvsat") - }, - "Vw_vdmpy_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhvsat.128B") - }, - "Vw_vdmpyacc_VwWwRh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 
3] = [&::I32x16, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhisat_acc") - }, - "Vw_vdmpyacc_VwWwRh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhisat_acc.128B") - }, - "Wuw_vdsad_WuhRuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vdsaduh") - }, - "Wuw_vdsad_WuhRuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x128, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vdsaduh.128B") - }, - "Wuw_vdsadacc_WuwWuhRuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U16x64, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vdsaduh.acc") - }, - "Wuw_vdsadacc_WuwWuhRuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x64, &::U16x128, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vdsaduh.acc.128B") - }, - "Vw_vdmpyacc_VwVhRh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsat_acc") - }, - "Vw_vdmpyacc_VwVhRh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsat_acc.128B") - }, - "Vw_vdmpyacc_VwWhRuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsuisat_acc") - }, - "Vw_vdmpyacc_VwWhRuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsuisat_acc.128B") - }, - "Vw_vdmpyacc_VwVhRuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhsusat_acc") - }, - "Vw_vdmpyacc_VwVhRuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhsusat_acc.128B") - }, - "Vw_vdmpyacc_VwVhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vdmpyhvsat_acc") - }, - "Vw_vdmpyacc_VwVhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vdmpyhvsat_acc.128B") - }, - "Q_vcmp_eq_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqb") - }, - "Q_vcmp_eq_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqh") - }, - "Q_vcmp_eq_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqw") - }, - "Q_vcmp_eq_VbVb128" => Intrinsic { - inputs: { static INPUTS: 
[&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqb.128B") - }, - "Q_vcmp_eq_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqh.128B") - }, - "Q_vcmp_eq_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqw.128B") - }, - "Q_vcmp_eqand_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqb.and") - }, - "Q_vcmp_eqand_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqh.and") - }, - "Q_vcmp_eqand_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqw.and") - }, - "Q_vcmp_eqand_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqb.and.128B") - }, - "Q_vcmp_eqand_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqh.and.128B") - }, - "Q_vcmp_eqand_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqw.and.128B") - }, - "Q_vcmp_eqor_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqb.or") - }, - "Q_vcmp_eqor_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqh.or") - }, - "Q_vcmp_eqor_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqw.or") - }, - "Q_vcmp_eqor_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqb.or.128B") - }, - "Q_vcmp_eqor_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqh.or.128B") - }, - "Q_vcmp_eqor_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqw.or.128B") - }, - "Q_vcmp_eqxacc_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqb.xor") - }, - "Q_vcmp_eqxacc_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqh.xor") - }, - "Q_vcmp_eqxacc_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS 
}, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.veqw.xor") - }, - "Q_vcmp_eqxacc_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqb.xor.128B") - }, - "Q_vcmp_eqxacc_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqh.xor.128B") - }, - "Q_vcmp_eqxacc_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.veqw.xor.128B") - }, - "Q_vcmp_gt_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtb") - }, - "Q_vcmp_gt_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgth") - }, - "Q_vcmp_gt_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtw") - }, - "Q_vcmp_gt_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtb.128B") - }, - "Q_vcmp_gt_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgth.128B") - }, - "Q_vcmp_gt_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtw.128B") - }, - "Q_vcmp_gt_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtub") - }, - "Q_vcmp_gt_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtuh") - }, - "Q_vcmp_gt_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtub.128B") - }, - "Q_vcmp_gt_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtuh.128B") - }, - "Q_vcmp_gtand_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtb.and") - }, - "Q_vcmp_gtand_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgth.and") - }, - "Q_vcmp_gtand_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtw.and") - }, - "Q_vcmp_gtand_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtb.and.128B") - }, - "Q_vcmp_gtand_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = 
[&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgth.and.128B") - }, - "Q_vcmp_gtand_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtw.and.128B") - }, - "Q_vcmp_gtand_QVubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtub.and") - }, - "Q_vcmp_gtand_QVuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtuh.and") - }, - "Q_vcmp_gtand_QVubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtub.and.128B") - }, - "Q_vcmp_gtand_QVuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtuh.and.128B") - }, - "Q_vcmp_gtor_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtb.or") - }, - "Q_vcmp_gtor_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgth.or") - }, - "Q_vcmp_gtor_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtw.or") - }, - "Q_vcmp_gtor_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtb.or.128B") - }, - "Q_vcmp_gtor_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgth.or.128B") - }, - "Q_vcmp_gtor_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtw.or.128B") - }, - "Q_vcmp_gtor_QVubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtub.or") - }, - "Q_vcmp_gtor_QVuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtuh.or") - }, - "Q_vcmp_gtor_QVubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtub.or.128B") - }, - "Q_vcmp_gtor_QVuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtuh.or.128B") - }, - "Q_vcmp_gtxacc_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtb.xor") - }, - "Q_vcmp_gtxacc_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = 
[&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgth.xor") - }, - "Q_vcmp_gtxacc_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtw.xor") - }, - "Q_vcmp_gtxacc_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtb.xor.128B") - }, - "Q_vcmp_gtxacc_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgth.xor.128B") - }, - "Q_vcmp_gtxacc_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtw.xor.128B") - }, - "Q_vcmp_gtxacc_QVubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtub.xor") - }, - "Q_vcmp_gtxacc_QVuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x2, - definition: Named("llvm.hexagon.V6.vgtuh.xor") - }, - "Q_vcmp_gtxacc_QVubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtub.xor.128B") - }, - "Q_vcmp_gtxacc_QVuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.hexagon.V6.vgtuh.xor.128B") - }, - "Vw_vinsert_VwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vinsertwr") - }, - "Vw_vinsert_VwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vinsertwr.128B") - }, - "Vuh_vlsr_VuhR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vlsrh") - }, - "Vuw_vlsr_VuwR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x16, &::U32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vlsrw") - }, - "Vuh_vlsr_VuhR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vlsrh.128B") - }, - "Vuw_vlsr_VuwR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x32, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vlsrw.128B") - }, - "Vh_vlsr_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vlsrhv") - }, - "Vw_vlsr_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vlsrwv") - }, - "Vh_vlsr_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vlsrhv.128B") - }, - "Vw_vlsr_VwVw128" => Intrinsic { - inputs: 
{ static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vlsrwv.128B") - }, - "Vb_vlut32_VbVbR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x64, &::I8x64, &::U32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vlutvvb") - }, - "Vb_vlut32_VbVbR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x128, &::I8x128, &::U32]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vlutvvb.128B") - }, - "Wh_vlut16_VbVhR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x64, &::I16x32, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vlutvwh") - }, - "Wh_vlut16_VbVhR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x128, &::I16x64, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vlutvwh.128B") - }, - "Vb_vlut32or_VbVbVbR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 4] = [&::I8x64, &::I8x64, &::I8x64, &::U32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vlutvvb.oracc") - }, - "Vb_vlut32or_VbVbVbR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 4] = [&::I8x128, &::I8x128, &::I8x128, &::U32]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vlutvvb.oracc.128B") - }, - "Wh_vlut16or_WhVbVhR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 4] = [&::I16x64, &::I8x64, &::I16x32, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vlutvwh.oracc") - }, - "Wh_vlut16or_WhVbVhR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 4] = [&::I16x128, &::I8x128, &::I16x64, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vlutvwh.oracc.128B") - }, - "Vh_vmax_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmaxh") - }, - "Vw_vmax_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmaxw") - }, - "Vh_vmax_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmaxh.128B") - }, - "Vw_vmax_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmaxw.128B") - }, - "Vub_vmax_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vmaxub") - }, - "Vuh_vmax_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vmaxuh") - }, - "Vub_vmax_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vmaxub.128B") - }, - "Vuh_vmax_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vmaxuh.128B") - }, - "Vh_vmin_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: 
Named("llvm.hexagon.V6.vminh") - }, - "Vw_vmin_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vminw") - }, - "Vh_vmin_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vminh.128B") - }, - "Vw_vmin_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vminw.128B") - }, - "Vub_vmin_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vminub") - }, - "Vuh_vmin_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vminuh") - }, - "Vub_vmin_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vminub.128B") - }, - "Vuh_vmin_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vminuh.128B") - }, - "Wh_vmpa_WubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpabus") - }, - "Wh_vmpa_WubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpabus.128B") - }, - "Wh_vmpaacc_WhWubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpabus.acc") - }, - "Wh_vmpaacc_WhWubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpabus.acc.128B") - }, - "Wh_vmpa_WubWb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::I8x128]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpabusv") - }, - "Wh_vmpa_WubWub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpabuuv") - }, - "Wh_vmpa_WubWb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::I8x256]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpabusv.128B") - }, - "Wh_vmpa_WubWub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U8x256]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpabuuv.128B") - }, - "Ww_vmpa_WhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpahb") - }, - "Ww_vmpa_WhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpahb.128B") - }, - "Ww_vmpaacc_WwWhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: 
Named("llvm.hexagon.V6.vmpahb.acc") - }, - "Ww_vmpaacc_WwWhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpahb.acc.128B") - }, - "Wh_vmpy_VbVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::U8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybus") - }, - "Ww_vmpy_VhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhus") - }, - "Wh_vmpy_VbVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::U8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybus.128B") - }, - "Ww_vmpy_VhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyhus.128B") - }, - "Wh_vmpyacc_WhVbVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I8x64, &::U8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybus.acc") - }, - "Ww_vmpyacc_WwVhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x32, &::U16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhus.acc") - }, - "Wh_vmpyacc_WhVbVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::I8x128, &::U8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybus.acc.128B") - }, - "Ww_vmpyacc_WwVhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x64, &::U16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyhus.acc.128B") - }, - "Wh_vmpy_VubVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybusv") - }, - "Wh_vmpy_VubVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybusv.128B") - }, - "Wh_vmpyacc_WhVubVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::U8x64, &::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybusv.acc") - }, - "Wh_vmpyacc_WhVubVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::U8x128, &::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybusv.acc.128B") - }, - "Wh_vmpy_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybv") - }, - "Wuh_vmpy_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vmpyubv") - }, - "Ww_vmpy_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhv") - }, - "Wuw_vmpy_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vmpyuhv") - }, - "Wh_vmpy_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::I8x128, &::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybv.128B") - }, - "Wuh_vmpy_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vmpyubv.128B") - }, - "Ww_vmpy_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyhv.128B") - }, - "Wuw_vmpy_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vmpyuhv.128B") - }, - "Wh_vmpyacc_WhVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpybv.acc") - }, - "Wuh_vmpyacc_WuhVubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U16x64, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vmpyubv.acc") - }, - "Ww_vmpyacc_WwVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhv.acc") - }, - "Wuw_vmpyacc_WuwVuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U16x32, &::U16x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vmpyuhv.acc") - }, - "Wh_vmpyacc_WhVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vmpybv.acc.128B") - }, - "Wuh_vmpyacc_WuhVubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U16x128, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vmpyubv.acc.128B") - }, - "Ww_vmpyacc_WwVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyhv.acc.128B") - }, - "Wuw_vmpyacc_WuwVuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x64, &::U16x64, &::U16x64]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vmpyuhv.acc.128B") - }, - "Vw_vmpye_VwVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyewuh") - }, - "Vw_vmpye_VwVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyewuh.128B") - }, - "Ww_vmpy_VhRh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyh") - }, - "Wuw_vmpy_VuhRuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vmpyuh") - }, - "Ww_vmpy_VhRh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyh.128B") - }, - "Wuw_vmpy_VuhRuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U32]; &INPUTS }, - output: &::U32x64, - definition: 
Named("llvm.hexagon.V6.vmpyuh.128B") - }, - "Ww_vmpyacc_WwVhRh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhsat.acc") - }, - "Ww_vmpyacc_WwVhRh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vmpyhsat.acc.128B") - }, - "Vw_vmpy_VhRh_s1_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyhsrs") - }, - "Vw_vmpy_VhRh_s1_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhsrs.128B") - }, - "Vw_vmpy_VhRh_s1_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyhss") - }, - "Vw_vmpy_VhRh_s1_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyhss.128B") - }, - "Vh_vmpy_VhVh_s1_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmpyhvsrs") - }, - "Vh_vmpy_VhVh_s1_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpyhvsrs.128B") - }, - "Vw_vmpyieo_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyieoh") - }, - "Vw_vmpyieo_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyieoh.128B") - }, - "Vw_vmpyieacc_VwVwVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiewh.acc") - }, - "Vw_vmpyieacc_VwVwVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiewuh.acc") - }, - "Vw_vmpyieacc_VwVwVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiewh.acc.128B") - }, - "Vw_vmpyieacc_VwVwVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiewuh.acc.128B") - }, - "Vw_vmpyie_VwVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiewuh") - }, - "Vw_vmpyie_VwVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiewuh.128B") - }, - "Vh_vmpyi_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmpyih") - }, - "Vh_vmpyi_VhVh128" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpyih.128B") - }, - "Vh_vmpyiacc_VhVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmpyih.acc") - }, - "Vh_vmpyiacc_VhVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpyih.acc.128B") - }, - "Vh_vmpyi_VhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmpyihb") - }, - "Vw_vmpyi_VwRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiwb") - }, - "Vh_vmpyi_VhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpyihb.128B") - }, - "Vw_vmpyi_VwRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwb.128B") - }, - "Vh_vmpyiacc_VhVhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x32, &::I16x32, &::U32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vmpyihb.acc") - }, - "Vw_vmpyiacc_VwVwRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiwb.acc") - }, - "Vh_vmpyiacc_VhVhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I16x64, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vmpyihb.acc.128B") - }, - "Vw_vmpyiacc_VwVwRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwb.acc.128B") - }, - "Vw_vmpyi_VwRh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiwh") - }, - "Vw_vmpyi_VwRh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwh.128B") - }, - "Vw_vmpyiacc_VwVwRh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiwh.acc") - }, - "Vw_vmpyiacc_VwVwRh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwh.acc.128B") - }, - "Vw_vmpyi_VwRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiwub") - }, - "Vw_vmpyi_VwRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwub.128B") - }, - "Vw_vmpyiacc_VwVwRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I32x16, &::U32]; &INPUTS }, - output: &::I32x16, - 
definition: Named("llvm.hexagon.V6.vmpyiwub.acc") - }, - "Vw_vmpyiacc_VwVwRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I32x32, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiwub.acc.128B") - }, - "Vw_vmpyo_VwVh_s1_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyowh") - }, - "Vw_vmpyo_VwVh_s1_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyowh.128B") - }, - "Vw_vmpyo_VwVh_s1_rnd_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyowh.rnd") - }, - "Vw_vmpyo_VwVh_s1_rnd_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyowh.rnd.128B") - }, - "Vw_vmpyo_VwVh_s1_rnd_sat_shift64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyowh.rnd.sacc") - }, - "Vw_vmpyo_VwVh_s1_rnd_sat_shift128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyowh.rnd.sacc.128B") - }, - "Vw_vmpyo_VwVh_s1_sat_shift64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyowh.sacc") - }, - "Vw_vmpyo_VwVh_s1_sat_shift128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyowh.sacc.128B") - }, - "Vw_vmpyio_VwVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I16x32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vmpyiowh") - }, - "Vw_vmpyio_VwVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x64]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vmpyiowh.128B") - }, - "Wuh_vmpy_VubRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vmpyub") - }, - "Wuh_vmpy_VubRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vmpyub.128B") - }, - "Wuh_vmpyacc_WuhVubRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U16x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vmpyub.acc") - }, - "Wuw_vmpyacc_WuwVuhRuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U16x32, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vmpyuh.acc") - }, - "Wuh_vmpyacc_WuhVubRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U16x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vmpyub.acc.128B") - }, - "Wuw_vmpyacc_WuwVuhRuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x64, &::U16x64, &::U32]; &INPUTS }, - output: &::U32x64, - definition: 
Named("llvm.hexagon.V6.vmpyuh.acc.128B") - }, - "Vuw_vmux_QVV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U32x16, &::U32x16]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vmux") - }, - "Vuw_vmux_QVV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U32x32, &::U32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vmux.128B") - }, - "Vh_vnavg_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vnavgh") - }, - "Vuh_vnavg_VuhVuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vnavguh") - }, - "Vw_vnavg_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vnavgw") - }, - "Vuw_vnavg_VuwVuw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x16, &::U32x16]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vnavguw") - }, - "Vh_vnavg_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vnavgh.128B") - }, - "Vuh_vnavg_VuhVuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vnavguh.128B") - }, - "Vw_vnavg_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vnavgw.128B") - }, - "Vuw_vnavg_VuwVuw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x32, &::U32x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vnavguw.128B") - }, - "Vub_vnavg_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vnavgub") - }, - "Vub_vnavg_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vnavgub.128B") - }, - "Vh_vnormamt_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vnormamth") - }, - "Vw_vnormamt_Vw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vnormamtw") - }, - "Vh_vnormamt_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vnormamth.128B") - }, - "Vw_vnormamt_Vw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vnormamtw.128B") - }, - "V_vnot_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vnot") - }, - "V_vnot_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vnot.128B") - }, - "V_vor_VV64" => Intrinsic { - inputs: { static INPUTS: 
[&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vor") - }, - "V_vor_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vor.128B") - }, - "Vb_vpacke_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vpackhe") - }, - "Vh_vpacke_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vpackwe") - }, - "Vb_vpacke_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vpackhe.128B") - }, - "Vh_vpacke_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vpackwe.128B") - }, - "Vb_vpacko_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vpackho") - }, - "Vh_vpacko_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vpackwo") - }, - "Vb_vpacko_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vpackho.128B") - }, - "Vh_vpacko_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vpackwo.128B") - }, - "Vb_vpack_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vpackhb.sat") - }, - "Vub_vpack_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vpackhub.sat") - }, - "Vh_vpack_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vpackwh.sat") - }, - "Vuh_vpack_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vpackwuh.sat") - }, - "Vb_vpack_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vpackhb.sat.128B") - }, - "Vub_vpack_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vpackhub.sat.128B") - }, - "Vh_vpack_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vpackwh.sat.128B") - }, - "Vuh_vpack_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vpackwuh.sat.128B") - }, - "Vh_vpopcount_Vh64" => Intrinsic { - inputs: { static 
INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vpopcounth") - }, - "Vh_vpopcount_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vpopcounth.128B") - }, - "V_vrdelta_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vrdelta") - }, - "V_vrdelta_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vrdelta.128B") - }, - "Vw_vrmpy_VubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpybus") - }, - "Vw_vrmpy_VubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybus.128B") - }, - "Vw_vrmpyacc_VwVubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::U8x64, &::U32]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpybus.acc") - }, - "Vw_vrmpyacc_VwVubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::U8x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybus.acc.128B") - }, - "Ww_vrmpy_WubRbI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybusi") - }, - "Ww_vrmpy_WubRbI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vrmpybusi.128B") - }, - "Ww_vrmpyacc_WwWubRbI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::U8x128, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybusi.acc") - }, - "Ww_vrmpyacc_WwWubRbI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::U8x256, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vrmpybusi.acc.128B") - }, - "Vw_vrmpy_VubVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::I8x64]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpybusv") - }, - "Vw_vrmpy_VubVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::I8x128]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybusv.128B") - }, - "Vw_vrmpyacc_VwVubVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::U8x64, &::I8x64]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpybusv.acc") - }, - "Vw_vrmpyacc_VwVubVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::U8x128, &::I8x128]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybusv.acc.128B") - }, - "Vw_vrmpy_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpybv") - }, - "Vuw_vrmpy_VubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vrmpyubv") - }, - 
"Vw_vrmpy_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpybv.128B") - }, - "Vuw_vrmpy_VubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyubv.128B") - }, - "Vw_vrmpyacc_VwVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x16, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vrmpywv.acc") - }, - "Vuw_vrmpyacc_VuwVubVub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x16, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vrmpyuwv.acc") - }, - "Vw_vrmpyacc_VwVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vrmpywv.acc.128B") - }, - "Vuw_vrmpyacc_VuwVubVub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyuwv.acc.128B") - }, - "Vuw_vrmpy_VubRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vrmpyub") - }, - "Vuw_vrmpy_VubRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyub.128B") - }, - "Vuw_vrmpyacc_VuwVubRub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x16, &::U8x64, &::U32]; &INPUTS }, - output: &::U32x16, - definition: Named("llvm.hexagon.V6.vrmpyub.acc") - }, - "Vuw_vrmpyacc_VuwVubRub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyub.acc.128B") - }, - "Wuw_vrmpy_WubRubI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyubi") - }, - "Wuw_vrmpy_WubRubI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vrmpyubi.128B") - }, - "Wuw_vrmpyacc_WuwWubRubI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrmpyubi.acc") - }, - "Wuw_vrmpyacc_WuwWubRubI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x64, &::U8x256, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vrmpyubi.acc.128B") - }, - "V_vror_VR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vror") - }, - "V_vror_VR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vror.128B") - }, - "Vb_vround_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vroundhb") - }, - "Vub_vround_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::I16x32, &::I16x32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vroundhub") - }, - "Vh_vround_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vroundwh") - }, - "Vuh_vround_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vroundwuh") - }, - "Vb_vround_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vroundhb.128B") - }, - "Vub_vround_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vroundhub.128B") - }, - "Vh_vround_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vroundwh.128B") - }, - "Vuh_vround_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vroundwuh.128B") - }, - "Wuw_vrsad_WubRubI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrsadubi") - }, - "Wuw_vrsad_WubRubI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vrsadubi.128B") - }, - "Wuw_vrsadacc_WuwWubRubI64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x32, &::U8x128, &::U32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vrsadubi.acc") - }, - "Wuw_vrsadacc_WuwWubRubI128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x64, &::U8x256, &::U32]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vrsadubi.acc.128B") - }, - "Vub_vsat_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vsathub") - }, - "Vub_vsat_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vsathub.128B") - }, - "Vh_vsat_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vsatwh") - }, - "Vh_vsat_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsatwh.128B") - }, - "Wh_vsxt_Vb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsb") - }, - "Ww_vsxt_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsh") - }, - "Wh_vsxt_Vb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vsb.128B") - }, - "Ww_vsxt_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - 
output: &::I32x64, - definition: Named("llvm.hexagon.V6.vsh.128B") - }, - "Wuh_vzxt_Vub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vzb") - }, - "Wuw_vzxt_Vuh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vzh") - }, - "Wuh_vzxt_Vub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vzb.128B") - }, - "Wuw_vzxt_Vuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x64]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vzh.128B") - }, - "Vb_condacc_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vaddbq") - }, - "Vh_condacc_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaddhq") - }, - "Vw_condacc_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaddwq") - }, - "Vb_condacc_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vaddbq.128B") - }, - "Vh_condacc_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddhq.128B") - }, - "Vw_condacc_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddwq.128B") - }, - "Vb_condacc_QnVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vaddbnq") - }, - "Vh_condacc_QnVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vaddhnq") - }, - "Vw_condacc_QnVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vaddwnq") - }, - "Vb_condacc_QnVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vaddbnq.128B") - }, - "Vh_condacc_QnVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vaddhnq.128B") - }, - "Vw_condacc_QnVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vaddwnq.128B") - }, - "Vb_condnac_QVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vsubbq") - }, - "Vh_condnac_QVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = 
[&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vsubhq") - }, - "Vw_condnac_QVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vsubwq") - }, - "Vb_condnac_QVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vsubbq.128B") - }, - "Vh_condnac_QVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubhq.128B") - }, - "Vw_condnac_QVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubwq.128B") - }, - "Vb_condnac_QnVbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vsubbnq") - }, - "Vh_condnac_QnVhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vsubhnq") - }, - "Vw_condnac_QnVwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vsubwnq") - }, - "Vb_condnac_QnVbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vsubbnq.128B") - }, - "Vh_condnac_QnVhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubhnq.128B") - }, - "Vw_condnac_QnVwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubwnq.128B") - }, - "Vh_vshuffe_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vshufeh") - }, - "Vh_vshuffe_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vshufeh.128B") - }, - "Vh_vshuffo_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vshufoh") - }, - "Vh_vshuffo_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vshufoh.128B") - }, - "Vb_vshuff_Vb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vshuffb") - }, - "Vh_vshuff_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vshuffh") - }, - "Vb_vshuff_Vb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vshuffb.128B") - }, - "Vh_vshuff_Vh128" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vshuffh.128B") - }, - "Vb_vshuffe_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vshuffeb") - }, - "Vb_vshuffe_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vshuffeb.128B") - }, - "Vb_vshuffo_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vshuffob") - }, - "Vb_vshuffo_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vshuffob.128B") - }, - "Vb_vshuffoe_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vshuffoeb") - }, - "Vh_vshuffoe_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vshuffoeh") - }, - "Vb_vshuffoe_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vshuffoeb.128B") - }, - "Vh_vshuffoe_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vshuffoeh.128B") - }, - "W_vshuff_VVR64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x64, &::U8x64, &::U32]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vshufvvd") - }, - "W_vshuff_VVR128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x128, &::U8x128, &::U32]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vshufvvd.128B") - }, - "Vb_vsub_VbVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x64, &::I8x64]; &INPUTS }, - output: &::I8x64, - definition: Named("llvm.hexagon.V6.vsubb") - }, - "Vh_vsub_VhVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vsubh") - }, - "Vw_vsub_VwVw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vsubw") - }, - "Vb_vsub_VbVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vsubb.128B") - }, - "Vh_vsub_VhVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubh.128B") - }, - "Vw_vsub_VwVw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubw.128B") - }, - "Vh_vsub_VhVh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x32, &::I16x32]; &INPUTS }, - output: &::I16x32, - definition: Named("llvm.hexagon.V6.vsubhsat") - }, - "Vw_vsub_VwVw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[&::I32x16, &::I32x16]; &INPUTS }, - output: &::I32x16, - definition: Named("llvm.hexagon.V6.vsubwsat") - }, - "Vh_vsub_VhVh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubhsat.128B") - }, - "Vw_vsub_VwVw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubwsat.128B") - }, - "Vub_vsub_VubVub_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x64, - definition: Named("llvm.hexagon.V6.vsububsat") - }, - "Vuh_vsub_VuhVuh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vsubuhsat") - }, - "Vub_vsub_VubVub_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vsububsat.128B") - }, - "Vuh_vsub_VuhVuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vsubuhsat.128B") - }, - "Wb_vsub_WbWb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::I8x128]; &INPUTS }, - output: &::I8x128, - definition: Named("llvm.hexagon.V6.vsubb.dv") - }, - "Wh_vsub_WhWh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubh.dv") - }, - "Ww_vsub_WwWw64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubw.dv") - }, - "Wb_vsub_WbWb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x256, &::I8x256]; &INPUTS }, - output: &::I8x256, - definition: Named("llvm.hexagon.V6.vsubb.dv.128B") - }, - "Wh_vsub_WhWh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::I16x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vsubh.dv.128B") - }, - "Ww_vsub_WwWw128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::I32x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vsubw.dv.128B") - }, - "Wh_vsub_WhWh_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I16x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vsubhsat.dv") - }, - "Ww_vsub_WwWw_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I32x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vsubwsat.dv") - }, - "Wh_vsub_WhWh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::I16x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vsubhsat.dv.128B") - }, - "Ww_vsub_WwWw_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::I32x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vsubwsat.dv.128B") - }, - "Wub_vsub_WubWub_sat64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vsububsat.dv") - }, - "Wuh_vsub_WuhWuh_sat64" => Intrinsic { - inputs: { static 
INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vsubuhsat.dv") - }, - "Wub_vsub_WubWub_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U8x256]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vsububsat.dv.128B") - }, - "Wuh_vsub_WuhWuh_sat128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x128, &::U16x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vsubuhsat.dv.128B") - }, - "W_vswap_QVV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x2, &::U8x64, &::U8x64]; &INPUTS }, - output: &::U8x128, - definition: Named("llvm.hexagon.V6.vswap") - }, - "W_vswap_QVV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U32x4, &::U8x128, &::U8x128]; &INPUTS }, - output: &::U8x256, - definition: Named("llvm.hexagon.V6.vswap.128B") - }, - "Wh_vtmpy_WbRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vtmpyb") - }, - "Wh_vtmpy_WbRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vtmpyb.128B") - }, - "Wh_vtmpyacc_WhWbRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::I8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vtmpyb.acc") - }, - "Wh_vtmpyacc_WhWbRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::I8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vtmpyb.acc.128B") - }, - "Wh_vtmpy_WubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vtmpybus") - }, - "Wh_vtmpy_WubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vtmpybus.128B") - }, - "Wh_vtmpyacc_WhWubRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x64, &::U8x128, &::U32]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vtmpybus.acc") - }, - "Wh_vtmpyacc_WhWubRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x128, &::U8x256, &::U32]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vtmpybus.acc.128B") - }, - "Ww_vtmpy_WhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vtmpyhb") - }, - "Ww_vtmpy_WhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vtmpyhb.128B") - }, - "Wh_vunpack_Vb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vunpackb") - }, - "Wuh_vunpack_Vub64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vunpackub") - }, - "Ww_vunpack_Vh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vunpackh") - }, - "Wuw_vunpack_Vuh64" => Intrinsic { - inputs: { 
static INPUTS: [&'static Type; 1] = [&::U16x32]; &INPUTS }, - output: &::U32x32, - definition: Named("llvm.hexagon.V6.vunpackuh") - }, - "Wh_vunpack_Vb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vunpackb.128B") - }, - "Wuh_vunpack_Vub128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U8x128]; &INPUTS }, - output: &::U16x128, - definition: Named("llvm.hexagon.V6.vunpackub.128B") - }, - "Ww_vunpack_Vh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vunpackh.128B") - }, - "Wuw_vunpack_Vuh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x64]; &INPUTS }, - output: &::U32x64, - definition: Named("llvm.hexagon.V6.vunpackuh.128B") - }, - "Wh_vunpackoor_WhVb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x64, &::I8x64]; &INPUTS }, - output: &::I16x64, - definition: Named("llvm.hexagon.V6.vunpackob") - }, - "Ww_vunpackoor_WwVh64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x32, &::I16x32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vunpackoh") - }, - "Wh_vunpackoor_WhVb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x128, &::I8x128]; &INPUTS }, - output: &::I16x128, - definition: Named("llvm.hexagon.V6.vunpackob.128B") - }, - "Ww_vunpackoor_WwVh128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x64, &::I16x64]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vunpackoh.128B") - }, - "Ww_vtmpyacc_WwWhRb64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x32, &::I16x64, &::U32]; &INPUTS }, - output: &::I32x32, - definition: Named("llvm.hexagon.V6.vtmpyhb.acc") - }, - "Ww_vtmpyacc_WwWhRb128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x64, &::I16x128, &::U32]; &INPUTS }, - output: &::I32x64, - definition: Named("llvm.hexagon.V6.vtmpyhb.acc.128B") - }, - "V_vxor_VV64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x32, &::U16x32]; &INPUTS }, - output: &::U16x32, - definition: Named("llvm.hexagon.V6.vxor") - }, - "V_vxor_VV128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x64, &::U16x64]; &INPUTS }, - output: &::U16x64, - definition: Named("llvm.hexagon.V6.vxor.128B") - }, - _ => return None, - }) -} diff --git a/src/librustc_platform_intrinsics/lib.rs b/src/librustc_platform_intrinsics/lib.rs deleted file mode 100644 index ca2628078178b..0000000000000 --- a/src/librustc_platform_intrinsics/lib.rs +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
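The lib.rs hunk continues below with the data model that every per-architecture table above feeds into. For orientation before the full definitions, here is a condensed, hand-written sketch of that model, populated with the Vh_vpopcount_Vh64 row from the Hexagon table; the type and field names follow the deleted source, the rest is illustrative, and the lane comment reflects the 64-byte HVX vector size that the 64 suffix appears to encode.

    // Condensed sketch of the deleted platform-intrinsics model.
    // `Type` keeps only the two variants this example needs.
    #[allow(dead_code)]
    pub enum Type {
        Integer(/* signed */ bool, /* width */ u8, /* llvm width */ u8),
        Vector(&'static Type, Option<&'static Type>, /* lanes */ u16),
    }

    pub enum IntrinsicDef {
        Named(&'static str), // LLVM intrinsic the entry lowers to
    }

    pub struct Intrinsic {
        pub inputs: &'static [&'static Type],
        pub output: &'static Type,
        pub definition: IntrinsicDef,
    }

    // Mirrors the statics removed below: one shared `Type` value per shape.
    static I16: Type = Type::Integer(true, 16, 16);
    static I16x32: Type = Type::Vector(&I16, None, 32); // 32 x i16, i.e. a 64-byte HVX vector

    // One row of the Hexagon table above: halfword popcount on a 64-byte
    // vector, lowered to the named LLVM intrinsic.
    fn vh_vpopcount_vh64() -> Intrinsic {
        Intrinsic {
            inputs: { static INPUTS: [&'static Type; 1] = [&I16x32]; &INPUTS },
            output: &I16x32,
            definition: IntrinsicDef::Named("llvm.hexagon.V6.vpopcounth"),
        }
    }

    fn main() {
        let intr = vh_vpopcount_vh64();
        match intr.definition {
            IntrinsicDef::Named(llvm_name) => {
                println!("{} input(s), lowers to {}", intr.inputs.len(), llvm_name);
            }
        }
    }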
- -#![allow(nonstandard_style)] - -#![feature(nll)] - -pub struct Intrinsic { - pub inputs: &'static [&'static Type], - pub output: &'static Type, - - pub definition: IntrinsicDef, -} - -#[derive(Clone, Hash, Eq, PartialEq)] -pub enum Type { - Void, - Integer(/* signed */ bool, u8, /* llvm width */ u8), - Float(u8), - Pointer(&'static Type, Option<&'static Type>, /* const */ bool), - Vector(&'static Type, Option<&'static Type>, u16), - Aggregate(bool, &'static [&'static Type]), -} - -pub enum IntrinsicDef { - Named(&'static str), -} - -static I8: Type = Type::Integer(true, 8, 8); -static I16: Type = Type::Integer(true, 16, 16); -static I32: Type = Type::Integer(true, 32, 32); -static I64: Type = Type::Integer(true, 64, 64); -static U8: Type = Type::Integer(false, 8, 8); -static U16: Type = Type::Integer(false, 16, 16); -static U32: Type = Type::Integer(false, 32, 32); -static U64: Type = Type::Integer(false, 64, 64); -static F32: Type = Type::Float(32); -static F64: Type = Type::Float(64); - -static I32_8: Type = Type::Integer(true, 32, 8); - -static I8x8: Type = Type::Vector(&I8, None, 8); -static U8x8: Type = Type::Vector(&U8, None, 8); -static I8x16: Type = Type::Vector(&I8, None, 16); -static U8x16: Type = Type::Vector(&U8, None, 16); -static I8x32: Type = Type::Vector(&I8, None, 32); -static U8x32: Type = Type::Vector(&U8, None, 32); -static I8x64: Type = Type::Vector(&I8, None, 64); -static U8x64: Type = Type::Vector(&U8, None, 64); -static I8x128: Type = Type::Vector(&I8, None, 128); -static U8x128: Type = Type::Vector(&U8, None, 128); -static I8x256: Type = Type::Vector(&I8, None, 256); -static U8x256: Type = Type::Vector(&U8, None, 256); - -static I16x4: Type = Type::Vector(&I16, None, 4); -static U16x4: Type = Type::Vector(&U16, None, 4); -static I16x8: Type = Type::Vector(&I16, None, 8); -static U16x8: Type = Type::Vector(&U16, None, 8); -static I16x16: Type = Type::Vector(&I16, None, 16); -static U16x16: Type = Type::Vector(&U16, None, 16); -static I16x32: Type = Type::Vector(&I16, None, 32); -static U16x32: Type = Type::Vector(&U16, None, 32); -static I16x64: Type = Type::Vector(&I16, None, 64); -static U16x64: Type = Type::Vector(&U16, None, 64); -static I16x128: Type = Type::Vector(&I16, None, 128); -static U16x128: Type = Type::Vector(&U16, None, 128); - -static I32x2: Type = Type::Vector(&I32, None, 2); -static U32x2: Type = Type::Vector(&U32, None, 2); -static I32x4: Type = Type::Vector(&I32, None, 4); -static U32x4: Type = Type::Vector(&U32, None, 4); -static I32x8: Type = Type::Vector(&I32, None, 8); -static U32x8: Type = Type::Vector(&U32, None, 8); -static I32x16: Type = Type::Vector(&I32, None, 16); -static U32x16: Type = Type::Vector(&U32, None, 16); -static I32x32: Type = Type::Vector(&I32, None, 32); -static U32x32: Type = Type::Vector(&U32, None, 32); -static I32x64: Type = Type::Vector(&I32, None, 64); -static U32x64: Type = Type::Vector(&U32, None, 64); - -static I64x1: Type = Type::Vector(&I64, None, 1); -static U64x1: Type = Type::Vector(&U64, None, 1); -static I64x2: Type = Type::Vector(&I64, None, 2); -static U64x2: Type = Type::Vector(&U64, None, 2); -static I64x4: Type = Type::Vector(&I64, None, 4); -static U64x4: Type = Type::Vector(&U64, None, 4); - -static F32x2: Type = Type::Vector(&F32, None, 2); -static F32x4: Type = Type::Vector(&F32, None, 4); -static F32x8: Type = Type::Vector(&F32, None, 8); -static F64x1: Type = Type::Vector(&F64, None, 1); -static F64x2: Type = Type::Vector(&F64, None, 2); -static F64x4: Type = Type::Vector(&F64, None, 4); - 
-static I32x4_F32: Type = Type::Vector(&I32, Some(&F32), 4); -static I32x8_F32: Type = Type::Vector(&I32, Some(&F32), 8); -static I64x2_F64: Type = Type::Vector(&I64, Some(&F64), 2); -static I64x4_F64: Type = Type::Vector(&I64, Some(&F64), 4); - -static VOID: Type = Type::Void; - -mod x86; -mod arm; -mod aarch64; -mod nvptx; -mod hexagon; -mod powerpc; - -impl Intrinsic { - pub fn find(name: &str) -> Option { - if name.starts_with("x86_") { - x86::find(name) - } else if name.starts_with("arm_") { - arm::find(name) - } else if name.starts_with("aarch64_") { - aarch64::find(name) - } else if name.starts_with("nvptx_") { - nvptx::find(name) - } else if name.starts_with("Q6_") { - hexagon::find(name) - } else if name.starts_with("powerpc_") { - powerpc::find(name) - } else { - None - } - } -} diff --git a/src/librustc_platform_intrinsics/nvptx.rs b/src/librustc_platform_intrinsics/nvptx.rs deleted file mode 100644 index 0c0ee429d0cc4..0000000000000 --- a/src/librustc_platform_intrinsics/nvptx.rs +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option { - if !name.starts_with("nvptx") { return None } - Some(match &name["nvptx".len()..] { - "_syncthreads" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.cuda.syncthreads") - }, - "_block_dim_x" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ntid.x") - }, - "_block_dim_y" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ntid.y") - }, - "_block_dim_z" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ntid.z") - }, - "_block_idx_x" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ctaid.x") - }, - "_block_idx_y" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ctaid.y") - }, - "_block_idx_z" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.ctaid.z") - }, - "_grid_dim_x" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.nctaid.x") - }, - "_grid_dim_y" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.nctaid.y") - }, - "_grid_dim_z" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.nctaid.z") - }, - "_thread_idx_x" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: 
Named("llvm.nvvm.read.ptx.sreg.tid.x") - }, - "_thread_idx_y" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.tid.y") - }, - "_thread_idx_z" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::I32, - definition: Named("llvm.nvvm.read.ptx.sreg.tid.z") - }, - _ => return None, - }) -} diff --git a/src/librustc_platform_intrinsics/powerpc.rs b/src/librustc_platform_intrinsics/powerpc.rs deleted file mode 100644 index 5c062c0ecec03..0000000000000 --- a/src/librustc_platform_intrinsics/powerpc.rs +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option { - if !name.starts_with("powerpc") { return None } - Some(match &name["powerpc".len()..] { - "_vec_perm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I32x4, &::I32x4, &::I8x16]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vperm") - }, - "_vec_mradds" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vmhraddshs") - }, - "_vec_cmpb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vcmpbfp") - }, - "_vec_cmpeqb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vcmpequb") - }, - "_vec_cmpeqh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vcmpequh") - }, - "_vec_cmpeqw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vcmpequw") - }, - "_vec_cmpgtub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vcmpgtub") - }, - "_vec_cmpgtuh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vcmpgtuh") - }, - "_vec_cmpgtuw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vcmpgtuw") - }, - "_vec_cmpgtsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vcmpgtsb") - }, - "_vec_cmpgtsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vcmpgtsh") - }, - "_vec_cmpgtsw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: 
Named("llvm.ppc.altivec.vcmpgtsw") - }, - "_vec_maxsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vmaxsb") - }, - "_vec_maxub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vmaxub") - }, - "_vec_maxsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vmaxsh") - }, - "_vec_maxuh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vmaxuh") - }, - "_vec_maxsw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmaxsw") - }, - "_vec_maxuw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmaxuw") - }, - "_vec_minsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vminsb") - }, - "_vec_minub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vminub") - }, - "_vec_minsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vminsh") - }, - "_vec_minuh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vminuh") - }, - "_vec_minsw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vminsw") - }, - "_vec_minuw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vminuw") - }, - "_vec_subsbs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vsubsbs") - }, - "_vec_sububs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vsububs") - }, - "_vec_subshs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vsubshs") - }, - "_vec_subuhs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vsubuhs") - }, - "_vec_subsws" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vsubsws") - }, - "_vec_subuws" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vsubuws") - }, - "_vec_subc" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vsubcuw") - }, - "_vec_addsbs" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vaddsbs") - }, - "_vec_addubs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vaddubs") - }, - "_vec_addshs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vaddshs") - }, - "_vec_adduhs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vadduhs") - }, - "_vec_addsws" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vaddsws") - }, - "_vec_adduws" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vadduws") - }, - "_vec_addc" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vaddcuw") - }, - "_vec_mulesb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vmulesb") - }, - "_vec_muleub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vmuleub") - }, - "_vec_mulesh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmulesh") - }, - "_vec_muleuh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmuleuh") - }, - "_vec_mulosb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vmulosb") - }, - "_vec_muloub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vmuloub") - }, - "_vec_mulosh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmulosh") - }, - "_vec_mulouh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmulouh") - }, - "_vec_avgsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vavgsb") - }, - "_vec_avgub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vavgub") - }, - "_vec_avgsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vavgsh") - }, - "_vec_avguh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vavguh") - }, - "_vec_avgsw" => Intrinsic { - inputs: { static INPUTS: [&'static 
Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vavgsw") - }, - "_vec_avguw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vavguw") - }, - "_vec_packssh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.ppc.altivec.vpkshss") - }, - "_vec_packsuh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vpkuhus") - }, - "_vec_packssw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vpkswss") - }, - "_vec_packsuw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vpkuwus") - }, - "_vec_packsush" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.ppc.altivec.vpkshus") - }, - "_vec_packsusw" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.ppc.altivec.vpkswus") - }, - "_vec_packpx" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vpkpx") - }, - "_vec_unpacklsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vupklsb") - }, - "_vec_unpacklsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vupklsh") - }, - "_vec_unpackhsb" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vupkhsb") - }, - "_vec_unpackhsh" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vupkhsh") - }, - "_vec_madds" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.ppc.altivec.vmhaddshs") - }, - "_vec_msumubm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmsumubm") - }, - "_vec_msumuhm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U16x8, &::U16x8, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmsumuhm") - }, - "_vec_msummbm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::U8x16, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmsummbm") - }, - "_vec_msumshm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmsumshm") - }, - "_vec_msumshs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I16x8, &::I16x8, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vmsumshs") - }, - "_vec_msumuhs" => Intrinsic { - inputs: { static INPUTS: 
[&'static Type; 3] = [&::U16x8, &::U16x8, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vmsumuhs") - }, - "_vec_sum2s" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vsum2sws") - }, - "_vec_sum4sbs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vsum4sbs") - }, - "_vec_sum4ubs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.ppc.altivec.vsum4ubs") - }, - "_vec_sum4shs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vsum4shs") - }, - "_vec_sums" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.ppc.altivec.vsumsws") - }, - "_vec_madd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vmaddfp") - }, - "_vec_nmsub" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vnmsubfp") - }, - "_vec_expte" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vexptefp") - }, - "_vec_floor" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrfim") - }, - "_vec_ceil" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrfip") - }, - "_vec_round" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrfin") - }, - "_vec_trunc" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrfiz") - }, - "_vec_loge" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vlogefp") - }, - "_vec_re" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrefp") - }, - "_vec_rsqrte" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.ppc.altivec.vrsqrtefp") - }, - _ => return None, - }) -} diff --git a/src/librustc_platform_intrinsics/x86.rs b/src/librustc_platform_intrinsics/x86.rs deleted file mode 100644 index 5c01c3a118d2d..0000000000000 --- a/src/librustc_platform_intrinsics/x86.rs +++ /dev/null @@ -1,1379 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
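The x86 table deleted below is reached the same way as the Hexagon, NVPTX and PowerPC tables above: Intrinsic::find in the deleted lib.rs routes on the architecture prefix, and each module then matches on the remainder of the name. The following is a minimal sketch of that two-level lookup, reduced to returning only the LLVM intrinsic name rather than a full Intrinsic, and seeded with a handful of entries copied from the powerpc and x86 hunks; it illustrates the pattern and is not the removed implementation.

    // Two-level lookup sketch: prefix dispatch, then per-module match.
    fn powerpc_find(name: &str) -> Option<&'static str> {
        if !name.starts_with("powerpc") {
            return None;
        }
        // Each module strips its prefix and matches the remainder.
        Some(match &name["powerpc".len()..] {
            "_vec_perm" => "llvm.ppc.altivec.vperm",
            "_vec_madd" => "llvm.ppc.altivec.vmaddfp",
            _ => return None,
        })
    }

    fn x86_find(name: &str) -> Option<&'static str> {
        if !name.starts_with("x86") {
            return None;
        }
        Some(match &name["x86".len()..] {
            "_mm256_abs_epi8" => "llvm.x86.avx2.pabs.b",
            _ => return None,
        })
    }

    // Mirrors `Intrinsic::find` above: route on the architecture prefix,
    // then defer to that architecture's table.
    fn find(name: &str) -> Option<&'static str> {
        if name.starts_with("x86_") {
            x86_find(name)
        } else if name.starts_with("powerpc_") {
            powerpc_find(name)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(find("powerpc_vec_madd"), Some("llvm.ppc.altivec.vmaddfp"));
        assert_eq!(find("x86_mm256_abs_epi8"), Some("llvm.x86.avx2.pabs.b"));
        assert_eq!(find("not_a_known_prefix"), None);
        println!("prefix dispatch resolves as expected");
    }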
- -// DO NOT EDIT: autogenerated by etc/platform-intrinsics/generator.py -// ignore-tidy-linelength - -#![allow(unused_imports)] - -use {Intrinsic, Type}; -use IntrinsicDef::Named; - -pub fn find(name: &str) -> Option { - if !name.starts_with("x86") { return None } - Some(match &name["x86".len()..] { - "_mm256_abs_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.pabs.b") - }, - "_mm256_abs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pabs.w") - }, - "_mm256_abs_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.pabs.d") - }, - "_mm256_adds_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.padds.b") - }, - "_mm256_adds_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U8x32, - definition: Named("llvm.x86.avx2.paddus.b") - }, - "_mm256_adds_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.padds.w") - }, - "_mm256_adds_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.paddus.w") - }, - "_mm256_avg_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U8x32, - definition: Named("llvm.x86.avx2.pavg.b") - }, - "_mm256_avg_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.pavg.w") - }, - "_mm256_hadd_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.phadd.w") - }, - "_mm256_hadd_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.phadd.d") - }, - "_mm256_hadds_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.phadd.sw") - }, - "_mm256_hsub_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.phsub.w") - }, - "_mm256_hsub_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.phsub.d") - }, - "_mm256_hsubs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.phsub.sw") - }, - "_mm256_madd_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.pmadd.wd") - }, - "_mm256_maddubs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pmadd.ub.sw") - }, - "_mm_mask_i32gather_epi32" => Intrinsic { - inputs: { 
static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4, &::I32_8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx2.gather.d.d") - }, - "_mm_mask_i32gather_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4, &::I32x4_F32, &::I32_8]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx2.gather.d.ps") - }, - "_mm256_mask_i32gather_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I32x8, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8, &::I32_8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.gather.d.d.256") - }, - "_mm256_mask_i32gather_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F32x8, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8, &::I32x8_F32, &::I32_8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx2.gather.d.ps.256") - }, - "_mm_mask_i32gather_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2, &::I32_8]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.x86.avx2.gather.d.q") - }, - "_mm_mask_i32gather_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x2_F64, &::I32_8]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.avx2.gather.d.pd") - }, - "_mm256_mask_i32gather_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4, &::I32_8]; &INPUTS }, - output: &::I64x4, - definition: Named("llvm.x86.avx2.gather.d.q.256") - }, - "_mm256_mask_i32gather_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I32x4, &::I64x4_F64, &::I32_8]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx2.gather.d.pd.256") - }, - "_mm_mask_i64gather_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4, &::I32_8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx2.gather.q.d") - }, - "_mm_mask_i64gather_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x2, &::I32x4_F32, &::I32_8]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx2.gather.q.ps") - }, - "_mm256_mask_i64gather_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I32x4, { static PTR: Type = Type::Pointer(&::I32, Some(&::I8), true); &PTR }, &::I64x4, &::I32x4, &::I32_8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx2.gather.q.d") - }, - "_mm256_mask_i64gather_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F32x4, { static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I64x4, &::I32x4_F32, &::I32_8]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx2.gather.q.ps") - }, - "_mm_mask_i64gather_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = 
[&::I64x2, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2, &::I32_8]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.x86.avx2.gather.q.q") - }, - "_mm_mask_i64gather_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F64x2, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2, &::I64x2_F64, &::I32_8]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.avx2.gather.q.pd") - }, - "_mm256_mask_i64gather_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I64x4, { static PTR: Type = Type::Pointer(&::I64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4, &::I32_8]; &INPUTS }, - output: &::I64x4, - definition: Named("llvm.x86.avx2.gather.q.q.256") - }, - "_mm256_mask_i64gather_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::F64x4, { static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4, &::I64x4_F64, &::I32_8]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx2.gather.q.pd.256") - }, - "_mm_maskload_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x4, Some(&::I8), true); &PTR }, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx2.maskload.d") - }, - "_mm_maskload_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x2, Some(&::I8), true); &PTR }, &::I64x2]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.x86.avx2.maskload.q") - }, - "_mm256_maskload_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I32x8, Some(&::I8), true); &PTR }, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.maskload.d.256") - }, - "_mm256_maskload_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::I64x4, Some(&::I8), true); &PTR }, &::I64x4]; &INPUTS }, - output: &::I64x4, - definition: Named("llvm.x86.avx2.maskload.q.256") - }, - "_mm_maskstore_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x4, &::I32x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx2.maskstore.d") - }, - "_mm_maskstore_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x2, &::I64x2]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx2.maskstore.q") - }, - "_mm256_maskstore_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I32, Some(&::I8), false); &PTR }, &::I32x8, &::I32x8]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx2.maskstore.d.256") - }, - "_mm256_maskstore_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::I64, Some(&::I8), false); &PTR }, &::I64x4, &::I64x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx2.maskstore.q.256") - }, - "_mm256_max_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.pmaxs.b") - }, - "_mm256_max_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U8x32, - definition: 
Named("llvm.x86.avx2.pmaxu.b") - }, - "_mm256_max_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pmaxs.w") - }, - "_mm256_max_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.pmaxu.w") - }, - "_mm256_max_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.pmaxs.d") - }, - "_mm256_max_epu32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, - output: &::U32x8, - definition: Named("llvm.x86.avx2.pmaxu.d") - }, - "_mm256_min_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.pmins.b") - }, - "_mm256_min_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U8x32, - definition: Named("llvm.x86.avx2.pminu.b") - }, - "_mm256_min_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pmins.w") - }, - "_mm256_min_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.pminu.w") - }, - "_mm256_min_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.pmins.d") - }, - "_mm256_min_epu32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, - output: &::U32x8, - definition: Named("llvm.x86.avx2.pminu.d") - }, - "_mm256_movemask_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x32]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx2.pmovmskb") - }, - "_mm256_mpsadbw_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x32, &::U8x32, &::I32_8]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.mpsadbw") - }, - "_mm256_mul_epi64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I64x4, - definition: Named("llvm.x86.avx2.pmulq.dq") - }, - "_mm256_mul_epu64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x8, &::U32x8]; &INPUTS }, - output: &::U64x4, - definition: Named("llvm.x86.avx2.pmulq.dq") - }, - "_mm256_mulhi_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pmulhw.w") - }, - "_mm256_mulhi_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.pmulhw.w") - }, - "_mm256_mulhrs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.pmul.hr.sw") - }, - "_mm256_packs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.packsswb") - }, - "_mm256_packus_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, 
&::I16x16]; &INPUTS }, - output: &::U8x32, - definition: Named("llvm.x86.avx2.packuswb") - }, - "_mm256_packs_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.packssdw") - }, - "_mm256_packus_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.packusdw") - }, - "_mm256_permutevar8x32_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.permd") - }, - "_mm256_permutevar8x32_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx2.permps") - }, - "_mm256_sad_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U64x4, - definition: Named("llvm.x86.avx2.psad.bw") - }, - "_mm256_shuffle_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.pshuf.b") - }, - "_mm256_sign_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.psign.b") - }, - "_mm256_sign_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.psign.w") - }, - "_mm256_sign_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x8, &::I32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx2.psign.d") - }, - "_mm256_subs_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x32, &::I8x32]; &INPUTS }, - output: &::I8x32, - definition: Named("llvm.x86.avx2.psubs.b") - }, - "_mm256_subs_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x32, &::U8x32]; &INPUTS }, - output: &::U8x32, - definition: Named("llvm.x86.avx2.psubus.b") - }, - "_mm256_subs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x16, &::I16x16]; &INPUTS }, - output: &::I16x16, - definition: Named("llvm.x86.avx2.psubs.w") - }, - "_mm256_subs_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x16, &::U16x16]; &INPUTS }, - output: &::U16x16, - definition: Named("llvm.x86.avx2.psubus.w") - }, - "_mm256_addsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.addsub.ps.256") - }, - "_mm256_addsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.addsub.pd.256") - }, - "_mm256_blendv_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.blendv.ps.256") - }, - "_mm256_blendv_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.blendv.pd.256") - }, - "_mm256_broadcast_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: &::F32x8, - definition: 
Named("llvm.x86.avx.vbroadcastf128.ps.256") - }, - "_mm256_broadcast_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::I8, None, true); &PTR }]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.vbroadcastf128.pd.256") - }, - "_mm256_cmp_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.cmp.ps.256") - }, - "_mm256_cmp_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::I8]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.cmp.pd.256") - }, - "_mm256_cvtepi32_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.cvtdq2.pd.256") - }, - "_mm256_cvtepi32_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.cvtdq2.ps.256") - }, - "_mm256_cvtpd_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx.cvt.pd2dq.256") - }, - "_mm256_cvtpd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx.cvt.pd2.ps.256") - }, - "_mm256_cvtps_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx.cvt.ps2dq.256") - }, - "_mm256_cvtps_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.cvt.ps2.pd.256") - }, - "_mm256_cvttpd_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.avx.cvtt.pd2dq.256") - }, - "_mm256_cvttps_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::I32x8, - definition: Named("llvm.x86.avx.cvtt.ps2dq.256") - }, - "_mm256_dp_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::I32_8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.dp.ps.256") - }, - "_mm256_hadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.hadd.ps.256") - }, - "_mm256_hadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.hadd.pd.256") - }, - "_mm256_hsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.hsub.ps.256") - }, - "_mm256_hsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.hsub.pd.256") - }, - "_mm256_max_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.max.ps.256") - }, - "_mm256_max_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.max.pd.256") - }, - "_mm_maskload_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = 
[{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x4_F32]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx.maskload.ps") - }, - "_mm_maskload_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x2_F64]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.avx.maskload.pd") - }, - "_mm256_maskload_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), true); &PTR }, &::I32x8_F32]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.maskload.ps.256") - }, - "_mm256_maskload_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), true); &PTR }, &::I64x4_F64]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.maskload.pd.256") - }, - "_mm_maskstore_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x4_F32, &::F32x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.maskstore.ps") - }, - "_mm_maskstore_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x2_F64, &::F64x2]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.maskstore.pd") - }, - "_mm256_maskstore_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::I32x8_F32, &::F32x8]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.maskstore.ps.256") - }, - "_mm256_maskstore_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::I8), false); &PTR }, &::I64x4_F64, &::F64x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.maskstore.pd.256") - }, - "_mm256_min_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.min.ps.256") - }, - "_mm256_min_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.min.pd.256") - }, - "_mm256_movemask_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.movmsk.ps.256") - }, - "_mm256_movemask_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.movmsk.pd.256") - }, - "_mm_permutevar_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::I32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.avx.vpermilvar.ps") - }, - "_mm_permutevar_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::I64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.avx.vpermilvar.pd") - }, - "_mm256_permutevar_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::I32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.vpermilvar.ps.256") - }, - "_mm256_permutevar_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::I64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.avx.vpermilvar.pd.256") - }, - 
"_mm256_rcp_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.rcp.ps.256") - }, - "_mm256_rsqrt_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.avx.rsqrt.ps.256") - }, - "_mm256_storeu_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32x8, Some(&::U8), false); &PTR }, &::F32x8]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.storeu.ps.256") - }, - "_mm256_storeu_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64x4, Some(&::U8), false); &PTR }, &::F64x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.storeu.ps.256") - }, - "_mm256_storeu_si256" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::U8x32, Some(&::U8), false); &PTR }, &::U8x32]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.storeu.dq.256") - }, - "_mm256_sqrt_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.sqrt.v8f32") - }, - "_mm256_sqrt_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.sqrt.v4f64") - }, - "_mm_testc_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestc.ps") - }, - "_mm256_testc_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestc.ps.256") - }, - "_mm_testc_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestc.pd") - }, - "_mm256_testc_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestc.pd.256") - }, - "_mm256_testc_si256" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.ptestc.256") - }, - "_mm_testnzc_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestnzc.ps") - }, - "_mm256_testnzc_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestnzc.ps.256") - }, - "_mm_testnzc_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestnzc.pd") - }, - "_mm256_testnzc_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestnzc.pd.256") - }, - "_mm256_testnzc_si256" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.ptestnzc.256") - }, - "_mm_testz_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestz.ps") - }, - "_mm256_testz_ps" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x8, &::F32x8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestz.ps.256") - }, - "_mm_testz_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestz.pd") - }, - "_mm256_testz_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x4, &::F64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.vtestz.pd.256") - }, - "_mm256_testz_si256" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x4, &::U64x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.avx.ptestz.256") - }, - "_mm256_zeroall" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.vzeroall") - }, - "_mm256_zeroupper" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.avx.vzeroupper") - }, - "_bmi2_bzhi_32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.x86.bmi.bzhi.32") - }, - "_bmi2_bzhi_64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, - output: &::U64, - definition: Named("llvm.x86.bmi.bzhi.64") - }, - "_bmi2_pdep_32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.x86.bmi.pdep.32") - }, - "_bmi2_pdep_64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, - output: &::U64, - definition: Named("llvm.x86.bmi.pdep.64") - }, - "_bmi2_pext_32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.x86.bmi.pext.32") - }, - "_bmi2_pext_64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, - output: &::U64, - definition: Named("llvm.x86.bmi.pext.64") - }, - "_bmi_bextr_32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.x86.bmi.bextr.32") - }, - "_bmi_bextr_64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, - output: &::U64, - definition: Named("llvm.x86.bmi.bextr.64") - }, - "_mm_fmadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfmadd.ps") - }, - "_mm_fmadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfmadd.pd") - }, - "_mm256_fmadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfmadd.ps.256") - }, - "_mm256_fmadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfmadd.pd.256") - }, - "_mm_fmaddsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfmaddsub.ps") - }, - "_mm_fmaddsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = 
[&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfmaddsub.pd") - }, - "_mm256_fmaddsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfmaddsub.ps.256") - }, - "_mm256_fmaddsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfmaddsub.pd.256") - }, - "_mm_fmsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfmsub.ps") - }, - "_mm_fmsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfmsub.pd") - }, - "_mm256_fmsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfmsub.ps.256") - }, - "_mm256_fmsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfmsub.pd.256") - }, - "_mm_fmsubadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfmsubadd.ps") - }, - "_mm_fmsubadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfmsubadd.pd") - }, - "_mm256_fmsubadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfmsubadd.ps.256") - }, - "_mm256_fmsubadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfmsubadd.pd.256") - }, - "_mm_fnmadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfnmadd.ps") - }, - "_mm_fnmadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfnmadd.pd") - }, - "_mm256_fnmadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfnmadd.ps.256") - }, - "_mm256_fnmadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfnmadd.pd.256") - }, - "_mm_fnmsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.fma.vfnmsub.ps") - }, - "_mm_fnmsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.fma.vfnmsub.pd") - }, - "_mm256_fnmsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x8, &::F32x8, &::F32x8]; &INPUTS }, - output: &::F32x8, - definition: Named("llvm.x86.fma.vfnmsub.ps.256") - }, - "_mm256_fnmsub_pd" => 
Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x4, &::F64x4, &::F64x4]; &INPUTS }, - output: &::F64x4, - definition: Named("llvm.x86.fma.vfnmsub.pd.256") - }, - "_rdrand16_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdrand.16") - }, - "_rdrand32_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdrand.32") - }, - "_rdrand64_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdrand.64") - }, - "_rdseed16_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U16, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdseed.16") - }, - "_rdseed32_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U32, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdseed.32") - }, - "_rdseed64_step" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: { static AGG: Type = Type::Aggregate(false, { static PARTS: [&'static Type; 2] = [&::U64, &::I32]; &PARTS }); &AGG }, - definition: Named("llvm.x86.rdseed.64") - }, - "_mm_adds_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse2.padds.b") - }, - "_mm_adds_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.paddus.b") - }, - "_mm_adds_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.padds.w") - }, - "_mm_adds_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse2.paddus.w") - }, - "_mm_avg_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.pavg.b") - }, - "_mm_avg_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse2.pavg.w") - }, - "_mm_lfence" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.lfence") - }, - "_mm_madd_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.sse2.pmadd.wd") - }, - "_mm_maskmoveu_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, { static PTR: Type = Type::Pointer(&::U8, None, false); &PTR }]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.maskmov.dqu") - }, - "_mm_max_epi16" => Intrinsic { - 
inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.pmaxs.w") - }, - "_mm_max_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.pmaxu.b") - }, - "_mm_max_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse2.max.pd") - }, - "_mm_mfence" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.fence") - }, - "_mm_min_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.pmins.w") - }, - "_mm_min_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.pminu.b") - }, - "_mm_min_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse2.min.pd") - }, - "_mm_movemask_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse2.movmsk.pd") - }, - "_mm_movemask_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse2.pmovmskb.128") - }, - "_mm_mul_epu32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.x86.sse2.pmulu.dq") - }, - "_mm_mulhi_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.pmulh.w") - }, - "_mm_mulhi_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse2.pmulhu.w") - }, - "_mm_packs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse2.packsswb.128") - }, - "_mm_packs_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.packssdw.128") - }, - "_mm_packus_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.packuswb.128") - }, - "_mm_sad_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U64x2, - definition: Named("llvm.x86.sse2.psad.bw") - }, - "_mm_sfence" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 0] = []; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.sfence") - }, - "_mm_sqrt_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.sqrt.v2f64") - }, - "_mm_storeu_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F64, Some(&::U8), false); &PTR }, &::F64x2]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.storeu.pd") - }, - "_mm_storeu_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ 
static PTR: Type = Type::Pointer(&::U8x16, Some(&::U8), false); &PTR }, &::U8x16]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse2.storeu.dq") - }, - "_mm_subs_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse2.psubs.b") - }, - "_mm_subs_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::U8x16]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse2.psubus.b") - }, - "_mm_subs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.sse2.psubs.w") - }, - "_mm_subs_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse2.psubus.w") - }, - "_mm_addsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse3.addsub.ps") - }, - "_mm_addsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse3.addsub.pd") - }, - "_mm_hadd_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse3.hadd.ps") - }, - "_mm_hadd_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse3.hadd.pd") - }, - "_mm_hsub_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse3.hsub.ps") - }, - "_mm_hsub_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F64x2, &::F64x2]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse3.hsub.pd") - }, - "_mm_lddqu_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [{ static PTR: Type = Type::Pointer(&::U8x16, Some(&::I8), true); &PTR }]; &INPUTS }, - output: &::U8x16, - definition: Named("llvm.x86.sse3.ldu.dq") - }, - "_mm_dp_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F32x4, &::F32x4, &::I32_8]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse41.dpps") - }, - "_mm_dp_pd" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::F64x2, &::F64x2, &::I32_8]; &INPUTS }, - output: &::F64x2, - definition: Named("llvm.x86.sse41.dppd") - }, - "_mm_max_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse41.pmaxsb") - }, - "_mm_max_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse41.pmaxuw") - }, - "_mm_max_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.sse41.pmaxsd") - }, - "_mm_max_epu32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.x86.sse41.pmaxud") - }, - "_mm_min_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse41.pminsb") - }, - "_mm_min_epu16" 
=> Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U16x8, &::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse41.pminuw") - }, - "_mm_min_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.sse41.pminsd") - }, - "_mm_min_epu32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32x4, &::U32x4]; &INPUTS }, - output: &::U32x4, - definition: Named("llvm.x86.sse41.pminud") - }, - "_mm_minpos_epu16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::U16x8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse41.phminposuw") - }, - "_mm_mpsadbw_epu8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::U8x16, &::U8x16, &::I32_8]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse41.mpsadbw") - }, - "_mm_mul_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I64x2, - definition: Named("llvm.x86.sse41.pmuldq") - }, - "_mm_packus_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::U16x8, - definition: Named("llvm.x86.sse41.packusdw") - }, - "_mm_testc_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse41.ptestc") - }, - "_mm_testnzc_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse41.ptestnzc") - }, - "_mm_testz_si128" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64x2, &::U64x2]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse41.ptestz") - }, - "_mm_cmpestra" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestria128") - }, - "_mm_cmpestrc" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestric128") - }, - "_mm_cmpestri" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestri128") - }, - "_mm_cmpestrm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse42.pcmpestrm128") - }, - "_mm_cmpestro" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestrio128") - }, - "_mm_cmpestrs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestris128") - }, - "_mm_cmpestrz" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 5] = [&::I8x16, &::I32, &::I8x16, &::I32, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpestriz128") - }, - "_mm_cmpistra" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistria128") - }, - "_mm_cmpistrc" 
=> Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistric128") - }, - "_mm_cmpistri" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistri128") - }, - "_mm_cmpistrm" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.sse42.pcmpistrm128") - }, - "_mm_cmpistro" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistrio128") - }, - "_mm_cmpistrs" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistris128") - }, - "_mm_cmpistrz" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 3] = [&::I8x16, &::I8x16, &::I32_8]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse42.pcmpistriz128") - }, - "_mm_movemask_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::I32, - definition: Named("llvm.x86.sse.movmsk.ps") - }, - "_mm_max_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse.max.ps") - }, - "_mm_min_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::F32x4, &::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse.min.ps") - }, - "_mm_rsqrt_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse.rsqrt.ps") - }, - "_mm_rcp_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.x86.sse.rcp.ps") - }, - "_mm_sqrt_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::F32x4]; &INPUTS }, - output: &::F32x4, - definition: Named("llvm.sqrt.v4f32") - }, - "_mm_storeu_ps" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [{ static PTR: Type = Type::Pointer(&::F32, Some(&::I8), false); &PTR }, &::F32x4]; &INPUTS }, - output: &::VOID, - definition: Named("llvm.x86.sse.storeu.ps") - }, - "_mm_abs_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.ssse3.pabs.b.128") - }, - "_mm_abs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.pabs.w.128") - }, - "_mm_abs_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 1] = [&::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.ssse3.pabs.d.128") - }, - "_mm_hadd_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.phadd.w.128") - }, - "_mm_hadd_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.ssse3.phadd.d.128") - }, - "_mm_hadds_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: 
Named("llvm.x86.ssse3.phadd.sw.128") - }, - "_mm_hsub_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.phsub.w.128") - }, - "_mm_hsub_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.ssse3.phsub.d.128") - }, - "_mm_hsubs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.phsub.sw.128") - }, - "_mm_maddubs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U8x16, &::I8x16]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.pmadd.ub.sw.128") - }, - "_mm_mulhrs_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.pmul.hr.sw.128") - }, - "_mm_shuffle_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.ssse3.pshuf.b.128") - }, - "_mm_sign_epi8" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I8x16, &::I8x16]; &INPUTS }, - output: &::I8x16, - definition: Named("llvm.x86.ssse3.psign.b.128") - }, - "_mm_sign_epi16" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I16x8, &::I16x8]; &INPUTS }, - output: &::I16x8, - definition: Named("llvm.x86.ssse3.psign.w.128") - }, - "_mm_sign_epi32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::I32x4, &::I32x4]; &INPUTS }, - output: &::I32x4, - definition: Named("llvm.x86.ssse3.psign.d.128") - }, - "_tbm_bextri_u32" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U32, &::U32]; &INPUTS }, - output: &::U32, - definition: Named("llvm.x86.tbm.bextri.u32") - }, - "_tbm_bextri_u64" => Intrinsic { - inputs: { static INPUTS: [&'static Type; 2] = [&::U64, &::U64]; &INPUTS }, - output: &::U64, - definition: Named("llvm.x86.tbm.bextri.u64") - }, - _ => return None, - }) -} diff --git a/src/librustc_plugin/Cargo.toml b/src/librustc_plugin/Cargo.toml index d8fa1da1ce219..5e23aa0d7f74e 100644 --- a/src/librustc_plugin/Cargo.toml +++ b/src/librustc_plugin/Cargo.toml @@ -3,6 +3,7 @@ authors = ["The Rust Project Developers"] name = "rustc_plugin" version = "0.0.0" build = false +edition = "2018" [lib] name = "rustc_plugin" diff --git a/src/librustc_plugin/build.rs b/src/librustc_plugin/build.rs index f2728593db4d9..31018a7cd7a3c 100644 --- a/src/librustc_plugin/build.rs +++ b/src/librustc_plugin/build.rs @@ -1,25 +1,15 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Used by `rustc` when compiling a plugin crate. 
-use syntax::ast; use syntax::attr; -use errors; use syntax_pos::Span; -use rustc::hir::map::Map; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::ty::query::Providers; struct RegistrarFinder { - registrars: Vec<(ast::NodeId, Span)> , + registrars: Vec<(hir::HirId, Span)> , } impl<'v> ItemLikeVisitor<'v> for RegistrarFinder { @@ -27,7 +17,7 @@ impl<'v> ItemLikeVisitor<'v> for RegistrarFinder { if let hir::ItemKind::Fn(..) = item.node { if attr::contains_name(&item.attrs, "plugin_registrar") { - self.registrars.push((item.id, item.span)); + self.registrars.push((item.hir_id, item.span)); } } } @@ -39,22 +29,28 @@ impl<'v> ItemLikeVisitor<'v> for RegistrarFinder { } } -/// Find the function marked with `#[plugin_registrar]`, if any. -pub fn find_plugin_registrar(diagnostic: &errors::Handler, - hir_map: &Map) - -> Option { - let krate = hir_map.krate(); +/// Finds the function marked with `#[plugin_registrar]`, if any. +pub fn find_plugin_registrar<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) -> Option { + tcx.plugin_registrar_fn(LOCAL_CRATE) +} + +fn plugin_registrar_fn<'tcx>( + tcx: TyCtxt<'_, 'tcx, 'tcx>, + cnum: CrateNum, +) -> Option { + assert_eq!(cnum, LOCAL_CRATE); let mut finder = RegistrarFinder { registrars: Vec::new() }; - krate.visit_all_item_likes(&mut finder); + tcx.hir().krate().visit_all_item_likes(&mut finder); match finder.registrars.len() { 0 => None, 1 => { - let (node_id, _) = finder.registrars.pop().unwrap(); - Some(node_id) + let (hir_id, _) = finder.registrars.pop().unwrap(); + Some(tcx.hir().local_def_id_from_hir_id(hir_id)) }, _ => { + let diagnostic = tcx.sess.diagnostic(); let mut e = diagnostic.struct_err("multiple plugin registration functions found"); for &(_, span) in &finder.registrars { e.span_note(span, "one is here"); @@ -65,3 +61,11 @@ pub fn find_plugin_registrar(diagnostic: &errors::Handler, } } } + + +pub fn provide(providers: &mut Providers<'_>) { + *providers = Providers { + plugin_registrar_fn, + ..*providers + }; +} diff --git a/src/librustc_plugin/diagnostics.rs b/src/librustc_plugin/diagnostics.rs index 100c1db143974..68462bd83ef60 100644 --- a/src/librustc_plugin/diagnostics.rs +++ b/src/librustc_plugin/diagnostics.rs @@ -1,15 +1,7 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] +use syntax::{register_diagnostic, register_diagnostics, register_long_diagnostics}; + register_long_diagnostics! { } diff --git a/src/librustc_plugin/lib.rs b/src/librustc_plugin/lib.rs index 2cc3f1efcb5f4..351ba7f04d3b1 100644 --- a/src/librustc_plugin/lib.rs +++ b/src/librustc_plugin/lib.rs @@ -1,20 +1,10 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Infrastructure for compiler plugins. //! //! Plugins are Rust libraries which extend the behavior of `rustc` //! in various ways. //! //! 
Plugin authors will use the `Registry` type re-exported by -//! this module, along with its methods. The rest of the module +//! this module, along with its methods. The rest of the module //! is for use by `rustc` itself. //! //! To define a plugin, build a dylib crate with a @@ -60,21 +50,16 @@ //! See the [`plugin` feature](../unstable-book/language-features/plugin.html) of //! the Unstable Book for more examples. -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(nll)] #![feature(rustc_diagnostic_macros)] -#[macro_use] extern crate syntax; +#![recursion_limit="256"] -extern crate rustc; -extern crate rustc_metadata; -extern crate syntax_pos; -extern crate rustc_errors as errors; +#![deny(rust_2018_idioms)] -pub use self::registry::Registry; +pub use registry::Registry; mod diagnostics; pub mod registry; diff --git a/src/librustc_plugin/load.rs b/src/librustc_plugin/load.rs index ad55672fb47de..8b86bddb29f4e 100644 --- a/src/librustc_plugin/load.rs +++ b/src/librustc_plugin/load.rs @@ -1,30 +1,21 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Used by `rustc` when loading a plugin. use rustc::session::Session; use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; -use registry::Registry; +use crate::registry::Registry; use std::borrow::ToOwned; use std::env; use std::mem; use std::path::PathBuf; use syntax::ast; +use syntax::span_err; use syntax_pos::{Span, DUMMY_SP}; /// Pointer to a registrar function. pub type PluginRegistrarFun = - fn(&mut Registry); + fn(&mut Registry<'_>); pub struct PluginRegistrar { pub fun: PluginRegistrarFun, @@ -60,20 +51,17 @@ pub fn load_plugins(sess: &Session, let plugins = match attr.meta_item_list() { Some(xs) => xs, - None => { - call_malformed_plugin_attribute(sess, attr.span); - continue; - } + None => continue, }; for plugin in plugins { // plugins must have a name and can't be key = value - match plugin.name() { - Some(name) if !plugin.is_value_str() => { - let args = plugin.meta_item_list().map(ToOwned::to_owned); - loader.load_plugin(plugin.span, &name.as_str(), args.unwrap_or_default()); - }, - _ => call_malformed_plugin_attribute(sess, attr.span), + let name = plugin.name_or_empty(); + if !name.is_empty() && !plugin.is_value_str() { + let args = plugin.meta_item_list().map(ToOwned::to_owned); + loader.load_plugin(plugin.span(), &name, args.unwrap_or_default()); + } else { + call_malformed_plugin_attribute(sess, attr.span); } } } diff --git a/src/librustc_plugin/registry.rs b/src/librustc_plugin/registry.rs index 2fb28a8a66f97..5c5b6f232b271 100644 --- a/src/librustc_plugin/registry.rs +++ b/src/librustc_plugin/registry.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! Used by plugin crates to tell `rustc` about the plugins they provide. use rustc::lint::{EarlyLintPassObject, LateLintPassObject, LintId, Lint}; @@ -78,7 +68,7 @@ impl<'a> Registry<'a> { } } - /// Get the plugin's arguments, if any. + /// Gets the plugin's arguments, if any. /// /// These are specified inside the `plugin` crate attribute as /// @@ -120,8 +110,8 @@ impl<'a> Registry<'a> { edition, } } - IdentTT(ext, _, allow_internal_unstable) => { - IdentTT(ext, Some(self.krate_span), allow_internal_unstable) + IdentTT { expander, span: _, allow_internal_unstable } => { + IdentTT { expander, span: Some(self.krate_span), allow_internal_unstable } } _ => extension, })); @@ -136,7 +126,7 @@ impl<'a> Registry<'a> { self.register_syntax_extension(Symbol::intern(name), NormalTT { expander: Box::new(expander), def_info: None, - allow_internal_unstable: false, + allow_internal_unstable: None, allow_internal_unsafe: false, local_inner_macros: false, unstable_feature: None, diff --git a/src/librustc_privacy/Cargo.toml b/src/librustc_privacy/Cargo.toml index 62eab40f3ec9a..5bf8024c56911 100644 --- a/src/librustc_privacy/Cargo.toml +++ b/src/librustc_privacy/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_privacy" version = "0.0.0" +edition = "2018" [lib] name = "rustc_privacy" @@ -13,4 +14,5 @@ rustc = { path = "../librustc" } rustc_typeck = { path = "../librustc_typeck" } syntax = { path = "../libsyntax" } syntax_pos = { path = "../libsyntax_pos" } -rustc_data_structures = { path = "../librustc_data_structures" } \ No newline at end of file +rustc_data_structures = { path = "../librustc_data_structures" } +log = "0.4" diff --git a/src/librustc_privacy/diagnostics.rs b/src/librustc_privacy/diagnostics.rs index f8559954db12b..fa4df53e47b31 100644 --- a/src/librustc_privacy/diagnostics.rs +++ b/src/librustc_privacy/diagnostics.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] register_long_diagnostics! { diff --git a/src/librustc_privacy/lib.rs b/src/librustc_privacy/lib.rs index 86e3b231fc7ff..9a8970b2935e0 100644 --- a/src/librustc_privacy/lib.rs +++ b/src/librustc_privacy/lib.rs @@ -1,51 +1,339 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] #![feature(nll)] #![feature(rustc_diagnostic_macros)] #![recursion_limit="256"] -#[macro_use] extern crate rustc; #[macro_use] extern crate syntax; -extern crate rustc_typeck; -extern crate syntax_pos; -extern crate rustc_data_structures; -use rustc::hir::{self, Node, PatKind}; +use rustc::bug; +use rustc::hir::{self, Node, PatKind, AssociatedItemKind}; use rustc::hir::def::Def; use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, CrateNum, DefId}; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::itemlikevisit::DeepVisitor; use rustc::lint; use rustc::middle::privacy::{AccessLevel, AccessLevels}; -use rustc::ty::{self, TyCtxt, Ty, TypeFoldable, GenericParamDefKind}; +use rustc::ty::{self, TyCtxt, Ty, TraitRef, TypeFoldable, GenericParamDefKind}; use rustc::ty::fold::TypeVisitor; use rustc::ty::query::Providers; -use rustc::ty::subst::UnpackedKind; -use rustc::util::nodemap::NodeSet; +use rustc::ty::subst::InternalSubsts; +use rustc::util::nodemap::HirIdSet; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::sync::Lrc; -use syntax::ast::{self, CRATE_NODE_ID, Ident}; +use syntax::ast::Ident; +use syntax::attr; use syntax::symbol::keywords; use syntax_pos::Span; -use std::cmp; -use std::mem::replace; +use std::{cmp, fmt, mem}; +use std::marker::PhantomData; mod diagnostics; +//////////////////////////////////////////////////////////////////////////////// +/// Generic infrastructure used to implement specific visitors below. +//////////////////////////////////////////////////////////////////////////////// + +/// Implemented to visit all `DefId`s in a type. +/// Visiting `DefId`s is useful because visibilities and reachabilities are attached to them. +/// The idea is to visit "all components of a type", as documented in +/// https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type. +/// The default type visitor (`TypeVisitor`) does most of the job, but it has some shortcomings. +/// First, it doesn't have overridable `fn visit_trait_ref`, so we have to catch trait `DefId`s +/// manually. Second, it doesn't visit some type components like signatures of fn types, or traits +/// in `impl Trait`, see individual comments in `DefIdVisitorSkeleton::visit_ty`. +trait DefIdVisitor<'a, 'tcx: 'a> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; + fn shallow(&self) -> bool { false } + fn skip_assoc_tys(&self) -> bool { false } + fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool; + + /// Not overridden, but used to actually visit types and traits. 
+ fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'a, 'tcx, Self> { + DefIdVisitorSkeleton { + def_id_visitor: self, + visited_opaque_tys: Default::default(), + dummy: Default::default(), + } + } + fn visit(&mut self, ty_fragment: impl TypeFoldable<'tcx>) -> bool { + ty_fragment.visit_with(&mut self.skeleton()) + } + fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool { + self.skeleton().visit_trait(trait_ref) + } + fn visit_predicates(&mut self, predicates: Lrc>) -> bool { + self.skeleton().visit_predicates(predicates) + } +} + +struct DefIdVisitorSkeleton<'v, 'a, 'tcx, V> + where V: DefIdVisitor<'a, 'tcx> + ?Sized +{ + def_id_visitor: &'v mut V, + visited_opaque_tys: FxHashSet, + dummy: PhantomData>, +} + +impl<'a, 'tcx, V> DefIdVisitorSkeleton<'_, 'a, 'tcx, V> + where V: DefIdVisitor<'a, 'tcx> + ?Sized +{ + fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> bool { + let TraitRef { def_id, substs } = trait_ref; + self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref) || + (!self.def_id_visitor.shallow() && substs.visit_with(self)) + } + + fn visit_predicates(&mut self, predicates: Lrc>) -> bool { + let ty::GenericPredicates { parent: _, predicates } = &*predicates; + for (predicate, _span) in predicates { + match predicate { + ty::Predicate::Trait(poly_predicate) => { + let ty::TraitPredicate { trait_ref } = *poly_predicate.skip_binder(); + if self.visit_trait(trait_ref) { + return true; + } + } + ty::Predicate::Projection(poly_predicate) => { + let ty::ProjectionPredicate { projection_ty, ty } = + *poly_predicate.skip_binder(); + if ty.visit_with(self) { + return true; + } + if self.visit_trait(projection_ty.trait_ref(self.def_id_visitor.tcx())) { + return true; + } + } + ty::Predicate::TypeOutlives(poly_predicate) => { + let ty::OutlivesPredicate(ty, _region) = *poly_predicate.skip_binder(); + if ty.visit_with(self) { + return true; + } + } + ty::Predicate::RegionOutlives(..) => {}, + _ => bug!("unexpected predicate: {:?}", predicate), + } + } + false + } +} + +impl<'a, 'tcx, V> TypeVisitor<'tcx> for DefIdVisitorSkeleton<'_, 'a, 'tcx, V> + where V: DefIdVisitor<'a, 'tcx> + ?Sized +{ + fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { + let tcx = self.def_id_visitor.tcx(); + // InternalSubsts are not visited here because they are visited below in `super_visit_with`. + match ty.sty { + ty::Adt(&ty::AdtDef { did: def_id, .. }, ..) | + ty::Foreign(def_id) | + ty::FnDef(def_id, ..) | + ty::Closure(def_id, ..) | + ty::Generator(def_id, ..) => { + if self.def_id_visitor.visit_def_id(def_id, "type", &ty) { + return true; + } + if self.def_id_visitor.shallow() { + return false; + } + // Default type visitor doesn't visit signatures of fn types. + // Something like `fn() -> Priv {my_func}` is considered a private type even if + // `my_func` is public, so we need to visit signatures. + if let ty::FnDef(..) = ty.sty { + if tcx.fn_sig(def_id).visit_with(self) { + return true; + } + } + // Inherent static methods don't have self type in substs. + // Something like `fn() {my_method}` type of the method + // `impl Pub { pub fn my_method() {} }` is considered a private type, + // so we need to visit the self type additionally. 
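The two comments directly above describe source-level situations rather than compiler internals; a small crate-local illustration of what they refer to (all names made up for the example):

```rust
mod m {
    pub(crate) struct Priv;
    pub struct Pub;

    // The fn item type of `make` is `fn() -> Priv {m::make}`: the private
    // `Priv` appears only in the *signature*, which is why `visit_ty` above
    // also walks `tcx.fn_sig` for `ty::FnDef`.
    pub(crate) fn make() -> Priv { Priv }

    impl Pub {
        // Inherent static method: `Pub` shows up neither in the signature nor
        // in the substs of `fn() {m::Pub::ping}`, so the impl's self type has
        // to be visited separately.
        pub fn ping() {}
    }
}

fn main() {
    let f = m::make;      // value of type `fn() -> m::Priv {m::make}`
    let g = m::Pub::ping; // value of type `fn() {m::Pub::ping}`
    let _priv = f();
    g();
}
```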
+ if let Some(assoc_item) = tcx.opt_associated_item(def_id) { + if let ty::ImplContainer(impl_def_id) = assoc_item.container { + if tcx.type_of(impl_def_id).visit_with(self) { + return true; + } + } + } + } + ty::Projection(proj) | ty::UnnormalizedProjection(proj) => { + if self.def_id_visitor.skip_assoc_tys() { + // Visitors searching for minimal visibility/reachability want to + // conservatively approximate associated types like `::Alias` + // as visible/reachable even if both `Type` and `Trait` are private. + // Ideally, associated types should be substituted in the same way as + // free type aliases, but this isn't done yet. + return false; + } + // This will also visit substs if necessary, so we don't need to recurse. + return self.visit_trait(proj.trait_ref(tcx)); + } + ty::Dynamic(predicates, ..) => { + // All traits in the list are considered the "primary" part of the type + // and are visited by shallow visitors. + for predicate in *predicates.skip_binder() { + let trait_ref = match *predicate { + ty::ExistentialPredicate::Trait(trait_ref) => trait_ref, + ty::ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx), + ty::ExistentialPredicate::AutoTrait(def_id) => + ty::ExistentialTraitRef { def_id, substs: InternalSubsts::empty() }, + }; + let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref; + if self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref) { + return true; + } + } + } + ty::Opaque(def_id, ..) => { + // Skip repeated `Opaque`s to avoid infinite recursion. + if self.visited_opaque_tys.insert(def_id) { + // The intent is to treat `impl Trait1 + Trait2` identically to + // `dyn Trait1 + Trait2`. Therefore we ignore def-id of the opaque type itself + // (it either has no visibility, or its visibility is insignificant, like + // visibilities of type aliases) and recurse into predicates instead to go + // through the trait list (default type visitor doesn't visit those traits). + // All traits in the list are considered the "primary" part of the type + // and are visited by shallow visitors. + if self.visit_predicates(tcx.predicates_of(def_id)) { + return true; + } + } + } + // These types don't have their own def-ids (but may have subcomponents + // with def-ids that should be visited recursively). + ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | + ty::Float(..) | ty::Str | ty::Never | + ty::Array(..) | ty::Slice(..) | ty::Tuple(..) | + ty::RawPtr(..) | ty::Ref(..) | ty::FnPtr(..) | + ty::Param(..) | ty::Error | ty::GeneratorWitness(..) => {} + ty::Bound(..) | ty::Placeholder(..) | ty::Infer(..) => + bug!("unexpected type: {:?}", ty), + } + + !self.def_id_visitor.shallow() && ty.super_visit_with(self) + } +} + +fn def_id_visibility<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) + -> (ty::Visibility, Span, &'static str) { + match tcx.hir().as_local_hir_id(def_id) { + Some(hir_id) => { + let vis = match tcx.hir().get_by_hir_id(hir_id) { + Node::Item(item) => &item.vis, + Node::ForeignItem(foreign_item) => &foreign_item.vis, + Node::TraitItem(..) | Node::Variant(..) 
=> { + return def_id_visibility(tcx, tcx.hir().get_parent_did_by_hir_id(hir_id)); + } + Node::ImplItem(impl_item) => { + match tcx.hir().get_by_hir_id(tcx.hir().get_parent_item(hir_id)) { + Node::Item(item) => match &item.node { + hir::ItemKind::Impl(.., None, _, _) => &impl_item.vis, + hir::ItemKind::Impl(.., Some(trait_ref), _, _) + => return def_id_visibility(tcx, trait_ref.path.def.def_id()), + kind => bug!("unexpected item kind: {:?}", kind), + } + node => bug!("unexpected node kind: {:?}", node), + } + } + Node::Ctor(vdata) => { + let parent_hir_id = tcx.hir().get_parent_node_by_hir_id(hir_id); + match tcx.hir().get_by_hir_id(parent_hir_id) { + Node::Variant(..) => { + let parent_did = tcx.hir().local_def_id_from_hir_id(parent_hir_id); + let (mut ctor_vis, mut span, mut descr) = def_id_visibility( + tcx, parent_did, + ); + + let adt_def = tcx.adt_def(tcx.hir().get_parent_did_by_hir_id(hir_id)); + let ctor_did = tcx.hir().local_def_id_from_hir_id( + vdata.ctor_hir_id().unwrap()); + let variant = adt_def.variant_with_ctor_id(ctor_did); + + if variant.is_field_list_non_exhaustive() && + ctor_vis == ty::Visibility::Public + { + ctor_vis = ty::Visibility::Restricted( + DefId::local(CRATE_DEF_INDEX)); + let attrs = tcx.get_attrs(variant.def_id); + span = attr::find_by_name(&attrs, "non_exhaustive").unwrap().span; + descr = "crate-visible"; + } + + return (ctor_vis, span, descr); + } + Node::Item(..) => { + let item = match tcx.hir().get_by_hir_id(parent_hir_id) { + Node::Item(item) => item, + node => bug!("unexpected node kind: {:?}", node), + }; + let (mut ctor_vis, mut span, mut descr) = + (ty::Visibility::from_hir(&item.vis, parent_hir_id, tcx), + item.vis.span, item.vis.node.descr()); + for field in vdata.fields() { + let field_vis = ty::Visibility::from_hir(&field.vis, hir_id, tcx); + if ctor_vis.is_at_least(field_vis, tcx) { + ctor_vis = field_vis; + span = field.vis.span; + descr = field.vis.node.descr(); + } + } + + // If the structure is marked as non_exhaustive then lower the + // visibility to within the crate. + if ctor_vis == ty::Visibility::Public { + let adt_def = + tcx.adt_def(tcx.hir().get_parent_did_by_hir_id(hir_id)); + if adt_def.non_enum_variant().is_field_list_non_exhaustive() { + ctor_vis = + ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)); + span = attr::find_by_name(&item.attrs, "non_exhaustive") + .unwrap().span; + descr = "crate-visible"; + } + } + + return (ctor_vis, span, descr); + } + node => bug!("unexpected node kind: {:?}", node), + } + } + Node::Expr(expr) => { + return (ty::Visibility::Restricted( + tcx.hir().get_module_parent_by_hir_id(expr.hir_id)), + expr.span, "private") + } + node => bug!("unexpected node kind: {:?}", node) + }; + (ty::Visibility::from_hir(vis, hir_id, tcx), vis.span, vis.node.descr()) + } + None => { + let vis = tcx.visibility(def_id); + let descr = if vis == ty::Visibility::Public { "public" } else { "private" }; + (vis, tcx.def_span(def_id), descr) + } + } +} + +// Set the correct `TypeckTables` for the given `item_id` (or an empty table if +// there is no `TypeckTables` for the item). 
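The `Ctor` branches above mostly deal with `#[non_exhaustive]`; as a user-level illustration (the attribute was still feature-gated when this change landed, and `Config` is a made-up name), the visibility adjustment amounts to:

```rust
// Library code in the defining crate: the type stays `pub`, but
// `def_id_visibility` lowers the tuple-struct constructor to at most crate
// visibility, so other crates cannot call `Config(..)` as a constructor.
#[non_exhaustive]
pub struct Config(pub u32);

pub fn default_config() -> Config {
    Config(0) // still fine here, inside the defining crate
}
```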
+fn item_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + hir_id: hir::HirId, + empty_tables: &'a ty::TypeckTables<'tcx>) + -> &'a ty::TypeckTables<'tcx> { + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); + if tcx.has_typeck_tables(def_id) { tcx.typeck_tables_of(def_id) } else { empty_tables } +} + +fn min<'a, 'tcx>(vis1: ty::Visibility, vis2: ty::Visibility, tcx: TyCtxt<'a, 'tcx, 'tcx>) + -> ty::Visibility { + if vis1.is_at_least(vis2, tcx) { vis2 } else { vis1 } +} + //////////////////////////////////////////////////////////////////////////////// /// Visitor used to determine if pub(restricted) is used anywhere in the crate. /// @@ -67,7 +355,72 @@ impl<'a, 'tcx> Visitor<'tcx> for PubRestrictedVisitor<'a, 'tcx> { } //////////////////////////////////////////////////////////////////////////////// -/// The embargo visitor, used to determine the exports of the ast +/// Visitor used to determine impl visibility and reachability. +//////////////////////////////////////////////////////////////////////////////// + +struct FindMin<'a, 'tcx, VL: VisibilityLike> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &'a AccessLevels, + min: VL, +} + +impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'a, 'tcx> for FindMin<'a, 'tcx, VL> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx } + fn shallow(&self) -> bool { VL::SHALLOW } + fn skip_assoc_tys(&self) -> bool { true } + fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool { + self.min = VL::new_min(self, def_id); + false + } +} + +trait VisibilityLike: Sized { + const MAX: Self; + const SHALLOW: bool = false; + fn new_min<'a, 'tcx>(find: &FindMin<'a, 'tcx, Self>, def_id: DefId) -> Self; + + // Returns an over-approximation (`skip_assoc_tys` = true) of visibility due to + // associated types for which we can't determine visibility precisely. + fn of_impl<'a, 'tcx>(hir_id: hir::HirId, tcx: TyCtxt<'a, 'tcx, 'tcx>, + access_levels: &'a AccessLevels) -> Self { + let mut find = FindMin { tcx, access_levels, min: Self::MAX }; + let def_id = tcx.hir().local_def_id_from_hir_id(hir_id); + find.visit(tcx.type_of(def_id)); + if let Some(trait_ref) = tcx.impl_trait_ref(def_id) { + find.visit_trait(trait_ref); + } + find.min + } +} +impl VisibilityLike for ty::Visibility { + const MAX: Self = ty::Visibility::Public; + fn new_min<'a, 'tcx>(find: &FindMin<'a, 'tcx, Self>, def_id: DefId) -> Self { + min(def_id_visibility(find.tcx, def_id).0, find.min, find.tcx) + } +} +impl VisibilityLike for Option { + const MAX: Self = Some(AccessLevel::Public); + // Type inference is very smart sometimes. + // It can make an impl reachable even some components of its type or trait are unreachable. + // E.g. methods of `impl ReachableTrait for ReachableTy { ... }` + // can be usable from other crates (#57264). So we skip substs when calculating reachability + // and consider an impl reachable if its "shallow" type and trait are reachable. + // + // The assumption we make here is that type-inference won't let you use an impl without knowing + // both "shallow" version of its self type and "shallow" version of its trait if it exists + // (which require reaching the `DefId`s in them). 
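`of_impl` above feeds `FindMin`, which reduces an impl header to the least visible `DefId` it mentions; a crate-local illustration of that `min` computation (names invented for the example):

```rust
mod m {
    struct Priv;                        // visible only inside `m`
    pub trait Greet {
        fn greet(&self) { println!("hello"); }
    }

    // For `VisibilityLike::of_impl`, this impl's level is
    // min(visibility(Priv), visibility(Greet)) = visibility(Priv),
    // i.e. the impl and its items count as visible only within `m`.
    impl Greet for Priv {}

    pub fn demo() {
        Priv.greet();
    }
}

fn main() {
    m::demo();
}
```

The `SHALLOW` mode defined just below runs the same walk but stops at the "primary" components and skips substs, which is the over-approximation the reachability comment above describes.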
+ const SHALLOW: bool = true; + fn new_min<'a, 'tcx>(find: &FindMin<'a, 'tcx, Self>, def_id: DefId) -> Self { + cmp::min(if let Some(hir_id) = find.tcx.hir().as_local_hir_id(def_id) { + find.access_levels.map.get(&hir_id).cloned() + } else { + Self::MAX + }, find.min) + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// The embargo visitor, used to determine the exports of the AST. //////////////////////////////////////////////////////////////////////////////// struct EmbargoVisitor<'a, 'tcx: 'a> { @@ -88,36 +441,12 @@ struct ReachEverythingInTheInterfaceVisitor<'b, 'a: 'b, 'tcx: 'a> { } impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> { - fn item_ty_level(&self, item_def_id: DefId) -> Option { - let ty_def_id = match self.tcx.type_of(item_def_id).sty { - ty::Adt(adt, _) => adt.did, - ty::Foreign(did) => did, - ty::Dynamic(ref obj, ..) => obj.principal().def_id(), - ty::Projection(ref proj) => proj.trait_ref(self.tcx).def_id, - _ => return Some(AccessLevel::Public) - }; - if let Some(node_id) = self.tcx.hir().as_local_node_id(ty_def_id) { - self.get(node_id) - } else { - Some(AccessLevel::Public) - } - } - - fn impl_trait_level(&self, impl_def_id: DefId) -> Option { - if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_def_id) { - if let Some(node_id) = self.tcx.hir().as_local_node_id(trait_ref.def_id) { - return self.get(node_id); - } - } - Some(AccessLevel::Public) - } - - fn get(&self, id: ast::NodeId) -> Option { + fn get(&self, id: hir::HirId) -> Option { self.access_levels.map.get(&id).cloned() } // Updates node level and returns the updated level. - fn update(&mut self, id: ast::NodeId, level: Option) -> Option { + fn update(&mut self, id: hir::HirId, level: Option) -> Option { let old_level = self.get(id); // Accessibility levels can only grow. if level > old_level { @@ -129,14 +458,51 @@ impl<'a, 'tcx> EmbargoVisitor<'a, 'tcx> { } } - fn reach<'b>(&'b mut self, item_id: ast::NodeId) - -> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { + fn reach(&mut self, item_id: hir::HirId, access_level: Option) + -> ReachEverythingInTheInterfaceVisitor<'_, 'a, 'tcx> { ReachEverythingInTheInterfaceVisitor { - access_level: self.prev_level.map(|l| l.min(AccessLevel::Reachable)), - item_def_id: self.tcx.hir().local_def_id(item_id), + access_level: cmp::min(access_level, Some(AccessLevel::Reachable)), + item_def_id: self.tcx.hir().local_def_id_from_hir_id(item_id), ev: self, } } + + + /// Given the path segments of a `ItemKind::Use`, then we need + /// to update the visibility of the intermediate use so that it isn't linted + /// by `unreachable_pub`. + /// + /// This isn't trivial as `path.def` has the `DefId` of the eventual target + /// of the use statement not of the next intermediate use statement. + /// + /// To do this, consider the last two segments of the path to our intermediate + /// use statement. We expect the penultimate segment to be a module and the + /// last segment to be the name of the item we are exporting. We can then + /// look at the items contained in the module for the use statement with that + /// name and update that item's visibility. + /// + /// FIXME: This solution won't work with glob imports and doesn't respect + /// namespaces. See . 
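The doc comment above corresponds to re-export chains like the following (illustrative library code, not part of the compiler):

```rust
// Crate root (lib.rs) of an illustrative library.
#![deny(unreachable_pub)]

mod a {
    mod b {
        pub struct S;
    }
    // Intermediate re-export: the resolution of the crate-root `use` below
    // points directly at `b::S`, not at this `use`, so the embargo visitor
    // walks the path segments to find this item and mark it `Exported`.
    pub use self::b::S;
}

pub use self::a::S;
```

Without updating the intermediate `pub use`, `unreachable_pub` would flag it even though it is reachable through the chain.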
+ fn update_visibility_of_intermediate_use_statements(&mut self, segments: &[hir::PathSegment]) { + if let Some([module, segment]) = segments.rchunks_exact(2).next() { + if let Some(item) = module.def + .and_then(|def| def.mod_def_id()) + .and_then(|def_id| self.tcx.hir().as_local_hir_id(def_id)) + .map(|module_hir_id| self.tcx.hir().expect_item_by_hir_id(module_hir_id)) + { + if let hir::ItemKind::Mod(m) = &item.node { + for item_id in m.item_ids.as_ref() { + let item = self.tcx.hir().expect_item_by_hir_id(item_id.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item_id.id); + if !self.tcx.hygienic_eq(segment.ident, item.ident, def_id) { continue; } + if let hir::ItemKind::Use(..) = item.node { + self.update(item.hir_id, Some(AccessLevel::Exported)); + } + } + } + } + } + } } impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { @@ -148,15 +514,10 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { let inherited_item_level = match item.node { - // Impls inherit level from their types and traits. - hir::ItemKind::Impl(..) => { - let def_id = self.tcx.hir().local_def_id(item.id); - cmp::min(self.item_ty_level(def_id), self.impl_trait_level(def_id)) - } + hir::ItemKind::Impl(..) => + Option::::of_impl(item.hir_id, self.tcx, &self.access_levels), // Foreign modules inherit level from parents. - hir::ItemKind::ForeignMod(..) => { - self.prev_level - } + hir::ItemKind::ForeignMod(..) => self.prev_level, // Other `pub` items inherit levels from parents. hir::ItemKind::Const(..) | hir::ItemKind::Enum(..) | hir::ItemKind::ExternCrate(..) | hir::ItemKind::GlobalAsm(..) | hir::ItemKind::Fn(..) | hir::ItemKind::Mod(..) | @@ -169,61 +530,51 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { }; // Update level of the item itself. - let item_level = self.update(item.id, inherited_item_level); + let item_level = self.update(item.hir_id, inherited_item_level); // Update levels of nested things. 
match item.node { hir::ItemKind::Enum(ref def, _) => { for variant in &def.variants { - let variant_level = self.update(variant.node.data.id(), item_level); + let variant_level = self.update(variant.node.id, item_level); + if let Some(ctor_hir_id) = variant.node.data.ctor_hir_id() { + self.update(ctor_hir_id, item_level); + } for field in variant.node.data.fields() { - self.update(field.id, variant_level); + self.update(field.hir_id, variant_level); } } } - hir::ItemKind::Impl(.., None, _, ref impl_item_refs) => { + hir::ItemKind::Impl(.., ref trait_ref, _, ref impl_item_refs) => { for impl_item_ref in impl_item_refs { - if impl_item_ref.vis.node.is_pub() { - self.update(impl_item_ref.id.node_id, item_level); + if trait_ref.is_some() || impl_item_ref.vis.node.is_pub() { + self.update(impl_item_ref.id.hir_id, item_level); } } } - hir::ItemKind::Impl(.., Some(_), _, ref impl_item_refs) => { - for impl_item_ref in impl_item_refs { - self.update(impl_item_ref.id.node_id, item_level); - } - } hir::ItemKind::Trait(.., ref trait_item_refs) => { for trait_item_ref in trait_item_refs { - self.update(trait_item_ref.id.node_id, item_level); + self.update(trait_item_ref.id.hir_id, item_level); } } hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => { - if !def.is_struct() { - self.update(def.id(), item_level); + if let Some(ctor_hir_id) = def.ctor_hir_id() { + self.update(ctor_hir_id, item_level); } for field in def.fields() { if field.vis.node.is_pub() { - self.update(field.id, item_level); + self.update(field.hir_id, item_level); } } } hir::ItemKind::ForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { if foreign_item.vis.node.is_pub() { - self.update(foreign_item.id, item_level); - } - } - } - // Impl trait return types mark their parent function. - // It (and its children) are revisited if the change applies. - hir::ItemKind::Existential(ref ty_data) => { - if let Some(impl_trait_fn) = ty_data.impl_trait_fn { - if let Some(node_id) = self.tcx.hir().as_local_node_id(impl_trait_fn) { - self.update(node_id, Some(AccessLevel::ReachableFromImplTrait)); + self.update(foreign_item.hir_id, item_level); } } } + hir::ItemKind::Existential(..) | hir::ItemKind::Use(..) | hir::ItemKind::Static(..) | hir::ItemKind::Const(..) | @@ -235,43 +586,45 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { hir::ItemKind::ExternCrate(..) => {} } - // Store this node's access level here to propagate the correct - // reachability level through interfaces and children. - let orig_level = replace(&mut self.prev_level, item_level); - // Mark all items in interfaces of reachable items as reachable. match item.node { // The interface is empty. hir::ItemKind::ExternCrate(..) => {} // All nested items are checked by `visit_item`. hir::ItemKind::Mod(..) => {} - // Re-exports are handled in `visit_mod`. - hir::ItemKind::Use(..) => {} - // The interface is empty. - hir::ItemKind::GlobalAsm(..) => {} - hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: Some(_), .. }) => { + // Re-exports are handled in `visit_mod`. However, in order to avoid looping over + // all of the items of a mod in `visit_mod` looking for use statements, we handle + // making sure that intermediate use statements have their visibilities updated here. + hir::ItemKind::Use(ref path, _) => { if item_level.is_some() { - // Reach the (potentially private) type and the API being exposed. 
- self.reach(item.id).ty().predicates(); + self.update_visibility_of_intermediate_use_statements(path.segments.as_ref()); } } + // The interface is empty. + hir::ItemKind::GlobalAsm(..) => {} + hir::ItemKind::Existential(..) => { + // FIXME: This is some serious pessimization intended to workaround deficiencies + // in the reachability pass (`middle/reachable.rs`). Types are marked as link-time + // reachable if they are returned via `impl Trait`, even from private functions. + let exist_level = cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait)); + self.reach(item.hir_id, exist_level).generics().predicates().ty(); + } // Visit everything. hir::ItemKind::Const(..) | hir::ItemKind::Static(..) | - hir::ItemKind::Existential(..) | hir::ItemKind::Fn(..) | hir::ItemKind::Ty(..) => { if item_level.is_some() { - self.reach(item.id).generics().predicates().ty(); + self.reach(item.hir_id, item_level).generics().predicates().ty(); } } hir::ItemKind::Trait(.., ref trait_item_refs) => { if item_level.is_some() { - self.reach(item.id).generics().predicates(); + self.reach(item.hir_id, item_level).generics().predicates(); for trait_item_ref in trait_item_refs { - let mut reach = self.reach(trait_item_ref.id.node_id); + let mut reach = self.reach(trait_item_ref.id.hir_id, item_level); reach.generics().predicates(); - if trait_item_ref.kind == hir::AssociatedItemKind::Type && + if trait_item_ref.kind == AssociatedItemKind::Type && !trait_item_ref.defaultness.has_value() { // No type to visit. } else { @@ -282,18 +635,19 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { } hir::ItemKind::TraitAlias(..) => { if item_level.is_some() { - self.reach(item.id).generics().predicates(); + self.reach(item.hir_id, item_level).generics().predicates(); } } // Visit everything except for private impl items. - hir::ItemKind::Impl(.., ref trait_ref, _, ref impl_item_refs) => { + hir::ItemKind::Impl(.., ref impl_item_refs) => { if item_level.is_some() { - self.reach(item.id).generics().predicates().impl_trait_ref(); + self.reach(item.hir_id, item_level).generics().predicates().ty().trait_ref(); for impl_item_ref in impl_item_refs { - let id = impl_item_ref.id.node_id; - if trait_ref.is_some() || self.get(id).is_some() { - self.reach(id).generics().predicates().ty(); + let impl_item_level = self.get(impl_item_ref.id.hir_id); + if impl_item_level.is_some() { + self.reach(impl_item_ref.id.hir_id, impl_item_level) + .generics().predicates().ty(); } } } @@ -302,24 +656,27 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { // Visit everything, but enum variants have their own levels. hir::ItemKind::Enum(ref def, _) => { if item_level.is_some() { - self.reach(item.id).generics().predicates(); + self.reach(item.hir_id, item_level).generics().predicates(); } for variant in &def.variants { - if self.get(variant.node.data.id()).is_some() { + let variant_level = self.get(variant.node.id); + if variant_level.is_some() { for field in variant.node.data.fields() { - self.reach(field.id).ty(); + self.reach(field.hir_id, variant_level).ty(); } // Corner case: if the variant is reachable, but its // enum is not, make the enum reachable as well. - self.update(item.id, Some(AccessLevel::Reachable)); + self.update(item.hir_id, variant_level); } } } // Visit everything, but foreign items have their own levels. 
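Stepping back to the `Existential` (`impl Trait`) arm a few lines up: the pessimization it describes shows up in ordinary user code like this sketch (names invented):

```rust
mod m {
    #[derive(Debug)]
    pub(crate) struct Secret;

    // `make` is crate-private, but because it returns `impl Trait` the hidden
    // type (`Secret`) still has to be treated as link-time reachable, which is
    // what the `ReachableFromImplTrait` level models.
    pub(crate) fn make() -> impl std::fmt::Debug {
        Secret
    }
}

fn main() {
    println!("{:?}", m::make());
}
```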
hir::ItemKind::ForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { - if self.get(foreign_item.id).is_some() { - self.reach(foreign_item.id).generics().predicates().ty(); + let foreign_item_level = self.get(foreign_item.hir_id); + if foreign_item_level.is_some() { + self.reach(foreign_item.hir_id, foreign_item_level) + .generics().predicates().ty(); } } } @@ -327,43 +684,42 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { if item_level.is_some() { - self.reach(item.id).generics().predicates(); + self.reach(item.hir_id, item_level).generics().predicates(); for field in struct_def.fields() { - if self.get(field.id).is_some() { - self.reach(field.id).ty(); + let field_level = self.get(field.hir_id); + if field_level.is_some() { + self.reach(field.hir_id, field_level).ty(); } } } } } + let orig_level = mem::replace(&mut self.prev_level, item_level); intravisit::walk_item(self, item); - self.prev_level = orig_level; } fn visit_block(&mut self, b: &'tcx hir::Block) { - let orig_level = replace(&mut self.prev_level, None); - // Blocks can have public items, for example impls, but they always // start as completely private regardless of publicity of a function, // constant, type, field, etc., in which this block resides. + let orig_level = mem::replace(&mut self.prev_level, None); intravisit::walk_block(self, b); - self.prev_level = orig_level; } - fn visit_mod(&mut self, m: &'tcx hir::Mod, _sp: Span, id: ast::NodeId) { + fn visit_mod(&mut self, m: &'tcx hir::Mod, _sp: Span, id: hir::HirId) { // This code is here instead of in visit_item so that the // crate module gets processed as well. if self.prev_level.is_some() { - let def_id = self.tcx.hir().local_def_id(id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(id); if let Some(exports) = self.tcx.module_exports(def_id) { for export in exports.iter() { if export.vis == ty::Visibility::Public { if let Some(def_id) = export.def.opt_def_id() { - if let Some(node_id) = self.tcx.hir().as_local_node_id(def_id) { - self.update(node_id, Some(AccessLevel::Exported)); + if let Some(hir_id) = self.tcx.hir().as_local_hir_id(def_id) { + self.update(hir_id, Some(AccessLevel::Exported)); } } } @@ -376,26 +732,26 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { fn visit_macro_def(&mut self, md: &'tcx hir::MacroDef) { if md.legacy { - self.update(md.id, Some(AccessLevel::Public)); + self.update(md.hir_id, Some(AccessLevel::Public)); return } let module_did = ty::DefIdTree::parent( self.tcx, - self.tcx.hir().local_def_id(md.id) + self.tcx.hir().local_def_id_from_hir_id(md.hir_id) ).unwrap(); - let mut module_id = self.tcx.hir().as_local_node_id(module_did).unwrap(); + let mut module_id = self.tcx.hir().as_local_hir_id(module_did).unwrap(); let level = if md.vis.node.is_pub() { self.get(module_id) } else { None }; - let level = self.update(md.id, level); + let level = self.update(md.hir_id, level); if level.is_none() { return } loop { - let module = if module_id == ast::CRATE_NODE_ID { + let module = if module_id == hir::CRATE_HIR_ID { &self.tcx.hir().krate().module } else if let hir::ItemKind::Mod(ref module) = - self.tcx.hir().expect_item(module_id).node { + self.tcx.hir().expect_item_by_hir_id(module_id).node { module } else { unreachable!() @@ -403,106 +759,66 @@ impl<'a, 'tcx> Visitor<'tcx> for EmbargoVisitor<'a, 'tcx> { for id in &module.item_ids { self.update(id.id, level); } - let def_id = 
self.tcx.hir().local_def_id(module_id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(module_id); if let Some(exports) = self.tcx.module_exports(def_id) { for export in exports.iter() { - if let Some(node_id) = self.tcx.hir().as_local_node_id(export.def.def_id()) { - self.update(node_id, level); + if let Some(hir_id) = self.tcx.hir().as_local_hir_id(export.def.def_id()) { + self.update(hir_id, level); } } } - if module_id == ast::CRATE_NODE_ID { + if module_id == hir::CRATE_HIR_ID { break } - module_id = self.tcx.hir().get_parent_node(module_id); + module_id = self.tcx.hir().get_parent_node_by_hir_id(module_id); } } } -impl<'b, 'a, 'tcx> ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { +impl<'a, 'tcx> ReachEverythingInTheInterfaceVisitor<'_, 'a, 'tcx> { fn generics(&mut self) -> &mut Self { for param in &self.ev.tcx.generics_of(self.item_def_id).params { match param.kind { + GenericParamDefKind::Lifetime => {} GenericParamDefKind::Type { has_default, .. } => { if has_default { - self.ev.tcx.type_of(param.def_id).visit_with(self); + self.visit(self.ev.tcx.type_of(param.def_id)); } } - GenericParamDefKind::Lifetime => {} + GenericParamDefKind::Const => { + self.visit(self.ev.tcx.type_of(param.def_id)); + } } } self } fn predicates(&mut self) -> &mut Self { - let predicates = self.ev.tcx.predicates_of(self.item_def_id); - for (predicate, _) in &predicates.predicates { - predicate.visit_with(self); - match predicate { - &ty::Predicate::Trait(poly_predicate) => { - self.check_trait_ref(poly_predicate.skip_binder().trait_ref); - }, - &ty::Predicate::Projection(poly_predicate) => { - let tcx = self.ev.tcx; - self.check_trait_ref( - poly_predicate.skip_binder().projection_ty.trait_ref(tcx) - ); - }, - _ => (), - }; - } + self.visit_predicates(self.ev.tcx.predicates_of(self.item_def_id)); self } fn ty(&mut self) -> &mut Self { - let ty = self.ev.tcx.type_of(self.item_def_id); - ty.visit_with(self); - if let ty::FnDef(def_id, _) = ty.sty { - if def_id == self.item_def_id { - self.ev.tcx.fn_sig(def_id).visit_with(self); - } - } + self.visit(self.ev.tcx.type_of(self.item_def_id)); self } - fn impl_trait_ref(&mut self) -> &mut Self { - if let Some(impl_trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) { - self.check_trait_ref(impl_trait_ref); - impl_trait_ref.super_visit_with(self); + fn trait_ref(&mut self) -> &mut Self { + if let Some(trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) { + self.visit_trait(trait_ref); } self } - - fn check_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) { - if let Some(node_id) = self.ev.tcx.hir().as_local_node_id(trait_ref.def_id) { - let item = self.ev.tcx.hir().expect_item(node_id); - self.ev.update(item.id, self.access_level); - } - } } -impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b, 'a, 'tcx> { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { - let ty_def_id = match ty.sty { - ty::Adt(adt, _) => Some(adt.did), - ty::Foreign(did) => Some(did), - ty::Dynamic(ref obj, ..) => Some(obj.principal().def_id()), - ty::Projection(ref proj) => Some(proj.item_def_id), - ty::FnDef(def_id, ..) | - ty::Closure(def_id, ..) | - ty::Generator(def_id, ..) 
| - ty::Opaque(def_id, _) => Some(def_id), - _ => None - }; - - if let Some(def_id) = ty_def_id { - if let Some(node_id) = self.ev.tcx.hir().as_local_node_id(def_id) { - self.ev.update(node_id, self.access_level); - } +impl<'a, 'tcx> DefIdVisitor<'a, 'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.ev.tcx } + fn visit_def_id(&mut self, def_id: DefId, _kind: &str, _descr: &dyn fmt::Display) -> bool { + if let Some(hir_id) = self.ev.tcx.hir().as_local_hir_id(def_id) { + self.ev.update(hir_id, self.access_level); } - - ty.super_visit_with(self) + false } } @@ -516,7 +832,7 @@ impl<'b, 'a, 'tcx> TypeVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'b struct NamePrivacyVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, tables: &'a ty::TypeckTables<'tcx>, - current_item: ast::NodeId, + current_item: hir::HirId, empty_tables: &'a ty::TypeckTables<'tcx>, } @@ -528,32 +844,17 @@ impl<'a, 'tcx> NamePrivacyVisitor<'a, 'tcx> { def: &'tcx ty::AdtDef, // definition of the struct or enum field: &'tcx ty::FieldDef) { // definition of the field let ident = Ident::new(keywords::Invalid.name(), use_ctxt); - let def_id = self.tcx.adjust_ident(ident, def.did, self.current_item).1; + let current_hir = self.current_item; + let def_id = self.tcx.adjust_ident(ident, def.did, current_hir).1; if !def.is_enum() && !field.vis.is_accessible_from(def_id, self.tcx) { struct_span_err!(self.tcx.sess, span, E0451, "field `{}` of {} `{}` is private", - field.ident, def.variant_descr(), self.tcx.item_path_str(def.did)) + field.ident, def.variant_descr(), self.tcx.def_path_str(def.did)) .span_label(span, format!("field `{}` is private", field.ident)) .emit(); } } } -// Set the correct `TypeckTables` for the given `item_id` (or an empty table if -// there is no `TypeckTables` for the item). -fn update_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - item_id: ast::NodeId, - tables: &mut &'a ty::TypeckTables<'tcx>, - empty_tables: &'a ty::TypeckTables<'tcx>) - -> &'a ty::TypeckTables<'tcx> { - let def_id = tcx.hir().local_def_id(item_id); - - if tcx.has_typeck_tables(def_id) { - replace(tables, tcx.typeck_tables_of(def_id)) - } else { - replace(tables, empty_tables) - } -} - impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> { /// We want to visit items in the context of their containing /// module and so forth, so supply a crate for doing a deep walk. 
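`check_field` above (now keyed by `hir_id` and using `def_path_str`) is what produces the E0451 diagnostics for struct expressions and patterns; the user-facing rule it enforces looks like this (illustrative, made-up names):

```rust
mod m {
    pub struct Conn {
        pub addr: String,
        secret: u32,                 // private field
    }

    pub fn open(addr: &str) -> Conn {
        Conn { addr: addr.to_string(), secret: 42 }
    }

    impl Conn {
        pub fn fingerprint(&self) -> u32 {
            self.secret
        }
    }
}

fn main() {
    let c = m::open("localhost");
    println!("{} {}", c.addr, c.fingerprint());

    // Both of these are rejected by `check_field` with E0451
    // ("field `secret` of struct `Conn` is private"):
    //
    //     let c2 = m::Conn { addr: String::new(), secret: 0 };
    //     let m::Conn { addr, secret } = c;
}
```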
@@ -561,29 +862,37 @@ impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> { NestedVisitorMap::All(&self.tcx.hir()) } + fn visit_mod(&mut self, _m: &'tcx hir::Mod, _s: Span, _n: hir::HirId) { + // Don't visit nested modules, since we run a separate visitor walk + // for each module in `privacy_access_levels` + } + fn visit_nested_body(&mut self, body: hir::BodyId) { - let orig_tables = replace(&mut self.tables, self.tcx.body_tables(body)); + let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body)); let body = self.tcx.hir().body(body); self.visit_body(body); self.tables = orig_tables; } fn visit_item(&mut self, item: &'tcx hir::Item) { - let orig_current_item = replace(&mut self.current_item, item.id); - let orig_tables = update_tables(self.tcx, item.id, &mut self.tables, self.empty_tables); + let orig_current_item = mem::replace(&mut self.current_item, item.hir_id); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables)); intravisit::walk_item(self, item); self.current_item = orig_current_item; self.tables = orig_tables; } fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { - let orig_tables = update_tables(self.tcx, ti.id, &mut self.tables, self.empty_tables); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables)); intravisit::walk_trait_item(self, ti); self.tables = orig_tables; } fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { - let orig_tables = update_tables(self.tcx, ii.id, &mut self.tables, self.empty_tables); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables)); intravisit::walk_impl_item(self, ii); self.tables = orig_tables; } @@ -600,7 +909,7 @@ impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> { // unmentioned fields, just check them all. for (vf_index, variant_field) in variant.fields.iter().enumerate() { let field = fields.iter().find(|f| { - self.tcx.field_index(f.id, self.tables) == vf_index + self.tcx.field_index(f.hir_id, self.tables) == vf_index }); let (use_ctxt, span) = match field { Some(field) => (field.ident.span, field.span), @@ -611,7 +920,7 @@ impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> { } else { for field in fields { let use_ctxt = field.ident.span; - let index = self.tcx.field_index(field.id, self.tables); + let index = self.tcx.field_index(field.hir_id, self.tables); self.check_field(use_ctxt, field.span, adt, &variant.fields[index]); } } @@ -630,7 +939,7 @@ impl<'a, 'tcx> Visitor<'tcx> for NamePrivacyVisitor<'a, 'tcx> { let variant = adt.variant_of_def(def); for field in fields { let use_ctxt = field.node.ident.span; - let index = self.tcx.field_index(field.node.id, self.tables); + let index = self.tcx.field_index(field.node.hir_id, self.tables); self.check_field(use_ctxt, field.span, adt, &variant.fields[index]); } } @@ -654,73 +963,22 @@ struct TypePrivacyVisitor<'a, 'tcx: 'a> { in_body: bool, span: Span, empty_tables: &'a ty::TypeckTables<'tcx>, - visited_opaque_tys: FxHashSet } impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> { - fn def_id_visibility(&self, did: DefId) -> ty::Visibility { - match self.tcx.hir().as_local_node_id(did) { - Some(node_id) => { - let vis = match self.tcx.hir().get(node_id) { - Node::Item(item) => &item.vis, - Node::ForeignItem(foreign_item) => &foreign_item.vis, - Node::ImplItem(impl_item) => &impl_item.vis, - Node::TraitItem(..) | - Node::Variant(..) 
=> { - return self.def_id_visibility(self.tcx.hir().get_parent_did(node_id)); - } - Node::StructCtor(vdata) => { - let struct_node_id = self.tcx.hir().get_parent(node_id); - let struct_vis = match self.tcx.hir().get(struct_node_id) { - Node::Item(item) => &item.vis, - node => bug!("unexpected node kind: {:?}", node), - }; - let mut ctor_vis - = ty::Visibility::from_hir(struct_vis, struct_node_id, self.tcx); - for field in vdata.fields() { - let field_vis = ty::Visibility::from_hir(&field.vis, node_id, self.tcx); - if ctor_vis.is_at_least(field_vis, self.tcx) { - ctor_vis = field_vis; - } - } - - // If the structure is marked as non_exhaustive then lower the - // visibility to within the crate. - let struct_def_id = self.tcx.hir().get_parent_did(node_id); - let adt_def = self.tcx.adt_def(struct_def_id); - if adt_def.non_enum_variant().is_field_list_non_exhaustive() - && ctor_vis == ty::Visibility::Public - { - ctor_vis = ty::Visibility::Restricted( - DefId::local(CRATE_DEF_INDEX)); - } - - return ctor_vis; - } - node => bug!("unexpected node kind: {:?}", node) - }; - ty::Visibility::from_hir(vis, node_id, self.tcx) - } - None => self.tcx.visibility(did), - } - } - fn item_is_accessible(&self, did: DefId) -> bool { - self.def_id_visibility(did).is_accessible_from(self.current_item, self.tcx) + def_id_visibility(self.tcx, did).0.is_accessible_from(self.current_item, self.tcx) } // Take node-id of an expression or pattern and check its type for privacy. fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool { self.span = span; - if self.tables.node_id_to_type(id).visit_with(self) { - return true; - } - if self.tables.node_substs(id).visit_with(self) { + if self.visit(self.tables.node_type(id)) || self.visit(self.tables.node_substs(id)) { return true; } if let Some(adjustments) = self.tables.adjustments().get(id) { for adjustment in adjustments { - if adjustment.target.visit_with(self) { + if self.visit(adjustment.target) { return true; } } @@ -728,14 +986,12 @@ impl<'a, 'tcx> TypePrivacyVisitor<'a, 'tcx> { false } - fn check_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) -> bool { - if !self.item_is_accessible(trait_ref.def_id) { - let msg = format!("trait `{}` is private", trait_ref); - self.tcx.sess.span_err(self.span, &msg); - return true; + fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool { + let is_error = !self.item_is_accessible(def_id); + if is_error { + self.tcx.sess.span_err(self.span, &format!("{} `{}` is private", kind, descr)); } - - trait_ref.super_visit_with(self) + is_error } } @@ -746,9 +1002,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { NestedVisitorMap::All(&self.tcx.hir()) } + fn visit_mod(&mut self, _m: &'tcx hir::Mod, _s: Span, _n: hir::HirId) { + // Don't visit nested modules, since we run a separate visitor walk + // for each module in `privacy_access_levels` + } + fn visit_nested_body(&mut self, body: hir::BodyId) { - let orig_tables = replace(&mut self.tables, self.tcx.body_tables(body)); - let orig_in_body = replace(&mut self.in_body, true); + let orig_tables = mem::replace(&mut self.tables, self.tcx.body_tables(body)); + let orig_in_body = mem::replace(&mut self.in_body, true); let body = self.tcx.hir().body(body); self.visit_body(body); self.tables = orig_tables; @@ -759,14 +1020,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { self.span = hir_ty.span; if self.in_body { // Types in bodies. 
- if self.tables.node_id_to_type(hir_ty.hir_id).visit_with(self) { + if self.visit(self.tables.node_type(hir_ty.hir_id)) { return; } } else { // Types in signatures. // FIXME: This is very ineffective. Ideally each HIR type should be converted // into a semantic type only once and the result should be cached somehow. - if rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty).visit_with(self) { + if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)) { return; } } @@ -781,12 +1042,13 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { // The traits' privacy in bodies is already checked as a part of trait object types. let (principal, projections) = rustc_typeck::hir_trait_to_predicates(self.tcx, trait_ref); - if self.check_trait_ref(*principal.skip_binder()) { + if self.visit_trait(*principal.skip_binder()) { return; } for (poly_predicate, _) in projections { let tcx = self.tcx; - if self.check_trait_ref(poly_predicate.skip_binder().projection_ty.trait_ref(tcx)) { + if self.visit(poly_predicate.skip_binder().ty) || + self.visit_trait(poly_predicate.skip_binder().projection_ty.trait_ref(tcx)) { return; } } @@ -811,9 +1073,8 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { hir::ExprKind::MethodCall(_, span, _) => { // Method calls have to be checked specially. self.span = span; - if let Some(def) = self.tables.type_dependent_defs().get(expr.hir_id) { - let def_id = def.def_id(); - if self.tcx.type_of(def_id).visit_with(self) { + if let Some(def_id) = self.tables.type_dependent_def_id(expr.hir_id) { + if self.visit(self.tcx.type_of(def_id)) { return; } } else { @@ -837,11 +1098,12 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { let def = match *qpath { hir::QPath::Resolved(_, ref path) => match path.def { Def::Method(..) | Def::AssociatedConst(..) | - Def::AssociatedTy(..) | Def::Static(..) => Some(path.def), + Def::AssociatedTy(..) | Def::AssociatedExistential(..) | + Def::Static(..) => Some(path.def), _ => None, } hir::QPath::TypeRelative(..) => { - self.tables.type_dependent_defs().get(id).cloned() + self.tables.type_dependent_def(id) } }; if let Some(def) = def { @@ -884,13 +1146,11 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { // Check types in item interfaces. 
fn visit_item(&mut self, item: &'tcx hir::Item) { - let orig_current_item = self.current_item; - let orig_tables = update_tables(self.tcx, - item.id, - &mut self.tables, - self.empty_tables); - let orig_in_body = replace(&mut self.in_body, false); - self.current_item = self.tcx.hir().local_def_id(item.id); + let orig_current_item = mem::replace(&mut self.current_item, + self.tcx.hir().local_def_id_from_hir_id(item.hir_id)); + let orig_in_body = mem::replace(&mut self.in_body, false); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, item.hir_id, self.empty_tables)); intravisit::walk_item(self, item); self.tables = orig_tables; self.in_body = orig_in_body; @@ -898,108 +1158,24 @@ impl<'a, 'tcx> Visitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { } fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem) { - let orig_tables = update_tables(self.tcx, ti.id, &mut self.tables, self.empty_tables); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, ti.hir_id, self.empty_tables)); intravisit::walk_trait_item(self, ti); self.tables = orig_tables; } fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem) { - let orig_tables = update_tables(self.tcx, ii.id, &mut self.tables, self.empty_tables); + let orig_tables = + mem::replace(&mut self.tables, item_tables(self.tcx, ii.hir_id, self.empty_tables)); intravisit::walk_impl_item(self, ii); self.tables = orig_tables; } } -impl<'a, 'tcx> TypeVisitor<'tcx> for TypePrivacyVisitor<'a, 'tcx> { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { - match ty.sty { - ty::Adt(&ty::AdtDef { did: def_id, .. }, ..) | - ty::FnDef(def_id, ..) | - ty::Foreign(def_id) => { - if !self.item_is_accessible(def_id) { - let msg = format!("type `{}` is private", ty); - self.tcx.sess.span_err(self.span, &msg); - return true; - } - if let ty::FnDef(..) = ty.sty { - if self.tcx.fn_sig(def_id).visit_with(self) { - return true; - } - } - // Inherent static methods don't have self type in substs, - // we have to check it additionally. - if let Some(assoc_item) = self.tcx.opt_associated_item(def_id) { - if let ty::ImplContainer(impl_def_id) = assoc_item.container { - if self.tcx.type_of(impl_def_id).visit_with(self) { - return true; - } - } - } - } - ty::Dynamic(ref predicates, ..) => { - let is_private = predicates.skip_binder().iter().any(|predicate| { - let def_id = match *predicate { - ty::ExistentialPredicate::Trait(trait_ref) => trait_ref.def_id, - ty::ExistentialPredicate::Projection(proj) => - proj.trait_ref(self.tcx).def_id, - ty::ExistentialPredicate::AutoTrait(def_id) => def_id, - }; - !self.item_is_accessible(def_id) - }); - if is_private { - let msg = format!("type `{}` is private", ty); - self.tcx.sess.span_err(self.span, &msg); - return true; - } - } - ty::Projection(ref proj) => { - let tcx = self.tcx; - if self.check_trait_ref(proj.trait_ref(tcx)) { - return true; - } - } - ty::Opaque(def_id, ..) => { - for (predicate, _) in &self.tcx.predicates_of(def_id).predicates { - let trait_ref = match *predicate { - ty::Predicate::Trait(ref poly_trait_predicate) => { - Some(poly_trait_predicate.skip_binder().trait_ref) - } - ty::Predicate::Projection(ref poly_projection_predicate) => { - if poly_projection_predicate.skip_binder().ty.visit_with(self) { - return true; - } - Some(poly_projection_predicate.skip_binder() - .projection_ty.trait_ref(self.tcx)) - } - ty::Predicate::TypeOutlives(..) | ty::Predicate::RegionOutlives(..) 
=> None, - _ => bug!("unexpected predicate: {:?}", predicate), - }; - if let Some(trait_ref) = trait_ref { - if !self.item_is_accessible(trait_ref.def_id) { - let msg = format!("trait `{}` is private", trait_ref); - self.tcx.sess.span_err(self.span, &msg); - return true; - } - for subst in trait_ref.substs.iter() { - // Skip repeated `Opaque`s to avoid infinite recursion. - if let UnpackedKind::Type(ty) = subst.unpack() { - if let ty::Opaque(def_id, ..) = ty.sty { - if !self.visited_opaque_tys.insert(def_id) { - continue; - } - } - } - if subst.visit_with(self) { - return true; - } - } - } - } - } - _ => {} - } - - ty.super_visit_with(self) +impl<'a, 'tcx> DefIdVisitor<'a, 'tcx> for TypePrivacyVisitor<'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx } + fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool { + self.check_def_id(def_id, kind, descr) } } @@ -1015,7 +1191,7 @@ struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx: 'a> { access_levels: &'a AccessLevels, in_variant: bool, // Set of errors produced by this obsolete visitor. - old_error_set: NodeSet, + old_error_set: HirIdSet, } struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b: 'a, 'tcx: 'b> { @@ -1038,10 +1214,10 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { // A path can only be private if: // it's in this crate... - if let Some(node_id) = self.tcx.hir().as_local_node_id(did) { + if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) { // .. and it corresponds to a private type in the AST (this returns // `None` for type parameters). - match self.tcx.hir().find(node_id) { + match self.tcx.hir().find_by_hir_id(hir_id) { Some(Node::Item(ref item)) => !item.vis.node.is_pub(), Some(_) | None => false, } @@ -1050,7 +1226,7 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { } } - fn trait_is_public(&self, trait_id: ast::NodeId) -> bool { + fn trait_is_public(&self, trait_id: hir::HirId) -> bool { // FIXME: this would preferably be using `exported_items`, but all // traits are exported currently (see `EmbargoVisitor.exported_trait`). 
self.access_levels.is_public(trait_id) @@ -1059,12 +1235,12 @@ impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { fn check_generic_bound(&mut self, bound: &hir::GenericBound) { if let hir::GenericBound::Trait(ref trait_ref, _) = *bound { if self.path_is_private_type(&trait_ref.trait_ref.path) { - self.old_error_set.insert(trait_ref.trait_ref.ref_id); + self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id); } } } - fn item_is_public(&self, id: &ast::NodeId, vis: &hir::Visibility) -> bool { + fn item_is_public(&self, id: &hir::HirId, vis: &hir::Visibility) -> bool { self.access_levels.is_reachable(*id) || vis.node.is_pub() } } @@ -1113,7 +1289,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { hir::ItemKind::ForeignMod(_) => {} hir::ItemKind::Trait(.., ref bounds, _) => { - if !self.trait_is_public(item.id) { + if !self.trait_is_public(item.hir_id) { return } @@ -1155,8 +1331,8 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { |tr| { let did = tr.path.def.def_id(); - if let Some(node_id) = self.tcx.hir().as_local_node_id(did) { - self.trait_is_public(node_id) + if let Some(hir_id) = self.tcx.hir().as_local_hir_id(did) { + self.trait_is_public(hir_id) } else { true // external traits must be public } @@ -1178,7 +1354,8 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { match impl_item.node { hir::ImplItemKind::Const(..) | hir::ImplItemKind::Method(..) => { - self.access_levels.is_reachable(impl_item.id) + self.access_levels.is_reachable( + impl_item_ref.id.hir_id) } hir::ImplItemKind::Existential(..) | hir::ImplItemKind::Type(_) => false, @@ -1203,7 +1380,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { match impl_item.node { hir::ImplItemKind::Const(..) | hir::ImplItemKind::Method(..) - if self.item_is_public(&impl_item.id, &impl_item.vis) => + if self.item_is_public(&impl_item.hir_id, &impl_item.vis) => { intravisit::walk_impl_item(self, impl_item) } @@ -1244,14 +1421,14 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { // methods will be visible as `Public::foo`. let mut found_pub_static = false; for impl_item_ref in impl_item_refs { - if self.item_is_public(&impl_item_ref.id.node_id, &impl_item_ref.vis) { + if self.item_is_public(&impl_item_ref.id.hir_id, &impl_item_ref.vis) { let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); match impl_item_ref.kind { - hir::AssociatedItemKind::Const => { + AssociatedItemKind::Const => { found_pub_static = true; intravisit::walk_impl_item(self, impl_item); } - hir::AssociatedItemKind::Method { has_self: false } => { + AssociatedItemKind::Method { has_self: false } => { found_pub_static = true; intravisit::walk_impl_item(self, impl_item); } @@ -1271,7 +1448,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { hir::ItemKind::Ty(..) => return, // Not at all public, so we don't care. 
- _ if !self.item_is_public(&item.id, &item.vis) => { + _ if !self.item_is_public(&item.hir_id, &item.vis) => { return; } @@ -1293,13 +1470,13 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { } for predicate in &generics.where_clause.predicates { match predicate { - &hir::WherePredicate::BoundPredicate(ref bound_pred) => { + hir::WherePredicate::BoundPredicate(bound_pred) => { for bound in bound_pred.bounds.iter() { self.check_generic_bound(bound) } } - &hir::WherePredicate::RegionPredicate(_) => {} - &hir::WherePredicate::EqPredicate(ref eq_pred) => { + hir::WherePredicate::RegionPredicate(_) => {} + hir::WherePredicate::EqPredicate(eq_pred) => { self.visit_ty(&eq_pred.rhs_ty); } } @@ -1307,7 +1484,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { } fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem) { - if self.access_levels.is_reachable(item.id) { + if self.access_levels.is_reachable(item.hir_id) { intravisit::walk_foreign_item(self, item) } } @@ -1315,7 +1492,7 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { fn visit_ty(&mut self, t: &'tcx hir::Ty) { if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = t.node { if self.path_is_private_type(path) { - self.old_error_set.insert(t.id); + self.old_error_set.insert(t.hir_id); } } intravisit::walk_ty(self, t) @@ -1324,8 +1501,8 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { fn visit_variant(&mut self, v: &'tcx hir::Variant, g: &'tcx hir::Generics, - item_id: ast::NodeId) { - if self.access_levels.is_reachable(v.node.data.id()) { + item_id: hir::HirId) { + if self.access_levels.is_reachable(v.node.id) { self.in_variant = true; intravisit::walk_variant(self, v, g, item_id); self.in_variant = false; @@ -1355,27 +1532,30 @@ impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> { struct SearchInterfaceForPrivateItemsVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, + item_id: hir::HirId, item_def_id: DefId, span: Span, /// The visitor checks that each component type is at least this visible. required_visibility: ty::Visibility, - /// The visibility of the least visible component that has been visited. - min_visibility: ty::Visibility, has_pub_restricted: bool, has_old_errors: bool, in_assoc_ty: bool, + private_crates: FxHashSet } impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { fn generics(&mut self) -> &mut Self { for param in &self.tcx.generics_of(self.item_def_id).params { match param.kind { + GenericParamDefKind::Lifetime => {} GenericParamDefKind::Type { has_default, .. } => { if has_default { - self.tcx.type_of(param.def_id).visit_with(self); + self.visit(self.tcx.type_of(param.def_id)); } } - GenericParamDefKind::Lifetime => {} + GenericParamDefKind::Const => { + self.visit(self.tcx.type_of(param.def_id)); + } } } self @@ -1388,144 +1568,83 @@ impl<'a, 'tcx: 'a> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { // consider the ones that the user wrote. This is important // for the inferred outlives rules; see // `src/test/ui/rfc-2093-infer-outlives/privacy.rs`. 
- let predicates = self.tcx.explicit_predicates_of(self.item_def_id); - for (predicate, _) in &predicates.predicates { - predicate.visit_with(self); - match predicate { - &ty::Predicate::Trait(poly_predicate) => { - self.check_trait_ref(poly_predicate.skip_binder().trait_ref); - }, - &ty::Predicate::Projection(poly_predicate) => { - let tcx = self.tcx; - self.check_trait_ref( - poly_predicate.skip_binder().projection_ty.trait_ref(tcx) - ); - }, - _ => (), - }; - } + self.visit_predicates(self.tcx.explicit_predicates_of(self.item_def_id)); self } fn ty(&mut self) -> &mut Self { - let ty = self.tcx.type_of(self.item_def_id); - ty.visit_with(self); - if let ty::FnDef(def_id, _) = ty.sty { - if def_id == self.item_def_id { - self.tcx.fn_sig(def_id).visit_with(self); - } - } + self.visit(self.tcx.type_of(self.item_def_id)); self } - fn impl_trait_ref(&mut self) -> &mut Self { - if let Some(impl_trait_ref) = self.tcx.impl_trait_ref(self.item_def_id) { - self.check_trait_ref(impl_trait_ref); - impl_trait_ref.super_visit_with(self); - } - self - } + fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool { + if self.leaks_private_dep(def_id) { + self.tcx.lint_hir(lint::builtin::EXPORTED_PRIVATE_DEPENDENCIES, + self.item_id, + self.span, + &format!("{} `{}` from private dependency '{}' in public \ + interface", kind, descr, + self.tcx.crate_name(def_id.krate))); - fn check_trait_ref(&mut self, trait_ref: ty::TraitRef<'tcx>) { - // Non-local means public (private items can't leave their crate, modulo bugs). - if let Some(node_id) = self.tcx.hir().as_local_node_id(trait_ref.def_id) { - let item = self.tcx.hir().expect_item(node_id); - let vis = ty::Visibility::from_hir(&item.vis, node_id, self.tcx); - if !vis.is_at_least(self.min_visibility, self.tcx) { - self.min_visibility = vis; - } - if !vis.is_at_least(self.required_visibility, self.tcx) { - if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty { - struct_span_err!(self.tcx.sess, self.span, E0445, - "private trait `{}` in public interface", trait_ref) - .span_label(self.span, format!( - "can't leak private trait")) - .emit(); - } else { - self.tcx.lint_node(lint::builtin::PRIVATE_IN_PUBLIC, - node_id, - self.span, - &format!("private trait `{}` in public \ - interface (error E0445)", trait_ref)); - } - } } - } -} -impl<'a, 'tcx: 'a> TypeVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { - fn visit_ty(&mut self, ty: Ty<'tcx>) -> bool { - let ty_def_id = match ty.sty { - ty::Adt(adt, _) => Some(adt.did), - ty::Foreign(did) => Some(did), - ty::Dynamic(ref obj, ..) => Some(obj.principal().def_id()), - ty::Projection(ref proj) => { - if self.required_visibility == ty::Visibility::Invisible { - // Conservatively approximate the whole type alias as public without - // recursing into its components when determining impl publicity. - // For example, `impl ::Alias {...}` may be a public impl - // even if both `Type` and `Trait` are private. - // Ideally, associated types should be substituted in the same way as - // free type aliases, but this isn't done yet. - return false; - } - let trait_ref = proj.trait_ref(self.tcx); - Some(trait_ref.def_id) - } - _ => None + let hir_id = match self.tcx.hir().as_local_hir_id(def_id) { + Some(hir_id) => hir_id, + None => return false, }; - if let Some(def_id) = ty_def_id { - // Non-local means public (private items can't leave their crate, modulo bugs). 
- if let Some(node_id) = self.tcx.hir().as_local_node_id(def_id) { - let hir_vis = match self.tcx.hir().find(node_id) { - Some(Node::Item(item)) => &item.vis, - Some(Node::ForeignItem(item)) => &item.vis, - _ => bug!("expected item of foreign item"), + let (vis, vis_span, vis_descr) = def_id_visibility(self.tcx, def_id); + if !vis.is_at_least(self.required_visibility, self.tcx) { + let msg = format!("{} {} `{}` in public interface", vis_descr, kind, descr); + if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty { + let mut err = if kind == "trait" { + struct_span_err!(self.tcx.sess, self.span, E0445, "{}", msg) + } else { + struct_span_err!(self.tcx.sess, self.span, E0446, "{}", msg) }; + err.span_label(self.span, format!("can't leak {} {}", vis_descr, kind)); + err.span_label(vis_span, format!("`{}` declared as {}", descr, vis_descr)); + err.emit(); + } else { + let err_code = if kind == "trait" { "E0445" } else { "E0446" }; + self.tcx.lint_hir(lint::builtin::PRIVATE_IN_PUBLIC, hir_id, self.span, + &format!("{} (error {})", msg, err_code)); + } - let vis = ty::Visibility::from_hir(hir_vis, node_id, self.tcx); + } - if !vis.is_at_least(self.min_visibility, self.tcx) { - self.min_visibility = vis; - } - if !vis.is_at_least(self.required_visibility, self.tcx) { - let vis_adj = match hir_vis.node { - hir::VisibilityKind::Crate(_) => "crate-visible", - hir::VisibilityKind::Restricted { .. } => "restricted", - _ => "private" - }; + false + } - if self.has_pub_restricted || self.has_old_errors || self.in_assoc_ty { - let mut err = struct_span_err!(self.tcx.sess, self.span, E0446, - "{} type `{}` in public interface", vis_adj, ty); - err.span_label(self.span, format!("can't leak {} type", vis_adj)); - err.span_label(hir_vis.span, format!("`{}` declared as {}", ty, vis_adj)); - err.emit(); - } else { - self.tcx.lint_node(lint::builtin::PRIVATE_IN_PUBLIC, - node_id, - self.span, - &format!("{} type `{}` in public \ - interface (error E0446)", vis_adj, ty)); - } - } - } - } + /// An item is 'leaked' from a private dependency if all + /// of the following are true: + /// 1. It's contained within a public type + /// 2. 
It comes from a private crate + fn leaks_private_dep(&self, item_id: DefId) -> bool { + let ret = self.required_visibility == ty::Visibility::Public && + self.private_crates.contains(&item_id.krate); - ty.super_visit_with(self) + log::debug!("leaks_private_dep(item_id={:?})={}", item_id, ret); + return ret; + } +} + +impl<'a, 'tcx> DefIdVisitor<'a, 'tcx> for SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.tcx } + fn visit_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool { + self.check_def_id(def_id, kind, descr) } } struct PrivateItemsInPublicInterfacesVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, has_pub_restricted: bool, - old_error_set: &'a NodeSet, - inner_visibility: ty::Visibility, + old_error_set: &'a HirIdSet, + private_crates: FxHashSet } impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { - fn check(&self, item_id: ast::NodeId, required_visibility: ty::Visibility) + fn check(&self, item_id: hir::HirId, required_visibility: ty::Visibility) -> SearchInterfaceForPrivateItemsVisitor<'a, 'tcx> { let mut has_old_errors = false; @@ -1538,7 +1657,7 @@ impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { has_old_errors = true; break; } - let parent = self.tcx.hir().get_parent_node(id); + let parent = self.tcx.hir().get_parent_node_by_hir_id(id); if parent == id { break; } @@ -1552,13 +1671,32 @@ impl<'a, 'tcx> PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> { SearchInterfaceForPrivateItemsVisitor { tcx: self.tcx, - item_def_id: self.tcx.hir().local_def_id(item_id), - span: self.tcx.hir().span(item_id), - min_visibility: ty::Visibility::Public, + item_id, + item_def_id: self.tcx.hir().local_def_id_from_hir_id(item_id), + span: self.tcx.hir().span_by_hir_id(item_id), required_visibility, has_pub_restricted: self.has_pub_restricted, has_old_errors, in_assoc_ty: false, + private_crates: self.private_crates.clone() + } + } + + fn check_trait_or_impl_item(&self, hir_id: hir::HirId, assoc_item_kind: AssociatedItemKind, + defaultness: hir::Defaultness, vis: ty::Visibility) { + let mut check = self.check(hir_id, vis); + + let (check_ty, is_assoc_ty) = match assoc_item_kind { + AssociatedItemKind::Const | AssociatedItemKind::Method { .. } => (true, false), + AssociatedItemKind::Type => (defaultness.has_value(), true), + // `ty()` for existential types is the underlying type, + // it's not a part of interface, so we skip it. + AssociatedItemKind::Existential => (false, true), + }; + check.in_assoc_ty = is_assoc_ty; + check.generics().predicates(); + if check_ty { + check.ty(); } } } @@ -1570,11 +1708,7 @@ impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> fn visit_item(&mut self, item: &'tcx hir::Item) { let tcx = self.tcx; - let min = |vis1: ty::Visibility, vis2| { - if vis1.is_at_least(vis2, tcx) { vis2 } else { vis1 } - }; - - let item_visibility = ty::Visibility::from_hir(&item.vis, item.id, tcx); + let item_visibility = ty::Visibility::from_hir(&item.vis, item.hir_id, tcx); match item.node { // Crates are always public. @@ -1585,158 +1719,117 @@ impl<'a, 'tcx> Visitor<'tcx> for PrivateItemsInPublicInterfacesVisitor<'a, 'tcx> hir::ItemKind::Use(..) => {} // No subitems. hir::ItemKind::GlobalAsm(..) => {} - hir::ItemKind::Existential(hir::ExistTy { impl_trait_fn: Some(_), .. 
}) => { - // Check the traits being exposed, as they're separate, - // e.g., `impl Iterator` has two predicates, - // `X: Iterator` and `::Item == T`, - // where `X` is the `impl Iterator` itself, - // stored in `predicates_of`, not in the `Ty` itself. - self.check(item.id, item_visibility).predicates(); - } // Subitems of these items have inherited publicity. - hir::ItemKind::Const(..) | hir::ItemKind::Static(..) | hir::ItemKind::Fn(..) | - hir::ItemKind::Existential(..) | - hir::ItemKind::Ty(..) => { - self.check(item.id, item_visibility).generics().predicates().ty(); - - // Recurse for e.g., `impl Trait` (see `visit_ty`). - self.inner_visibility = item_visibility; - intravisit::walk_item(self, item); + hir::ItemKind::Const(..) | hir::ItemKind::Static(..) | + hir::ItemKind::Fn(..) | hir::ItemKind::Ty(..) => { + self.check(item.hir_id, item_visibility).generics().predicates().ty(); + } + hir::ItemKind::Existential(..) => { + // `ty()` for existential types is the underlying type, + // it's not a part of interface, so we skip it. + self.check(item.hir_id, item_visibility).generics().predicates(); } hir::ItemKind::Trait(.., ref trait_item_refs) => { - self.check(item.id, item_visibility).generics().predicates(); + self.check(item.hir_id, item_visibility).generics().predicates(); for trait_item_ref in trait_item_refs { - let mut check = self.check(trait_item_ref.id.node_id, item_visibility); - check.in_assoc_ty = trait_item_ref.kind == hir::AssociatedItemKind::Type; - check.generics().predicates(); - - if trait_item_ref.kind == hir::AssociatedItemKind::Type && - !trait_item_ref.defaultness.has_value() { - // No type to visit. - } else { - check.ty(); - } + self.check_trait_or_impl_item(trait_item_ref.id.hir_id, trait_item_ref.kind, + trait_item_ref.defaultness, item_visibility); } } hir::ItemKind::TraitAlias(..) => { - self.check(item.id, item_visibility).generics().predicates(); + self.check(item.hir_id, item_visibility).generics().predicates(); } hir::ItemKind::Enum(ref def, _) => { - self.check(item.id, item_visibility).generics().predicates(); + self.check(item.hir_id, item_visibility).generics().predicates(); for variant in &def.variants { for field in variant.node.data.fields() { - self.check(field.id, item_visibility).ty(); + self.check(field.hir_id, item_visibility).ty(); } } } // Subitems of foreign modules have their own publicity. hir::ItemKind::ForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { - let vis = ty::Visibility::from_hir(&foreign_item.vis, item.id, tcx); - self.check(foreign_item.id, vis).generics().predicates().ty(); + let vis = ty::Visibility::from_hir(&foreign_item.vis, item.hir_id, tcx); + self.check(foreign_item.hir_id, vis).generics().predicates().ty(); } } // Subitems of structs and unions have their own publicity. hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { - self.check(item.id, item_visibility).generics().predicates(); + self.check(item.hir_id, item_visibility).generics().predicates(); for field in struct_def.fields() { - let field_visibility = ty::Visibility::from_hir(&field.vis, item.id, tcx); - self.check(field.id, min(item_visibility, field_visibility)).ty(); + let field_visibility = ty::Visibility::from_hir(&field.vis, item.hir_id, tcx); + self.check(field.hir_id, min(item_visibility, field_visibility, tcx)).ty(); } } // An inherent impl is public when its type is public // Subitems of inherent impls have their own publicity. 
- hir::ItemKind::Impl(.., None, _, ref impl_item_refs) => { - let ty_vis = - self.check(item.id, ty::Visibility::Invisible).ty().min_visibility; - self.check(item.id, ty_vis).generics().predicates(); - - for impl_item_ref in impl_item_refs { - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - let impl_item_vis = ty::Visibility::from_hir(&impl_item.vis, item.id, tcx); - let mut check = self.check(impl_item.id, min(impl_item_vis, ty_vis)); - check.in_assoc_ty = impl_item_ref.kind == hir::AssociatedItemKind::Type; - check.generics().predicates().ty(); - - // Recurse for e.g., `impl Trait` (see `visit_ty`). - self.inner_visibility = impl_item_vis; - intravisit::walk_impl_item(self, impl_item); - } - } // A trait impl is public when both its type and its trait are public // Subitems of trait impls have inherited publicity. - hir::ItemKind::Impl(.., Some(_), _, ref impl_item_refs) => { - let vis = self.check(item.id, ty::Visibility::Invisible) - .ty().impl_trait_ref().min_visibility; - self.check(item.id, vis).generics().predicates(); + hir::ItemKind::Impl(.., ref trait_ref, _, ref impl_item_refs) => { + let impl_vis = ty::Visibility::of_impl(item.hir_id, tcx, &Default::default()); + self.check(item.hir_id, impl_vis).generics().predicates(); for impl_item_ref in impl_item_refs { - let impl_item = self.tcx.hir().impl_item(impl_item_ref.id); - let mut check = self.check(impl_item.id, vis); - check.in_assoc_ty = impl_item_ref.kind == hir::AssociatedItemKind::Type; - check.generics().predicates().ty(); - - // Recurse for e.g., `impl Trait` (see `visit_ty`). - self.inner_visibility = vis; - intravisit::walk_impl_item(self, impl_item); + let impl_item = tcx.hir().impl_item(impl_item_ref.id); + let impl_item_vis = if trait_ref.is_none() { + min(ty::Visibility::from_hir(&impl_item.vis, item.hir_id, tcx), + impl_vis, + tcx) + } else { + impl_vis + }; + self.check_trait_or_impl_item(impl_item_ref.id.hir_id, impl_item_ref.kind, + impl_item_ref.defaultness, impl_item_vis); } } } } - - fn visit_impl_item(&mut self, _impl_item: &'tcx hir::ImplItem) { - // Handled in `visit_item` above. - } - - // Don't recurse into expressions in array sizes or const initializers. - fn visit_expr(&mut self, _: &'tcx hir::Expr) {} - // Don't recurse into patterns in function arguments. - fn visit_pat(&mut self, _: &'tcx hir::Pat) {} } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { privacy_access_levels, + check_private_in_public, + check_mod_privacy, ..*providers }; } -pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Lrc { - tcx.privacy_access_levels(LOCAL_CRATE) -} - -fn privacy_access_levels<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - krate: CrateNum) - -> Lrc { - assert_eq!(krate, LOCAL_CRATE); - - let krate = tcx.hir().krate(); +fn check_mod_privacy<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { let empty_tables = ty::TypeckTables::empty(None); // Check privacy of names not checked in previous compilation stages. let mut visitor = NamePrivacyVisitor { tcx, tables: &empty_tables, - current_item: CRATE_NODE_ID, + current_item: hir::DUMMY_HIR_ID, empty_tables: &empty_tables, }; - intravisit::walk_crate(&mut visitor, krate); + let (module, span, hir_id) = tcx.hir().get_module(module_def_id); + intravisit::walk_mod(&mut visitor, module, hir_id); // Check privacy of explicitly written types and traits as well as // inferred types of expressions and patterns. 
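A small runnable illustration (not part of the patch) of the comment above that an inherent impl is only as public as its self type: marking a method `pub` on a private type does not make it part of the crate's interface.

```rust
mod m {
    pub struct Public;
    struct Private;

    impl Public {
        pub fn usable_outside(&self) {} // reachable as `m::Public::usable_outside`
    }

    impl Private {
        pub fn hidden(&self) {} // `pub`, but `Private` is not nameable outside `m`
    }
}

fn main() {
    m::Public.usable_outside();
    // `m::Private` (and therefore `hidden`) cannot be referred to here.
}
```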
let mut visitor = TypePrivacyVisitor { tcx, tables: &empty_tables, - current_item: DefId::local(CRATE_DEF_INDEX), + current_item: module_def_id, in_body: false, - span: krate.span, + span, empty_tables: &empty_tables, - visited_opaque_tys: FxHashSet::default() }; - intravisit::walk_crate(&mut visitor, krate); + intravisit::walk_mod(&mut visitor, module, hir_id); +} + +fn privacy_access_levels<'tcx>( + tcx: TyCtxt<'_, 'tcx, 'tcx>, + krate: CrateNum, +) -> Lrc { + assert_eq!(krate, LOCAL_CRATE); // Build up a set of all exported items in the AST. This is a set of all // items which are reachable from external crates based on visibility. @@ -1747,45 +1840,55 @@ fn privacy_access_levels<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, changed: false, }; loop { - intravisit::walk_crate(&mut visitor, krate); + intravisit::walk_crate(&mut visitor, tcx.hir().krate()); if visitor.changed { visitor.changed = false; } else { break } } - visitor.update(ast::CRATE_NODE_ID, Some(AccessLevel::Public)); + visitor.update(hir::CRATE_HIR_ID, Some(AccessLevel::Public)); - { - let mut visitor = ObsoleteVisiblePrivateTypesVisitor { - tcx, - access_levels: &visitor.access_levels, - in_variant: false, - old_error_set: Default::default(), - }; - intravisit::walk_crate(&mut visitor, krate); + Lrc::new(visitor.access_levels) +} +fn check_private_in_public<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, krate: CrateNum) { + assert_eq!(krate, LOCAL_CRATE); - let has_pub_restricted = { - let mut pub_restricted_visitor = PubRestrictedVisitor { - tcx, - has_pub_restricted: false - }; - intravisit::walk_crate(&mut pub_restricted_visitor, krate); - pub_restricted_visitor.has_pub_restricted - }; + let access_levels = tcx.privacy_access_levels(LOCAL_CRATE); + + let krate = tcx.hir().krate(); - // Check for private types and traits in public interfaces. - let mut visitor = PrivateItemsInPublicInterfacesVisitor { + let mut visitor = ObsoleteVisiblePrivateTypesVisitor { + tcx, + access_levels: &access_levels, + in_variant: false, + old_error_set: Default::default(), + }; + intravisit::walk_crate(&mut visitor, krate); + + let has_pub_restricted = { + let mut pub_restricted_visitor = PubRestrictedVisitor { tcx, - has_pub_restricted, - old_error_set: &visitor.old_error_set, - inner_visibility: ty::Visibility::Public, + has_pub_restricted: false }; - krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor)); - } + intravisit::walk_crate(&mut pub_restricted_visitor, krate); + pub_restricted_visitor.has_pub_restricted + }; - Lrc::new(visitor.access_levels) + let private_crates: FxHashSet = tcx.sess.opts.extern_private.iter() + .flat_map(|c| { + tcx.crates().iter().find(|&&krate| &tcx.crate_name(krate) == c).cloned() + }).collect(); + + // Check for private types and traits in public interfaces. + let mut visitor = PrivateItemsInPublicInterfacesVisitor { + tcx, + has_pub_restricted, + old_error_set: &visitor.old_error_set, + private_crates + }; + krate.visit_all_item_likes(&mut DeepVisitor::new(&mut visitor)); } __build_diagnostic_array! 
{ librustc_privacy, DIAGNOSTICS } diff --git a/src/librustc_resolve/Cargo.toml b/src/librustc_resolve/Cargo.toml index 3a8e84a3280c6..836b4ad38ca88 100644 --- a/src/librustc_resolve/Cargo.toml +++ b/src/librustc_resolve/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustc_resolve" version = "0.0.0" +edition = "2018" [lib] name = "rustc_resolve" @@ -15,7 +16,7 @@ log = "0.4" syntax = { path = "../libsyntax" } rustc = { path = "../librustc" } arena = { path = "../libarena" } -rustc_errors = { path = "../librustc_errors" } +errors = { path = "../librustc_errors", package = "rustc_errors" } syntax_pos = { path = "../libsyntax_pos" } rustc_data_structures = { path = "../librustc_data_structures" } rustc_metadata = { path = "../librustc_metadata" } diff --git a/src/librustc_resolve/build_reduced_graph.rs b/src/librustc_resolve/build_reduced_graph.rs index 191e4e8fe2a83..7ce264db755db 100644 --- a/src/librustc_resolve/build_reduced_graph.rs +++ b/src/librustc_resolve/build_reduced_graph.rs @@ -1,26 +1,17 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Reduced graph building +//! Reduced graph building. //! //! Here we build the "reduced graph": the graph of the module tree without //! any imports resolved. -use macros::{InvocationData, ParentScope, LegacyScope}; -use resolve_imports::ImportDirective; -use resolve_imports::ImportDirectiveSubclass::{self, GlobImport, SingleImport}; -use {Module, ModuleData, ModuleKind, NameBinding, NameBindingKind, Segment, ToNameBinding}; -use {ModuleOrUniformRoot, PerNS, Resolver, ResolverArenas, ExternPreludeEntry}; -use Namespace::{self, TypeNS, ValueNS, MacroNS}; -use {resolve_error, resolve_struct_error, ResolutionError}; +use crate::macros::{InvocationData, ParentScope, LegacyScope}; +use crate::resolve_imports::ImportDirective; +use crate::resolve_imports::ImportDirectiveSubclass::{self, GlobImport, SingleImport}; +use crate::{Module, ModuleData, ModuleKind, NameBinding, NameBindingKind, Segment, ToNameBinding}; +use crate::{ModuleOrUniformRoot, PerNS, Resolver, ResolverArenas, ExternPreludeEntry}; +use crate::Namespace::{self, TypeNS, ValueNS, MacroNS}; +use crate::{resolve_error, resolve_struct_error, ResolutionError}; +use rustc::bug; use rustc::hir::def::*; use rustc::hir::def_id::{CrateNum, CRATE_DEF_INDEX, LOCAL_CRATE, DefId}; use rustc::ty; @@ -31,6 +22,8 @@ use std::cell::Cell; use std::ptr; use rustc_data_structures::sync::Lrc; +use errors::Applicability; + use syntax::ast::{Name, Ident}; use syntax::attr; @@ -40,18 +33,22 @@ use syntax::ext::base::{MacroKind, SyntaxExtension}; use syntax::ext::base::Determinacy::Undetermined; use syntax::ext::hygiene::Mark; use syntax::ext::tt::macro_rules; -use syntax::feature_gate::{is_builtin_attr, emit_feature_err, GateIssue}; +use syntax::feature_gate::is_builtin_attr; use syntax::parse::token::{self, Token}; +use syntax::span_err; use syntax::std_inject::injected_crate_name; use syntax::symbol::keywords; use syntax::visit::{self, Visitor}; use syntax_pos::{Span, DUMMY_SP}; +use log::debug; + impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, Mark) { fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { 
arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Module(self.0), + ambiguity: None, vis: self.1, span: self.2, expansion: self.3, @@ -63,6 +60,7 @@ impl<'a> ToNameBinding<'a> for (Def, ty::Visibility, Span, Mark) { fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Def(self.0, false), + ambiguity: None, vis: self.1, span: self.2, expansion: self.3, @@ -76,6 +74,7 @@ impl<'a> ToNameBinding<'a> for (Def, ty::Visibility, Span, Mark, IsMacroExport) fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> { arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Def(self.0, true), + ambiguity: None, vis: self.1, span: self.2, expansion: self.3, @@ -83,7 +82,7 @@ impl<'a> ToNameBinding<'a> for (Def, ty::Visibility, Span, Mark, IsMacroExport) } } -impl<'a, 'cl> Resolver<'a, 'cl> { +impl<'a> Resolver<'a> { /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined; /// otherwise, reports an error. pub fn define(&mut self, parent: Module<'a>, ident: Ident, ns: Namespace, def: T) @@ -132,12 +131,9 @@ impl<'a, 'cl> Resolver<'a, 'cl> { // so prefixes are prepended with crate root segment if necessary. // The root is prepended lazily, when the first non-empty prefix or terminating glob // appears, so imports in braced groups can have roots prepended independently. - // 2015 identifiers used on global 2018 edition enter special "virtual 2015 mode", don't - // get crate root prepended, but get special treatment during in-scope resolution instead. let is_glob = if let ast::UseTreeKind::Glob = use_tree.kind { true } else { false }; let crate_root = match prefix_iter.peek() { - Some(seg) if !seg.ident.is_path_segment_keyword() && - seg.ident.span.rust_2015() && self.session.rust_2015() => { + Some(seg) if !seg.ident.is_path_segment_keyword() && seg.ident.span.rust_2015() => { Some(seg.ident.span.ctxt()) } None if is_glob && use_tree.span.rust_2015() => { @@ -243,12 +239,14 @@ impl<'a, 'cl> Resolver<'a, 'cl> { macro_ns: Cell::new(None), }, type_ns_only, + nested, }; self.add_import_directive( module_path, subclass, use_tree.span, id, + item, root_span, item.id, vis, @@ -265,6 +263,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { subclass, use_tree.span, id, + item, root_span, item.id, vis, @@ -304,7 +303,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { } // Empty groups `a::b::{}` are turned into synthetic `self` imports - // `a::b::c::{self as _}`, so that their prefixes are correctly + // `a::b::c::{self as __dummy}`, so that their prefixes are correctly // resolved and checked for privacy/stability/etc. 
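A tiny runnable example (illustrative only) of the empty-group case described in the comment above: the braces import nothing, but the prefix must still resolve and pass privacy/stability checks, which is why it is rewritten into a synthetic `self` import.

```rust
// `std::collections` is still resolved and checked even though the
// group imports nothing; internally this behaves like
// `use std::collections::{self as __dummy};` per the comment above.
#[allow(unused_imports)]
use std::collections::{};

fn main() {
    println!("empty use groups are accepted");
}
```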
if items.is_empty() && !empty_for_self(&prefix) { let new_span = prefix[prefix.len() - 1].ident.span; @@ -313,7 +312,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { Ident::new(keywords::SelfLower.name(), new_span) ), kind: ast::UseTreeKind::Simple( - Some(Ident::new(keywords::Underscore.name().gensymed(), new_span)), + Some(Ident::new(Name::gensym("__dummy"), new_span)), ast::DUMMY_NODE_ID, ast::DUMMY_NODE_ID, ), @@ -352,14 +351,15 @@ impl<'a, 'cl> Resolver<'a, 'cl> { let module = if orig_name.is_none() && ident.name == keywords::SelfLower.name() { self.session .struct_span_err(item.span, "`extern crate self;` requires renaming") - .span_suggestion(item.span, "try", "extern crate self as name;".into()) + .span_suggestion( + item.span, + "try", + "extern crate self as name;".into(), + Applicability::HasPlaceholders, + ) .emit(); return; } else if orig_name == Some(keywords::SelfLower.name()) { - if !self.session.features_untracked().extern_crate_self { - emit_feature_err(&self.session.parse_sess, "extern_crate_self", item.span, - GateIssue::Language, "`extern crate self` is unstable"); - } self.graph_root } else { let crate_id = self.crate_loader.process_extern_crate(item, &self.definitions); @@ -383,6 +383,9 @@ impl<'a, 'cl> Resolver<'a, 'cl> { source: orig_name, target: ident, }, + has_attributes: !item.attrs.is_empty(), + use_span_with_attributes: item.span_with_attributes(), + use_span: item.span, root_span: item.span, span: item.span, module_path: Vec::new(), @@ -460,10 +463,9 @@ impl<'a, 'cl> Resolver<'a, 'cl> { if let Some(attr) = attr::find_by_name(&item.attrs, "proc_macro_derive") { if let Some(trait_attr) = attr.meta_item_list().and_then(|list| list.get(0).cloned()) { - if let Some(ident) = trait_attr.name().map(Ident::with_empty_ctxt) { - let sp = trait_attr.span; + if let Some(ident) = trait_attr.ident() { let def = Def::Macro(def.def_id(), MacroKind::ProcMacroStub); - self.define(parent, ident, MacroNS, (def, vis, sp, expansion)); + self.define(parent, ident, MacroNS, (def, vis, ident.span, expansion)); } } } @@ -530,9 +532,10 @@ impl<'a, 'cl> Resolver<'a, 'cl> { // If this is a tuple or unit struct, define a name // in the value namespace as well. - if !struct_def.is_struct() { - let ctor_def = Def::StructCtor(self.definitions.local_def_id(struct_def.id()), - CtorKind::from_ast(struct_def)); + if let Some(ctor_node_id) = struct_def.ctor_id() { + let ctor_def = Def::Ctor(self.definitions.local_def_id(ctor_node_id), + CtorOf::Struct, + CtorKind::from_ast(struct_def)); self.define(parent, ident, ValueNS, (ctor_def, ctor_vis, sp, expansion)); self.struct_constructors.insert(def.def_id(), (ctor_def, ctor_vis)); } @@ -579,19 +582,30 @@ impl<'a, 'cl> Resolver<'a, 'cl> { vis: ty::Visibility, expansion: Mark) { let ident = variant.node.ident; - let def_id = self.definitions.local_def_id(variant.node.data.id()); // Define a name in the type namespace. + let def_id = self.definitions.local_def_id(variant.node.id); let def = Def::Variant(def_id); self.define(parent, ident, TypeNS, (def, vis, variant.span, expansion)); + // If the variant is marked as non_exhaustive then lower the visibility to within the + // crate. + let mut ctor_vis = vis; + let has_non_exhaustive = attr::contains_name(&variant.node.attrs, "non_exhaustive"); + if has_non_exhaustive && vis == ty::Visibility::Public { + ctor_vis = ty::Visibility::Restricted(DefId::local(CRATE_DEF_INDEX)); + } + // Define a constructor name in the value namespace. 
// Braced variants, unlike structs, generate unusable names in // value namespace, they are reserved for possible future use. + // It's ok to use the variant's id as a ctor id since an + // error will be reported on any use of such resolution anyway. + let ctor_node_id = variant.node.data.ctor_id().unwrap_or(variant.node.id); + let ctor_def_id = self.definitions.local_def_id(ctor_node_id); let ctor_kind = CtorKind::from_ast(&variant.node.data); - let ctor_def = Def::VariantCtor(def_id, ctor_kind); - - self.define(parent, ident, ValueNS, (ctor_def, vis, variant.span, expansion)); + let ctor_def = Def::Ctor(ctor_def_id, CtorOf::Variant, ctor_kind); + self.define(parent, ident, ValueNS, (ctor_def, ctor_vis, variant.span, expansion)); } /// Constructs the reduced graph for one foreign item. @@ -633,10 +647,9 @@ impl<'a, 'cl> Resolver<'a, 'cl> { // but metadata cannot encode gensyms currently, so we create it here. // This is only a guess, two equivalent idents may incorrectly get different gensyms here. let ident = ident.gensym_if_underscore(); - let def_id = def.def_id(); let expansion = Mark::root(); // FIXME(jseyfried) intercrate hygiene match def { - Def::Mod(..) | Def::Enum(..) => { + Def::Mod(def_id) | Def::Enum(def_id) => { let module = self.new_module(parent, ModuleKind::Def(def, ident.name), def_id, @@ -644,13 +657,15 @@ impl<'a, 'cl> Resolver<'a, 'cl> { span); self.define(parent, ident, TypeNS, (module, vis, DUMMY_SP, expansion)); } - Def::Variant(..) | Def::TyAlias(..) | Def::ForeignTy(..) => { + Def::Variant(..) | Def::TyAlias(..) | Def::ForeignTy(..) | Def::Existential(..) | + Def::TraitAlias(..) | Def::PrimTy(..) | Def::ToolMod => { self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion)); } - Def::Fn(..) | Def::Static(..) | Def::Const(..) | Def::VariantCtor(..) => { + Def::Fn(..) | Def::Static(..) | Def::Const(..) | + Def::Ctor(_, CtorOf::Variant, ..) => { self.define(parent, ident, ValueNS, (def, vis, DUMMY_SP, expansion)); } - Def::StructCtor(..) => { + Def::Ctor(def_id, CtorOf::Struct, ..) => { self.define(parent, ident, ValueNS, (def, vis, DUMMY_SP, expansion)); if let Some(struct_def_id) = @@ -659,7 +674,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { self.struct_constructors.insert(struct_def_id, (def, vis)); } } - Def::Trait(..) => { + Def::Trait(def_id) => { let module_kind = ModuleKind::Def(def, ident.name); let module = self.new_module(parent, module_kind, @@ -680,14 +695,14 @@ impl<'a, 'cl> Resolver<'a, 'cl> { } module.populated.set(true); } - Def::Struct(..) | Def::Union(..) => { + Def::Struct(def_id) | Def::Union(def_id) => { self.define(parent, ident, TypeNS, (def, vis, DUMMY_SP, expansion)); // Record field names for error reporting. let field_names = self.cstore.struct_field_names_untracked(def_id); self.insert_field_names(def_id, field_names); } - Def::Macro(..) => { + Def::Macro(..) | Def::NonMacroAttr(..) => { self.define(parent, ident, MacroNS, (def, vis, DUMMY_SP, expansion)); } _ => bug!("unexpected definition: {:?}", def) @@ -780,7 +795,7 @@ impl<'a, 'cl> Resolver<'a, 'cl> { } } - // This returns true if we should consider the underlying `extern crate` to be used. + /// Returns `true` if we should consider the underlying `extern crate` to be used. 
fn process_legacy_macro_imports(&mut self, item: &Item, module: Module<'a>, parent_scope: &ParentScope<'a>) -> bool { let mut import_all = None; @@ -805,14 +820,14 @@ impl<'a, 'cl> Resolver<'a, 'cl> { break; } MetaItemKind::List(nested_metas) => for nested_meta in nested_metas { - match nested_meta.word() { - Some(word) => single_imports.push((word.name(), word.span)), - None => ill_formed(nested_meta.span), + match nested_meta.ident() { + Some(ident) if nested_meta.is_word() => single_imports.push(ident), + _ => ill_formed(nested_meta.span()), } } MetaItemKind::NameValue(..) => ill_formed(meta.span), } - None => ill_formed(attr.span()), + None => ill_formed(attr.span), } } } @@ -824,6 +839,9 @@ impl<'a, 'cl> Resolver<'a, 'cl> { parent_scope: parent_scope.clone(), imported_module: Cell::new(Some(ModuleOrUniformRoot::Module(module))), subclass: ImportDirectiveSubclass::MacroUse, + use_span_with_attributes: item.span_with_attributes(), + has_attributes: !item.attrs.is_empty(), + use_span: item.span, root_span: span, span, module_path: Vec::new(), @@ -840,30 +858,30 @@ impl<'a, 'cl> Resolver<'a, 'cl> { self.legacy_import_macro(ident.name, imported_binding, span, allow_shadowing); }); } else { - for (name, span) in single_imports.iter().cloned() { - let ident = Ident::with_empty_ctxt(name); + for ident in single_imports.iter().cloned() { let result = self.resolve_ident_in_module( ModuleOrUniformRoot::Module(module), ident, MacroNS, None, false, - span, + ident.span, ); if let Ok(binding) = result { - let directive = macro_use_directive(span); + let directive = macro_use_directive(ident.span); self.potentially_unused_imports.push(directive); let imported_binding = self.import(binding, directive); - self.legacy_import_macro(name, imported_binding, span, allow_shadowing); + self.legacy_import_macro(ident.name, imported_binding, + ident.span, allow_shadowing); } else { - span_err!(self.session, span, E0469, "imported macro not found"); + span_err!(self.session, ident.span, E0469, "imported macro not found"); } } } import_all.is_some() || !single_imports.is_empty() } - // does this attribute list contain "macro_use"? + /// Returns `true` if this attribute list contains `macro_use`. fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool { for attr in attrs { if attr.check_name("macro_escape") { @@ -888,13 +906,13 @@ impl<'a, 'cl> Resolver<'a, 'cl> { } } -pub struct BuildReducedGraphVisitor<'a, 'b: 'a, 'c: 'b> { - pub resolver: &'a mut Resolver<'b, 'c>, +pub struct BuildReducedGraphVisitor<'a, 'b: 'a> { + pub resolver: &'a mut Resolver<'b>, pub current_legacy_scope: LegacyScope<'b>, pub expansion: Mark, } -impl<'a, 'b, 'cl> BuildReducedGraphVisitor<'a, 'b, 'cl> { +impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> { fn visit_invoc(&mut self, id: ast::NodeId) -> &'b InvocationData<'b> { let mark = id.placeholder_to_mark(); self.resolver.current_module.unresolved_invocations.borrow_mut().insert(mark); @@ -917,7 +935,7 @@ macro_rules! 
method { } } -impl<'a, 'b, 'cl> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b, 'cl> { +impl<'a, 'b> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b> { method!(visit_impl_item: ast::ImplItem, ast::ImplItemKind::Macro, walk_impl_item); method!(visit_expr: ast::Expr, ast::ExprKind::Mac, walk_expr); method!(visit_pat: ast::Pat, ast::PatKind::Mac, walk_pat); @@ -1012,7 +1030,7 @@ impl<'a, 'b, 'cl> Visitor<'a> for BuildReducedGraphVisitor<'a, 'b, 'cl> { fn visit_token(&mut self, t: Token) { if let Token::Interpolated(nt) = t { - if let token::NtExpr(ref expr) = nt.0 { + if let token::NtExpr(ref expr) = *nt { if let ast::ExprKind::Mac(..) = expr.node { self.visit_invoc(expr.id); } diff --git a/src/librustc_resolve/check_unused.rs b/src/librustc_resolve/check_unused.rs index 659ca1f5b9f31..3b6179f78558b 100644 --- a/src/librustc_resolve/check_unused.rs +++ b/src/librustc_resolve/check_unused.rs @@ -1,14 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - - // // Unused import checking // @@ -18,46 +7,75 @@ // // Unused trait imports can't be checked until the method resolution. We save // candidates here, and do the actual check in librustc_typeck/check_unused.rs. +// +// Checking for unused imports is split into three steps: +// +// - `UnusedImportCheckVisitor` walks the AST to find all the unused imports +// inside of `UseTree`s, recording their `NodeId`s and grouping them by +// the parent `use` item +// +// - `calc_unused_spans` then walks over all the `use` items marked in the +// previous step to collect the spans associated with the `NodeId`s and to +// calculate the spans that can be removed by rustfix; This is done in a +// separate step to be able to collapse the adjacent spans that rustfix +// will remove +// +// - `check_crate` finally emits the diagnostics based on the data generated +// in the last step use std::ops::{Deref, DerefMut}; -use Resolver; -use resolve_imports::ImportDirectiveSubclass; +use crate::Resolver; +use crate::resolve_imports::ImportDirectiveSubclass; -use rustc::{lint, ty}; use rustc::util::nodemap::NodeMap; +use rustc::{lint, ty}; +use rustc_data_structures::fx::FxHashSet; use syntax::ast; use syntax::visit::{self, Visitor}; use syntax_pos::{Span, MultiSpan, DUMMY_SP}; +struct UnusedImport<'a> { + use_tree: &'a ast::UseTree, + use_tree_id: ast::NodeId, + item_span: Span, + unused: FxHashSet, +} -struct UnusedImportCheckVisitor<'a, 'b: 'a, 'd: 'b> { - resolver: &'a mut Resolver<'b, 'd>, +impl<'a> UnusedImport<'a> { + fn add(&mut self, id: ast::NodeId) { + self.unused.insert(id); + } +} + +struct UnusedImportCheckVisitor<'a, 'b: 'a> { + resolver: &'a mut Resolver<'b>, /// All the (so far) unused imports, grouped path list - unused_imports: NodeMap>, + unused_imports: NodeMap>, + base_use_tree: Option<&'a ast::UseTree>, base_id: ast::NodeId, item_span: Span, } // Deref and DerefMut impls allow treating UnusedImportCheckVisitor as Resolver. 
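To make the three-step description above concrete, here is the kind of input it targets (illustrative only): a nested `use` group where only some names are unused. The unused names are grouped per `use` item, and the removal spans drive the rustfix suggestions quoted later in this file, "remove the unused import" versus "remove the whole `use` item".

```rust
// With the default `unused_imports` lint level, `BTreeMap` is flagged
// with a "remove the unused import" suggestion, while the fully unused
// `use std::cmp::min;` gets "remove the whole `use` item".
use std::collections::{BTreeMap, HashMap};
use std::cmp::min;

fn main() {
    let mut map: HashMap<&str, u32> = HashMap::new();
    map.insert("answer", 42);
    println!("{:?}", map.get("answer"));
}
```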
-impl<'a, 'b, 'd> Deref for UnusedImportCheckVisitor<'a, 'b, 'd> { - type Target = Resolver<'b, 'd>; +impl<'a, 'b> Deref for UnusedImportCheckVisitor<'a, 'b> { + type Target = Resolver<'b>; - fn deref<'c>(&'c self) -> &'c Resolver<'b, 'd> { + fn deref<'c>(&'c self) -> &'c Resolver<'b> { &*self.resolver } } -impl<'a, 'b, 'd> DerefMut for UnusedImportCheckVisitor<'a, 'b, 'd> { - fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b, 'd> { +impl<'a, 'b> DerefMut for UnusedImportCheckVisitor<'a, 'b> { + fn deref_mut<'c>(&'c mut self) -> &'c mut Resolver<'b> { &mut *self.resolver } } -impl<'a, 'b, 'd> UnusedImportCheckVisitor<'a, 'b, 'd> { +impl<'a, 'b> UnusedImportCheckVisitor<'a, 'b> { // We have information about whether `use` (import) directives are actually // used now. If an import is not used at all, we signal a lint error. - fn check_import(&mut self, item_id: ast::NodeId, id: ast::NodeId, span: Span) { + fn check_import(&mut self, id: ast::NodeId) { let mut used = false; self.per_ns(|this, ns| used |= this.used_imports.contains(&(id, ns))); if !used { @@ -65,19 +83,34 @@ impl<'a, 'b, 'd> UnusedImportCheckVisitor<'a, 'b, 'd> { // Check later. return; } - self.unused_imports.entry(item_id).or_default().insert(id, span); + self.unused_import(self.base_id).add(id); } else { // This trait import is definitely used, in a way other than // method resolution. self.maybe_unused_trait_imports.remove(&id); - if let Some(i) = self.unused_imports.get_mut(&item_id) { - i.remove(&id); + if let Some(i) = self.unused_imports.get_mut(&self.base_id) { + i.unused.remove(&id); } } } + + fn unused_import(&mut self, id: ast::NodeId) -> &mut UnusedImport<'a> { + let use_tree_id = self.base_id; + let use_tree = self.base_use_tree.unwrap(); + let item_span = self.item_span; + + self.unused_imports + .entry(id) + .or_insert_with(|| UnusedImport { + use_tree, + use_tree_id, + item_span, + unused: FxHashSet::default(), + }) + } } -impl<'a, 'b, 'cl> Visitor<'a> for UnusedImportCheckVisitor<'a, 'b, 'cl> { +impl<'a, 'b> Visitor<'a> for UnusedImportCheckVisitor<'a, 'b> { fn visit_item(&mut self, item: &'a ast::Item) { self.item_span = item.span; @@ -99,32 +132,113 @@ impl<'a, 'b, 'cl> Visitor<'a> for UnusedImportCheckVisitor<'a, 'b, 'cl> { // This allows the grouping of all the lints in the same item if !nested { self.base_id = id; + self.base_use_tree = Some(use_tree); } if let ast::UseTreeKind::Nested(ref items) = use_tree.kind { - // If it's the parent group, cover the entire use item - let span = if nested { - use_tree.span - } else { - self.item_span - }; - if items.is_empty() { - self.unused_imports - .entry(self.base_id) - .or_default() - .insert(id, span); + self.unused_import(self.base_id).add(id); } } else { - let base_id = self.base_id; - self.check_import(base_id, id, use_tree.span); + self.check_import(id); } visit::walk_use_tree(self, use_tree, id); } } -pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) { +enum UnusedSpanResult { + Used, + FlatUnused(Span, Span), + NestedFullUnused(Vec, Span), + NestedPartialUnused(Vec, Vec), +} + +fn calc_unused_spans( + unused_import: &UnusedImport<'_>, + use_tree: &ast::UseTree, + use_tree_id: ast::NodeId, +) -> UnusedSpanResult { + // The full span is the whole item's span if this current tree is not nested inside another + // This tells rustfix to remove the whole item if all the imports are unused + let full_span = if unused_import.use_tree.span == use_tree.span { + unused_import.item_span + } else { + use_tree.span + }; + match use_tree.kind { + 
ast::UseTreeKind::Simple(..) | ast::UseTreeKind::Glob => { + if unused_import.unused.contains(&use_tree_id) { + UnusedSpanResult::FlatUnused(use_tree.span, full_span) + } else { + UnusedSpanResult::Used + } + } + ast::UseTreeKind::Nested(ref nested) => { + if nested.len() == 0 { + return UnusedSpanResult::FlatUnused(use_tree.span, full_span); + } + + let mut unused_spans = Vec::new(); + let mut to_remove = Vec::new(); + let mut all_nested_unused = true; + let mut previous_unused = false; + for (pos, (use_tree, use_tree_id)) in nested.iter().enumerate() { + let remove = match calc_unused_spans(unused_import, use_tree, *use_tree_id) { + UnusedSpanResult::Used => { + all_nested_unused = false; + None + } + UnusedSpanResult::FlatUnused(span, remove) => { + unused_spans.push(span); + Some(remove) + } + UnusedSpanResult::NestedFullUnused(mut spans, remove) => { + unused_spans.append(&mut spans); + Some(remove) + } + UnusedSpanResult::NestedPartialUnused(mut spans, mut to_remove_extra) => { + all_nested_unused = false; + unused_spans.append(&mut spans); + to_remove.append(&mut to_remove_extra); + None + } + }; + if let Some(remove) = remove { + let remove_span = if nested.len() == 1 { + remove + } else if pos == nested.len() - 1 || !all_nested_unused { + // Delete everything from the end of the last import, to delete the + // previous comma + nested[pos - 1].0.span.shrink_to_hi().to(use_tree.span) + } else { + // Delete everything until the next import, to delete the trailing commas + use_tree.span.to(nested[pos + 1].0.span.shrink_to_lo()) + }; + + // Try to collapse adjacent spans into a single one. This prevents all cases of + // overlapping removals, which are not supported by rustfix + if previous_unused && !to_remove.is_empty() { + let previous = to_remove.pop().unwrap(); + to_remove.push(previous.to(remove_span)); + } else { + to_remove.push(remove_span); + } + } + previous_unused = remove.is_some(); + } + if unused_spans.is_empty() { + UnusedSpanResult::Used + } else if all_nested_unused { + UnusedSpanResult::NestedFullUnused(unused_spans, full_span) + } else { + UnusedSpanResult::NestedPartialUnused(unused_spans, to_remove) + } + } + } +} + +pub fn check_crate(resolver: &mut Resolver<'_>, krate: &ast::Crate) { for directive in resolver.potentially_unused_imports.iter() { match directive.subclass { _ if directive.used.get() || @@ -163,14 +277,33 @@ pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) { let mut visitor = UnusedImportCheckVisitor { resolver, unused_imports: Default::default(), + base_use_tree: None, base_id: ast::DUMMY_NODE_ID, item_span: DUMMY_SP, }; visit::walk_crate(&mut visitor, krate); - for (id, spans) in &visitor.unused_imports { + for unused in visitor.unused_imports.values() { + let mut fixes = Vec::new(); + let mut spans = match calc_unused_spans(unused, unused.use_tree, unused.use_tree_id) { + UnusedSpanResult::Used => continue, + UnusedSpanResult::FlatUnused(span, remove) => { + fixes.push((remove, String::new())); + vec![span] + } + UnusedSpanResult::NestedFullUnused(spans, remove) => { + fixes.push((remove, String::new())); + spans + } + UnusedSpanResult::NestedPartialUnused(spans, remove) => { + for fix in &remove { + fixes.push((*fix, String::new())); + } + spans + } + }; + let len = spans.len(); - let mut spans = spans.values().cloned().collect::>(); spans.sort(); let ms = MultiSpan::from_spans(spans.clone()); let mut span_snippets = spans.iter() @@ -188,6 +321,21 @@ pub fn check_crate(resolver: &mut Resolver, krate: &ast::Crate) { } else { 
String::new() }); - visitor.session.buffer_lint(lint::builtin::UNUSED_IMPORTS, *id, ms, &msg); + + let fix_msg = if fixes.len() == 1 && fixes[0].0 == unused.item_span { + "remove the whole `use` item" + } else if spans.len() > 1 { + "remove the unused imports" + } else { + "remove the unused import" + }; + + visitor.session.buffer_lint_with_diagnostic( + lint::builtin::UNUSED_IMPORTS, + unused.use_tree_id, + ms, + &msg, + lint::builtin::BuiltinLintDiagnostics::UnusedImports(fix_msg.into(), fixes), + ); } } diff --git a/src/librustc_resolve/diagnostics.rs b/src/librustc_resolve/diagnostics.rs index 7730239071410..5c095994a1bbd 100644 --- a/src/librustc_resolve/diagnostics.rs +++ b/src/librustc_resolve/diagnostics.rs @@ -1,15 +1,7 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - #![allow(non_snake_case)] +use syntax::{register_diagnostic, register_diagnostics, register_long_diagnostics}; + // Error messages for EXXXX errors. Each message should start and end with a // new line, and be wrapped to 80 characters. In vim you can `:set tw=80` and // use `gq` to wrap paragraphs. Use `:set tw=0` to disable. @@ -202,7 +194,7 @@ use foo::core; // error: an extern crate named `core` has already fn main() {} ``` -To fix issue issue, you have to rename at least one of the two imports. +To fix this issue, you have to rename at least one of the two imports. Example: ``` @@ -422,8 +414,8 @@ https://doc.rust-lang.org/reference.html#use-declarations "##, E0401: r##" -Inner items do not inherit type parameters from the functions they are embedded -in. +Inner items do not inherit type or const parameters from the functions +they are embedded in. Erroneous code example: diff --git a/src/librustc_resolve/error_reporting.rs b/src/librustc_resolve/error_reporting.rs index 23edaf1243812..461d02e515d38 100644 --- a/src/librustc_resolve/error_reporting.rs +++ b/src/librustc_resolve/error_reporting.rs @@ -1,24 +1,428 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use {CrateLint, PathResult, Segment}; -use macros::ParentScope; +use std::cmp::Reverse; +use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; +use log::debug; +use rustc::hir::def::{Def, CtorKind, Namespace::*}; +use rustc::hir::def_id::{CRATE_DEF_INDEX, DefId}; +use rustc::session::config::nightly_options; +use syntax::ast::{Expr, ExprKind}; use syntax::symbol::keywords; use syntax_pos::Span; -use resolve_imports::ImportResolver; -use std::cmp::Reverse; +use crate::macros::ParentScope; +use crate::resolve_imports::ImportResolver; +use crate::{import_candidate_to_enum_paths, is_self_type, is_self_value, path_names_to_string}; +use crate::{AssocSuggestion, CrateLint, ImportSuggestion, ModuleOrUniformRoot, PathResult, + PathSource, Resolver, Segment}; + +impl<'a> Resolver<'a> { + /// Handles error reporting for `smart_resolve_path_fragment` function. 
+ /// Creates base error and amends it with one short label and possibly some longer helps/notes. + pub(crate) fn smart_resolve_report_errors( + &mut self, + path: &[Segment], + span: Span, + source: PathSource<'_>, + def: Option, + ) -> (DiagnosticBuilder<'a>, Vec) { + let ident_span = path.last().map_or(span, |ident| ident.ident.span); + let ns = source.namespace(); + let is_expected = &|def| source.is_expected(def); + let is_enum_variant = &|def| if let Def::Variant(..) = def { true } else { false }; + + // Make the base error. + let expected = source.descr_expected(); + let path_str = Segment::names_to_string(path); + let item_str = path.last().unwrap().ident; + let code = source.error_code(def.is_some()); + let (base_msg, fallback_label, base_span) = if let Some(def) = def { + (format!("expected {}, found {} `{}`", expected, def.kind_name(), path_str), + format!("not a {}", expected), + span) + } else { + let item_span = path.last().unwrap().ident.span; + let (mod_prefix, mod_str) = if path.len() == 1 { + (String::new(), "this scope".to_string()) + } else if path.len() == 2 && path[0].ident.name == keywords::PathRoot.name() { + (String::new(), "the crate root".to_string()) + } else { + let mod_path = &path[..path.len() - 1]; + let mod_prefix = match self.resolve_path_without_parent_scope( + mod_path, Some(TypeNS), false, span, CrateLint::No + ) { + PathResult::Module(ModuleOrUniformRoot::Module(module)) => + module.def(), + _ => None, + }.map_or(String::new(), |def| format!("{} ", def.kind_name())); + (mod_prefix, format!("`{}`", Segment::names_to_string(mod_path))) + }; + (format!("cannot find {} `{}` in {}{}", expected, item_str, mod_prefix, mod_str), + format!("not found in {}", mod_str), + item_span) + }; + + let code = DiagnosticId::Error(code.into()); + let mut err = self.session.struct_span_err_with_code(base_span, &base_msg, code); + + // Emit help message for fake-self from other languages (e.g., `this` in Javascript). + if ["this", "my"].contains(&&*item_str.as_str()) + && self.self_value_is_available(path[0].ident.span, span) { + err.span_suggestion( + span, + "did you mean", + "self".to_string(), + Applicability::MaybeIncorrect, + ); + } + + // Emit special messages for unresolved `Self` and `self`. + if is_self_type(path, ns) { + __diagnostic_used!(E0411); + err.code(DiagnosticId::Error("E0411".into())); + err.span_label(span, format!("`Self` is only available in impls, traits, \ + and type definitions")); + return (err, Vec::new()); + } + if is_self_value(path, ns) { + debug!("smart_resolve_path_fragment: E0424, source={:?}", source); + + __diagnostic_used!(E0424); + err.code(DiagnosticId::Error("E0424".into())); + err.span_label(span, match source { + PathSource::Pat => { + format!("`self` value is a keyword \ + and may not be bound to \ + variables or shadowed") + } + _ => { + format!("`self` value is a keyword \ + only available in methods \ + with `self` parameter") + } + }); + return (err, Vec::new()); + } + + // Try to lookup name in more relaxed fashion for better error reporting. + let ident = path.last().unwrap().ident; + let candidates = self.lookup_import_candidates(ident, ns, is_expected) + .drain(..) + .filter(|ImportSuggestion { did, .. 
}| { + match (did, def.and_then(|def| def.opt_def_id())) { + (Some(suggestion_did), Some(actual_did)) => *suggestion_did != actual_did, + _ => true, + } + }) + .collect::>(); + if candidates.is_empty() && is_expected(Def::Enum(DefId::local(CRATE_DEF_INDEX))) { + let enum_candidates = + self.lookup_import_candidates(ident, ns, is_enum_variant); + let mut enum_candidates = enum_candidates.iter() + .map(|suggestion| { + import_candidate_to_enum_paths(&suggestion) + }).collect::>(); + enum_candidates.sort(); + + if !enum_candidates.is_empty() { + // Contextualize for E0412 "cannot find type", but don't belabor the point + // (that it's a variant) for E0573 "expected type, found variant". + let preamble = if def.is_none() { + let others = match enum_candidates.len() { + 1 => String::new(), + 2 => " and 1 other".to_owned(), + n => format!(" and {} others", n) + }; + format!("there is an enum variant `{}`{}; ", + enum_candidates[0].0, others) + } else { + String::new() + }; + let msg = format!("{}try using the variant's enum", preamble); + + err.span_suggestions( + span, + &msg, + enum_candidates.into_iter() + .map(|(_variant_path, enum_ty_path)| enum_ty_path) + // Variants re-exported in prelude doesn't mean `prelude::v1` is the + // type name! + // FIXME: is there a more principled way to do this that + // would work for other re-exports? + .filter(|enum_ty_path| enum_ty_path != "std::prelude::v1") + // Also write `Option` rather than `std::prelude::v1::Option`. + .map(|enum_ty_path| { + // FIXME #56861: DRY-er prelude filtering. + enum_ty_path.trim_start_matches("std::prelude::v1::").to_owned() + }), + Applicability::MachineApplicable, + ); + } + } + if path.len() == 1 && self.self_type_is_available(span) { + if let Some(candidate) = self.lookup_assoc_candidate(ident, ns, is_expected) { + let self_is_available = self.self_value_is_available(path[0].ident.span, span); + match candidate { + AssocSuggestion::Field => { + err.span_suggestion( + span, + "try", + format!("self.{}", path_str), + Applicability::MachineApplicable, + ); + if !self_is_available { + err.span_label(span, format!("`self` value is a keyword \ + only available in \ + methods with `self` parameter")); + } + } + AssocSuggestion::MethodWithSelf if self_is_available => { + err.span_suggestion( + span, + "try", + format!("self.{}", path_str), + Applicability::MachineApplicable, + ); + } + AssocSuggestion::MethodWithSelf | AssocSuggestion::AssocItem => { + err.span_suggestion( + span, + "try", + format!("Self::{}", path_str), + Applicability::MachineApplicable, + ); + } + } + return (err, candidates); + } + } + + let mut levenshtein_worked = false; + + // Try Levenshtein algorithm. + let suggestion = self.lookup_typo_candidate(path, ns, is_expected, span); + if let Some(suggestion) = suggestion { + let msg = format!( + "{} {} with a similar name exists", + suggestion.article, suggestion.kind + ); + err.span_suggestion( + ident_span, + &msg, + suggestion.candidate.to_string(), + Applicability::MaybeIncorrect, + ); + + levenshtein_worked = true; + } + + // Try context-dependent help if relaxed lookup didn't work. + if let Some(def) = def { + if self.smart_resolve_context_dependent_help(&mut err, + span, + source, + def, + &path_str, + &fallback_label) { + return (err, candidates); + } + } + + // Fallback label. 
+ if !levenshtein_worked { + err.span_label(base_span, fallback_label); + self.type_ascription_suggestion(&mut err, base_span); + } + (err, candidates) + } + + /// Provides context-dependent help for errors reported by the `smart_resolve_path_fragment` + /// function. + /// Returns `true` if able to provide context-dependent help. + fn smart_resolve_context_dependent_help( + &mut self, + err: &mut DiagnosticBuilder<'a>, + span: Span, + source: PathSource<'_>, + def: Def, + path_str: &str, + fallback_label: &str, + ) -> bool { + let ns = source.namespace(); + let is_expected = &|def| source.is_expected(def); + + let path_sep = |err: &mut DiagnosticBuilder<'_>, expr: &Expr| match expr.node { + ExprKind::Field(_, ident) => { + err.span_suggestion( + expr.span, + "use the path separator to refer to an item", + format!("{}::{}", path_str, ident), + Applicability::MaybeIncorrect, + ); + true + } + ExprKind::MethodCall(ref segment, ..) => { + let span = expr.span.with_hi(segment.ident.span.hi()); + err.span_suggestion( + span, + "use the path separator to refer to an item", + format!("{}::{}", path_str, segment.ident), + Applicability::MaybeIncorrect, + ); + true + } + _ => false, + }; + + match (def, source) { + (Def::Macro(..), _) => { + err.span_suggestion( + span, + "use `!` to invoke the macro", + format!("{}!", path_str), + Applicability::MaybeIncorrect, + ); + if path_str == "try" && span.rust_2015() { + err.note("if you want the `try` keyword, you need to be in the 2018 edition"); + } + } + (Def::TyAlias(..), PathSource::Trait(_)) => { + err.span_label(span, "type aliases cannot be used as traits"); + if nightly_options::is_nightly_build() { + err.note("did you mean to use a trait alias?"); + } + } + (Def::Mod(..), PathSource::Expr(Some(parent))) => if !path_sep(err, &parent) { + return false; + }, + (Def::Enum(..), PathSource::TupleStruct) + | (Def::Enum(..), PathSource::Expr(..)) => { + if let Some(variants) = self.collect_enum_variants(def) { + if !variants.is_empty() { + let msg = if variants.len() == 1 { + "try using the enum's variant" + } else { + "try using one of the enum's variants" + }; + + err.span_suggestions( + span, + msg, + variants.iter().map(path_names_to_string), + Applicability::MaybeIncorrect, + ); + } + } else { + err.note("did you mean to use one of the enum's variants?"); + } + }, + (Def::Struct(def_id), _) if ns == ValueNS => { + if let Some((ctor_def, ctor_vis)) + = self.struct_constructors.get(&def_id).cloned() { + let accessible_ctor = self.is_accessible(ctor_vis); + if is_expected(ctor_def) && !accessible_ctor { + err.span_label( + span, + format!("constructor is not visible here due to private fields"), + ); + } + } else { + // HACK(estebank): find a better way to figure out that this was a + // parser issue where a struct literal is being used on an expression + // where a brace being opened means a block is being started. Look + // ahead for the next text to see if `span` is followed by a `{`. + let sm = self.session.source_map(); + let mut sp = span; + loop { + sp = sm.next_point(sp); + match sm.span_to_snippet(sp) { + Ok(ref snippet) => { + if snippet.chars().any(|c| { !c.is_whitespace() }) { + break; + } + } + _ => break, + } + } + let followed_by_brace = match sm.span_to_snippet(sp) { + Ok(ref snippet) if snippet == "{" => true, + _ => false, + }; + // In case this could be a struct literal that needs to be surrounded + // by parenthesis, find the appropriate span. 
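The look-ahead above (checking whether the failed path is followed by `{` and hunting for the matching `}`) exists for cases like the following sketch, where a struct literal in a condition is parsed as the start of a block. The commented-out line is the one that draws the "surround the struct literal with parenthesis" suggestion; the last line shows the suggested form.

```rust
#[derive(PartialEq)]
struct Foo {
    x: i32,
}

fn main() {
    let f = Foo { x: 1 };
    // Without parentheses, `{ x: 1 }` is parsed as the `if` block and
    // resolving `Foo` in value position fails:
    //     if f == Foo { x: 1 } { println!("eq"); }
    if f == (Foo { x: 1 }) {
        println!("eq");
    }
}
```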
+ let mut i = 0; + let mut closing_brace = None; + loop { + sp = sm.next_point(sp); + match sm.span_to_snippet(sp) { + Ok(ref snippet) => { + if snippet == "}" { + let sp = span.to(sp); + if let Ok(snippet) = sm.span_to_snippet(sp) { + closing_brace = Some((sp, snippet)); + } + break; + } + } + _ => break, + } + i += 1; + // The bigger the span, the more likely we're incorrect -- + // bound it to 100 chars long. + if i > 100 { + break; + } + } + match source { + PathSource::Expr(Some(parent)) => if !path_sep(err, &parent) { + err.span_label( + span, + format!("did you mean `{} {{ /* fields */ }}`?", path_str), + ); + } + PathSource::Expr(None) if followed_by_brace == true => { + if let Some((sp, snippet)) = closing_brace { + err.span_suggestion( + sp, + "surround the struct literal with parenthesis", + format!("({})", snippet), + Applicability::MaybeIncorrect, + ); + } else { + err.span_label( + span, + format!("did you mean `({} {{ /* fields */ }})`?", path_str), + ); + } + }, + _ => { + err.span_label( + span, + format!("did you mean `{} {{ /* fields */ }}`?", path_str), + ); + }, + } + } + } + (Def::Union(..), _) | + (Def::Variant(..), _) | + (Def::Ctor(_, _, CtorKind::Fictive), _) if ns == ValueNS => { + err.span_label(span, format!("did you mean `{} {{ /* fields */ }}`?", path_str)); + } + (Def::SelfTy(..), _) if ns == ValueNS => { + err.span_label(span, fallback_label); + err.note("can't use `Self` as a constructor, you must use the implemented struct"); + } + (Def::TyAlias(_), _) | (Def::AssociatedTy(..), _) if ns == ValueNS => { + err.note("can't use a type alias as a constructor"); + } + _ => return false, + } + true + } +} -impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { - /// Add suggestions for a path that cannot be resolved. +impl<'a, 'b:'a> ImportResolver<'a, 'b> { + /// Adds suggestions for a path that cannot be resolved. pub(crate) fn make_path_suggestion( &mut self, span: Span, @@ -32,7 +436,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { // On 2015 `{{root}}` is usually added implicitly. (Some(fst), Some(snd)) if fst.ident.name == keywords::PathRoot.name() && !snd.ident.is_path_segment_keyword() => {} - // `ident::...` on 2018 + // `ident::...` on 2018. (Some(fst), _) if fst.ident.span.rust_2018() && !fst.ident.is_path_segment_keyword() => { // Insert a placeholder that's later replaced by `self`/`super`/etc. @@ -71,7 +475,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { } } - /// Suggest a missing `crate::` if that resolves to an correct module. + /// Suggests a missing `crate::` if that resolves to an correct module. /// /// ``` /// | @@ -102,7 +506,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { } } - /// Suggest a missing `super::` if that resolves to an correct module. + /// Suggests a missing `super::` if that resolves to an correct module. /// /// ``` /// | @@ -126,7 +530,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { } } - /// Suggest a missing external crate name if that resolves to an correct module. + /// Suggests a missing external crate name if that resolves to an correct module. /// /// ``` /// | @@ -147,7 +551,7 @@ impl<'a, 'b:'a, 'c: 'b> ImportResolver<'a, 'b, 'c> { } // Sort extern crate names in reverse order to get - // 1) some consistent ordering for emitted dignostics and + // 1) some consistent ordering for emitted dignostics, and // 2) `std` suggestions before `core` suggestions. 
let mut extern_crate_names = self.resolver.extern_prelude.iter().map(|(ident, _)| ident.name).collect::>(); diff --git a/src/librustc_resolve/lib.rs b/src/librustc_resolve/lib.rs index e449fece6b474..ffc783ae9f235 100644 --- a/src/librustc_resolve/lib.rs +++ b/src/librustc_resolve/lib.rs @@ -1,41 +1,19 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![feature(crate_visibility_modifier)] #![feature(label_break_value)] #![feature(nll)] #![feature(rustc_diagnostic_macros)] -#![feature(slice_sort_by_cached_key)] - -#[macro_use] -extern crate bitflags; -#[macro_use] -extern crate log; -#[macro_use] -extern crate syntax; -extern crate syntax_pos; -extern crate rustc_errors as errors; -extern crate arena; -#[macro_use] -extern crate rustc; -extern crate rustc_data_structures; -extern crate rustc_metadata; + +#![recursion_limit="256"] + +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] pub use rustc::hir::def::{Namespace, PerNS}; -use self::TypeParameters::*; -use self::RibKind::*; +use GenericParameters::*; +use RibKind::*; use rustc::hir::map::{Definitions, DefCollector}; use rustc::hir::{self, PrimTy, Bool, Char, Float, Int, Uint, Str}; @@ -46,9 +24,9 @@ use rustc::hir::def::*; use rustc::hir::def::Namespace::*; use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, DefId}; use rustc::hir::{Freevar, FreevarMap, TraitCandidate, TraitMap, GlobMap}; -use rustc::session::config::nightly_options; -use rustc::ty; +use rustc::ty::{self, DefIdTree}; use rustc::util::nodemap::{NodeMap, NodeSet, FxHashMap, FxHashSet, DefIdMap}; +use rustc::{bug, span_bug}; use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; @@ -70,12 +48,15 @@ use syntax::ast::{Item, ItemKind, ImplItem, ImplItemKind}; use syntax::ast::{Label, Local, Mutability, Pat, PatKind, Path}; use syntax::ast::{QSelf, TraitItemKind, TraitRef, Ty, TyKind}; use syntax::ptr::P; +use syntax::{span_err, struct_span_err, unwrap_or, walk_list}; -use syntax_pos::{Span, DUMMY_SP, MultiSpan}; +use syntax_pos::{BytePos, Span, DUMMY_SP, MultiSpan}; use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; +use log::debug; + use std::cell::{Cell, RefCell}; -use std::{cmp, fmt, iter, ptr}; +use std::{cmp, fmt, iter, mem, ptr}; use std::collections::BTreeSet; use std::mem::replace; use rustc_data_structures::ptr_key::PtrKey; @@ -111,6 +92,7 @@ enum ScopeSet { /// A free importable items suggested in case of resolution failure. struct ImportSuggestion { + did: Option, path: Path, } @@ -128,6 +110,16 @@ struct BindingError { target: BTreeSet, } +struct TypoSuggestion { + candidate: Symbol, + + /// The kind of the binding ("crate", "module", etc.) + kind: &'static str, + + /// An appropriate article to refer to the binding ("a", "an", etc.) 
+ article: &'static str, +} + impl PartialOrd for BindingError { fn partial_cmp(&self, other: &BindingError) -> Option { Some(self.cmp(other)) @@ -146,66 +138,71 @@ impl Ord for BindingError { } } +/// A span, message, replacement text, and applicability. +type Suggestion = (Span, String, String, Applicability); + enum ResolutionError<'a> { - /// error E0401: can't use type parameters from outer function - TypeParametersFromOuterFunction(Def), - /// error E0403: the name is already used for a type parameter in this type parameter list - NameAlreadyUsedInTypeParameterList(Name, &'a Span), - /// error E0407: method is not a member of trait + /// Error E0401: can't use type or const parameters from outer function. + GenericParamsFromOuterFunction(Def), + /// Error E0403: the name is already used for a type or const parameter in this generic + /// parameter list. + NameAlreadyUsedInParameterList(Name, &'a Span), + /// Error E0407: method is not a member of trait. MethodNotMemberOfTrait(Name, &'a str), - /// error E0437: type is not a member of trait + /// Error E0437: type is not a member of trait. TypeNotMemberOfTrait(Name, &'a str), - /// error E0438: const is not a member of trait + /// Error E0438: const is not a member of trait. ConstNotMemberOfTrait(Name, &'a str), - /// error E0408: variable `{}` is not bound in all patterns + /// Error E0408: variable `{}` is not bound in all patterns. VariableNotBoundInPattern(&'a BindingError), - /// error E0409: variable `{}` is bound in inconsistent ways within the same match arm + /// Error E0409: variable `{}` is bound in inconsistent ways within the same match arm. VariableBoundWithDifferentMode(Name, Span), - /// error E0415: identifier is bound more than once in this parameter list + /// Error E0415: identifier is bound more than once in this parameter list. IdentifierBoundMoreThanOnceInParameterList(&'a str), - /// error E0416: identifier is bound more than once in the same pattern + /// Error E0416: identifier is bound more than once in the same pattern. IdentifierBoundMoreThanOnceInSamePattern(&'a str), - /// error E0426: use of undeclared label + /// Error E0426: use of undeclared label. UndeclaredLabel(&'a str, Option), - /// error E0429: `self` imports are only allowed within a { } list + /// Error E0429: `self` imports are only allowed within a `{ }` list. SelfImportsOnlyAllowedWithin, - /// error E0430: `self` import can only appear once in the list + /// Error E0430: `self` import can only appear once in the list. SelfImportCanOnlyAppearOnceInTheList, - /// error E0431: `self` import can only appear in an import list with a non-empty prefix + /// Error E0431: `self` import can only appear in an import list with a non-empty prefix. SelfImportOnlyInImportListWithNonEmptyPrefix, - /// error E0433: failed to resolve - FailedToResolve(&'a str), - /// error E0434: can't capture dynamic environment in a fn item + /// Error E0433: failed to resolve. + FailedToResolve { label: String, suggestion: Option }, + /// Error E0434: can't capture dynamic environment in a fn item. CannotCaptureDynamicEnvironmentInFnItem, - /// error E0435: attempt to use a non-constant value in a constant + /// Error E0435: attempt to use a non-constant value in a constant. AttemptToUseNonConstantValueInConstant, - /// error E0530: X bindings cannot shadow Ys + /// Error E0530: `X` bindings cannot shadow `Y`s. 
BindingShadowsSomethingUnacceptable(&'a str, Name, &'a NameBinding<'a>), - /// error E0128: type parameters with a default cannot use forward declared identifiers - ForwardDeclaredTyParam, + /// Error E0128: type parameters with a default cannot use forward-declared identifiers. + ForwardDeclaredTyParam, // FIXME(const_generics:defaults) } -/// Combines an error with provided span and emits it +/// Combines an error with provided span and emits it. /// /// This takes the error provided, combines it with the span and any additional spans inside the /// error and emits it. -fn resolve_error<'sess, 'a>(resolver: &'sess Resolver, +fn resolve_error<'sess, 'a>(resolver: &'sess Resolver<'_>, span: Span, resolution_error: ResolutionError<'a>) { resolve_struct_error(resolver, span, resolution_error).emit(); } -fn resolve_struct_error<'sess, 'a>(resolver: &'sess Resolver, +fn resolve_struct_error<'sess, 'a>(resolver: &'sess Resolver<'_>, span: Span, resolution_error: ResolutionError<'a>) -> DiagnosticBuilder<'sess> { match resolution_error { - ResolutionError::TypeParametersFromOuterFunction(outer_def) => { + ResolutionError::GenericParamsFromOuterFunction(outer_def) => { let mut err = struct_span_err!(resolver.session, - span, - E0401, - "can't use type parameters from outer function"); - err.span_label(span, "use of type variable from outer function"); + span, + E0401, + "can't use generic parameters from outer function", + ); + err.span_label(span, format!("use of generic parameter from outer function")); let cm = resolver.session.source_map(); match outer_def { @@ -229,42 +226,48 @@ fn resolve_struct_error<'sess, 'a>(resolver: &'sess Resolver, } return err; }, - Def::TyParam(typaram_defid) => { - if let Some(typaram_span) = resolver.definitions.opt_span(typaram_defid) { - err.span_label(typaram_span, "type variable from outer function"); + Def::TyParam(def_id) => { + if let Some(span) = resolver.definitions.opt_span(def_id) { + err.span_label(span, "type variable from outer function"); } - }, + } + Def::ConstParam(def_id) => { + if let Some(span) = resolver.definitions.opt_span(def_id) { + err.span_label(span, "const variable from outer function"); + } + } _ => { - bug!("TypeParametersFromOuterFunction should only be used with Def::SelfTy or \ - Def::TyParam") + bug!("GenericParamsFromOuterFunction should only be used with Def::SelfTy, \ + Def::TyParam"); } } // Try to retrieve the span of the function signature and generate a new message with - // a local type parameter - let sugg_msg = "try using a local type parameter instead"; + // a local type or const parameter. 
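// A minimal sketch (not from the patch) of the new case covered by the
// renamed `GenericParamsFromOuterFunction` error: const parameters, like
// type parameters, cannot be used from a nested function. The feature-gate
// name reflects the unstable const generics work this diff supports.
#![feature(const_generics)]
fn outer<const N: usize>() {
    fn inner() -> usize {
        N
        //~^ ERROR can't use generic parameters from outer function
    }
}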
+ let sugg_msg = &format!("try using a local generic parameter instead"); if let Some((sugg_span, new_snippet)) = cm.generate_local_type_param_snippet(span) { // Suggest the modification to the user - err.span_suggestion_with_applicability( + err.span_suggestion( sugg_span, sugg_msg, new_snippet, Applicability::MachineApplicable, ); } else if let Some(sp) = cm.generate_fn_name_span(span) { - err.span_label(sp, "try adding a local type parameter in this method instead"); + err.span_label(sp, + format!("try adding a local generic parameter in this method instead")); } else { - err.help("try using a local type parameter instead"); + err.help(&format!("try using a local generic parameter instead")); } err } - ResolutionError::NameAlreadyUsedInTypeParameterList(name, first_use_span) => { + ResolutionError::NameAlreadyUsedInParameterList(name, first_use_span) => { let mut err = struct_span_err!(resolver.session, span, E0403, - "the name `{}` is already used for a type parameter \ - in this type parameter list", + "the name `{}` is already used for a generic \ + parameter in this list of generic parameters", name); err.span_label(span, "already used"); err.span_label(first_use_span.clone(), format!("first use of `{}`", name)); @@ -381,10 +384,15 @@ fn resolve_struct_error<'sess, 'a>(resolver: &'sess Resolver, err.span_label(span, "can only appear in an import list with a non-empty prefix"); err } - ResolutionError::FailedToResolve(msg) => { + ResolutionError::FailedToResolve { label, suggestion } => { let mut err = struct_span_err!(resolver.session, span, E0433, - "failed to resolve: {}", msg); - err.span_label(span, msg); + "failed to resolve: {}", &label); + err.span_label(span, label); + + if let Some((span, msg, suggestion, applicability)) = suggestion { + err.span_suggestion(span, &msg, suggestion, applicability); + } + err } ResolutionError::CannotCaptureDynamicEnvironmentInFnItem => { @@ -426,11 +434,11 @@ fn resolve_struct_error<'sess, 'a>(resolver: &'sess Resolver, /// Adjust the impl span so that just the `impl` keyword is taken by removing /// everything after `<` (`"impl Iterator for A {}" -> "impl"`) and -/// everything after the first whitespace (`"impl Iterator for A" -> "impl"`) +/// everything after the first whitespace (`"impl Iterator for A" -> "impl"`). /// -/// Attention: The method used is very fragile since it essentially duplicates the work of the +/// *Attention*: the method used is very fragile since it essentially duplicates the work of the /// parser. If you need to use this function or something similar, please consider updating the -/// source_map functions and this function to something more robust. +/// `source_map` functions and this function to something more robust. fn reduce_impl_span_to_impl_keyword(cm: &SourceMap, impl_span: Span) -> Span { let impl_span = cm.span_until_char(impl_span, '<'); let impl_span = cm.span_until_whitespace(impl_span); @@ -551,8 +559,7 @@ impl<'a> PathSource<'a> { Def::Struct(..) | Def::Union(..) | Def::Enum(..) | Def::Trait(..) | Def::TraitAlias(..) | Def::TyAlias(..) | Def::AssociatedTy(..) | Def::PrimTy(..) | Def::TyParam(..) | - Def::SelfTy(..) | Def::Existential(..) | - Def::ForeignTy(..) => true, + Def::SelfTy(..) | Def::Existential(..) | Def::ForeignTy(..) => true, _ => false, }, PathSource::Trait(AliasPossibility::No) => match def { @@ -565,24 +572,20 @@ impl<'a> PathSource<'a> { _ => false, }, PathSource::Expr(..) 
=> match def { - Def::StructCtor(_, CtorKind::Const) | Def::StructCtor(_, CtorKind::Fn) | - Def::VariantCtor(_, CtorKind::Const) | Def::VariantCtor(_, CtorKind::Fn) | + Def::Ctor(_, _, CtorKind::Const) | Def::Ctor(_, _, CtorKind::Fn) | Def::Const(..) | Def::Static(..) | Def::Local(..) | Def::Upvar(..) | Def::Fn(..) | Def::Method(..) | Def::AssociatedConst(..) | - Def::SelfCtor(..) => true, + Def::SelfCtor(..) | Def::ConstParam(..) => true, _ => false, }, PathSource::Pat => match def { - Def::StructCtor(_, CtorKind::Const) | - Def::VariantCtor(_, CtorKind::Const) | + Def::Ctor(_, _, CtorKind::Const) | Def::Const(..) | Def::AssociatedConst(..) | Def::SelfCtor(..) => true, _ => false, }, PathSource::TupleStruct => match def { - Def::StructCtor(_, CtorKind::Fn) | - Def::VariantCtor(_, CtorKind::Fn) | - Def::SelfCtor(..) => true, + Def::Ctor(_, _, CtorKind::Fn) | Def::SelfCtor(..) => true, _ => false, }, PathSource::Struct => match def { @@ -741,8 +744,8 @@ impl<'tcx> Visitor<'tcx> for UsePlacementFinder { } } -/// This thing walks the whole crate in DFS manner, visiting each item, resolving names as it goes. -impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { +/// Walks the whole crate in DFS order, visiting each item, resolving names as it goes. +impl<'a, 'tcx> Visitor<'tcx> for Resolver<'a> { fn visit_item(&mut self, item: &'tcx Item) { self.resolve_item(item); } @@ -753,6 +756,7 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { self.resolve_block(block); } fn visit_anon_const(&mut self, constant: &'tcx ast::AnonConst) { + debug!("visit_anon_const {:?}", constant); self.with_constant_rib(|this| { visit::walk_anon_const(this, constant); }); @@ -786,15 +790,15 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { visit::walk_poly_trait_ref(self, tref, m); } fn visit_foreign_item(&mut self, foreign_item: &'tcx ForeignItem) { - let type_parameters = match foreign_item.node { + let generic_params = match foreign_item.node { ForeignItemKind::Fn(_, ref generics) => { - HasTypeParameters(generics, ItemRibKind) + HasGenericParams(generics, ItemRibKind) } - ForeignItemKind::Static(..) => NoTypeParameters, - ForeignItemKind::Ty => NoTypeParameters, - ForeignItemKind::Macro(..) => NoTypeParameters, + ForeignItemKind::Static(..) => NoGenericParams, + ForeignItemKind::Ty => NoGenericParams, + ForeignItemKind::Macro(..) => NoGenericParams, }; - self.with_type_parameter_rib(type_parameters, |this| { + self.with_generic_param_rib(generic_params, |this| { visit::walk_foreign_item(this, foreign_item); }); } @@ -804,11 +808,12 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { _: Span, node_id: NodeId) { + debug!("(resolving function) entering function"); let (rib_kind, asyncness) = match function_kind { FnKind::ItemFn(_, ref header, ..) => - (ItemRibKind, header.asyncness), + (FnItemRibKind, header.asyncness.node), FnKind::Method(_, ref sig, _, _) => - (TraitOrImplItemRibKind, sig.header.asyncness), + (TraitOrImplItemRibKind, sig.header.asyncness.node), FnKind::Closure(_) => // Async closures aren't resolved through `visit_fn`-- they're // processed separately @@ -860,9 +865,10 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { self.label_ribs.pop(); self.ribs[ValueNS].pop(); } + fn visit_generics(&mut self, generics: &'tcx Generics) { // For type parameter defaults, we have to ban access - // to following type parameters, as the Substs can only + // to following type parameters, as the InternalSubsts can only // provide previous type parameters as they're built. 
We // put all the parameters on the ban list and then remove // them one by one as they are processed and become available. @@ -870,6 +876,7 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { let mut found_default = false; default_ban_rib.bindings.extend(generics.params.iter() .filter_map(|param| match param.kind { + GenericParamKind::Const { .. } | GenericParamKind::Lifetime { .. } => None, GenericParamKind::Type { ref default, .. } => { found_default |= default.is_some(); @@ -898,6 +905,13 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { // Allow all following defaults to refer to this type parameter. default_ban_rib.bindings.remove(&Ident::with_empty_ctxt(param.ident.name)); } + GenericParamKind::Const { ref ty } => { + for bound in ¶m.bounds { + self.visit_param_bound(bound); + } + + self.visit_ty(ty); + } } } for p in &generics.where_clause.predicates { @@ -907,9 +921,9 @@ impl<'a, 'tcx, 'cl> Visitor<'tcx> for Resolver<'a, 'cl> { } #[derive(Copy, Clone)] -enum TypeParameters<'a, 'b> { - NoTypeParameters, - HasTypeParameters(// Type parameters. +enum GenericParameters<'a, 'b> { + NoGenericParams, + HasGenericParams(// Type parameters. &'b Generics, // The kind of the rib used for type parameters. @@ -923,7 +937,7 @@ enum RibKind<'a> { /// No translation needs to be applied. NormalRibKind, - /// We passed through a closure scope at the given node ID. + /// We passed through a closure scope at the given `NodeId`. /// Translate upvars as appropriate. ClosureRibKind(NodeId /* func id */), @@ -933,6 +947,10 @@ enum RibKind<'a> { /// upvars). TraitOrImplItemRibKind, + /// We passed through a function definition. Disallow upvars. + /// Permit only those const parameters that are specified in the function's generics. + FnItemRibKind, + /// We passed through an item scope. Disallow upvars. ItemRibKind, @@ -951,7 +969,7 @@ enum RibKind<'a> { ForwardTyParamBanRibKind, } -/// One local scope. +/// A single local scope. /// /// A rib represents a scope names can live in. Note that these appear in many places, not just /// around braces. At any place where the list of accessible names (of the given namespace) @@ -1013,7 +1031,7 @@ enum ModuleOrUniformRoot<'a> { CrateRootAndExternPrelude, /// Virtual module that denotes resolution in extern prelude. - /// Used for paths starting with `::` on 2018 edition or `extern::`. + /// Used for paths starting with `::` on 2018 edition. ExternPrelude, /// Virtual module that denotes resolution in current scope. @@ -1022,11 +1040,11 @@ enum ModuleOrUniformRoot<'a> { CurrentScope, } -impl<'a> PartialEq for ModuleOrUniformRoot<'a> { - fn eq(&self, other: &Self) -> bool { - match (*self, *other) { +impl ModuleOrUniformRoot<'_> { + fn same_def(lhs: Self, rhs: Self) -> bool { + match (lhs, rhs) { (ModuleOrUniformRoot::Module(lhs), - ModuleOrUniformRoot::Module(rhs)) => ptr::eq(lhs, rhs), + ModuleOrUniformRoot::Module(rhs)) => lhs.def() == rhs.def(), (ModuleOrUniformRoot::CrateRootAndExternPrelude, ModuleOrUniformRoot::CrateRootAndExternPrelude) | (ModuleOrUniformRoot::ExternPrelude, ModuleOrUniformRoot::ExternPrelude) | @@ -1041,11 +1059,16 @@ enum PathResult<'a> { Module(ModuleOrUniformRoot<'a>), NonModule(PathResolution), Indeterminate, - Failed(Span, String, bool /* is the error from the last segment? */), + Failed { + span: Span, + label: String, + suggestion: Option, + is_error_from_last_segment: bool, + }, } enum ModuleKind { - /// An anonymous module, eg. just a block. + /// An anonymous module; e.g., just a block. 
/// /// ``` /// fn main() { @@ -1173,10 +1196,6 @@ impl<'a> ModuleData<'a> { } } - fn is_local(&self) -> bool { - self.normal_ancestor_id.is_local() - } - fn nearest_item_scope(&'a self) -> Module<'a> { if self.is_trait() { self.parent.unwrap() } else { self } } @@ -1194,7 +1213,7 @@ impl<'a> ModuleData<'a> { } impl<'a> fmt::Debug for ModuleData<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:?}", self.def()) } } @@ -1203,6 +1222,7 @@ impl<'a> fmt::Debug for ModuleData<'a> { #[derive(Clone, Debug)] pub struct NameBinding<'a> { kind: NameBindingKind<'a>, + ambiguity: Option<(&'a NameBinding<'a>, AmbiguityKind)>, expansion: Mark, span: Span, vis: ty::Visibility, @@ -1227,10 +1247,15 @@ enum NameBindingKind<'a> { directive: &'a ImportDirective<'a>, used: Cell, }, - Ambiguity { - kind: AmbiguityKind, - b1: &'a NameBinding<'a>, - b2: &'a NameBinding<'a>, +} + +impl<'a> NameBindingKind<'a> { + /// Is this a name binding of a import? + fn is_import(&self) -> bool { + match *self { + NameBindingKind::Import { .. } => true, + _ => false, + } } } @@ -1238,18 +1263,17 @@ struct PrivacyError<'a>(Span, Ident, &'a NameBinding<'a>); struct UseError<'a> { err: DiagnosticBuilder<'a>, - /// Attach `use` statements for these candidates + /// Attach `use` statements for these candidates. candidates: Vec, - /// The node id of the module to place the use statements in + /// The `NodeId` of the module to place the use-statements in. node_id: NodeId, - /// Whether the diagnostic should state that it's "better" + /// Whether the diagnostic should state that it's "better". better: bool, } #[derive(Clone, Copy, PartialEq, Debug)] enum AmbiguityKind { Import, - AbsolutePath, BuiltinAttr, DeriveHelper, LegacyHelperVsPrelude, @@ -1265,8 +1289,6 @@ impl AmbiguityKind { match self { AmbiguityKind::Import => "name vs any other name during import resolution", - AmbiguityKind::AbsolutePath => - "name in the crate root vs extern crate during absolute path resolution", AmbiguityKind::BuiltinAttr => "built-in attribute vs any other name", AmbiguityKind::DeriveHelper => @@ -1321,19 +1343,17 @@ impl<'a> NameBinding<'a> { NameBindingKind::Def(def, _) => def, NameBindingKind::Module(module) => module.def().unwrap(), NameBindingKind::Import { binding, .. } => binding.def(), - NameBindingKind::Ambiguity { .. } => Def::Err, } } - fn def_ignoring_ambiguity(&self) -> Def { - match self.kind { - NameBindingKind::Import { binding, .. } => binding.def_ignoring_ambiguity(), - NameBindingKind::Ambiguity { b1, .. } => b1.def_ignoring_ambiguity(), - _ => self.def(), + fn is_ambiguity(&self) -> bool { + self.ambiguity.is_some() || match self.kind { + NameBindingKind::Import { binding, .. } => binding.is_ambiguity(), + _ => false, } } - // We sometimes need to treat variants as `pub` for backwards compatibility + // We sometimes need to treat variants as `pub` for backwards compatibility. fn pseudo_vis(&self) -> ty::Visibility { if self.is_variant() && self.def().def_id().is_local() { ty::Visibility::Public @@ -1345,7 +1365,7 @@ impl<'a> NameBinding<'a> { fn is_variant(&self) -> bool { match self.kind { NameBindingKind::Def(Def::Variant(..), _) | - NameBindingKind::Def(Def::VariantCtor(..), _) => true, + NameBindingKind::Def(Def::Ctor(_, CtorOf::Variant, ..), _) => true, _ => false, } } @@ -1374,7 +1394,6 @@ impl<'a> NameBinding<'a> { fn is_glob_import(&self) -> bool { match self.kind { NameBindingKind::Import { directive, .. 
} => directive.is_glob(), - NameBindingKind::Ambiguity { b1, .. } => b1.is_glob_import(), _ => false, } } @@ -1394,7 +1413,7 @@ impl<'a> NameBinding<'a> { } fn macro_kind(&self) -> Option { - match self.def_ignoring_ambiguity() { + match self.def() { Def::Macro(_, kind) => Some(kind), Def::NonMacroAttr(..) => Some(MacroKind::Attr), _ => None, @@ -1415,7 +1434,7 @@ impl<'a> NameBinding<'a> { // in some later round and screw up our previously found resolution. // See more detailed explanation in // https://github.com/rust-lang/rust/pull/53778#issuecomment-419224049 - fn may_appear_after(&self, invoc_parent_expansion: Mark, binding: &NameBinding) -> bool { + fn may_appear_after(&self, invoc_parent_expansion: Mark, binding: &NameBinding<'_>) -> bool { // self > max(invoc, binding) => !(self <= invoc || self <= binding) // Expansions are partially ordered, so "may appear after" is an inversion of // "certainly appears before or simultaneously" and includes unordered cases. @@ -1467,7 +1486,7 @@ impl PrimitiveTypeTable { } } -#[derive(Default, Clone)] +#[derive(Debug, Default, Clone)] pub struct ExternPreludeEntry<'a> { extern_crate_item: Option<&'a NameBinding<'a>>, pub introduced_by_item: bool, @@ -1476,7 +1495,7 @@ pub struct ExternPreludeEntry<'a> { /// The main resolver class. /// /// This is the visitor that walks the whole crate. -pub struct Resolver<'a, 'b: 'a> { +pub struct Resolver<'a> { session: &'a Session, cstore: &'a CStore, @@ -1487,7 +1506,7 @@ pub struct Resolver<'a, 'b: 'a> { prelude: Option>, pub extern_prelude: FxHashMap>, - /// n.b. This is used only for better diagnostics, not name resolution itself. + /// N.B., this is used only for better diagnostics, not name resolution itself. has_self: FxHashSet, /// Names of fields of an item `DefId` accessible with dot syntax. @@ -1555,9 +1574,7 @@ pub struct Resolver<'a, 'b: 'a> { extern_module_map: FxHashMap<(DefId, bool /* MacrosOnly? */), Module<'a>>, binding_parent_modules: FxHashMap>, Module<'a>>, - pub make_glob_map: bool, - /// Maps imports to the names of items actually imported (this actually maps - /// all imports, but only glob imports are actually interesting). + /// Maps glob imports to the names of items actually imported. pub glob_map: GlobMap, used_imports: FxHashSet<(NodeId, Namespace)>, @@ -1568,19 +1585,19 @@ pub struct Resolver<'a, 'b: 'a> { /// they are used (in a `break` or `continue` statement) pub unused_labels: FxHashMap, - /// privacy errors are delayed until the end in order to deduplicate them + /// Privacy errors are delayed until the end in order to deduplicate them. privacy_errors: Vec>, - /// ambiguity errors are delayed for deduplication + /// Ambiguity errors are delayed for deduplication. ambiguity_errors: Vec>, - /// `use` injections are delayed for better placement and deduplication + /// `use` injections are delayed for better placement and deduplication. use_injections: Vec>, - /// crate-local macro expanded `macro_export` referred to by a module-relative path + /// Crate-local macro expanded `macro_export` referred to by a module-relative path. 
macro_expanded_macro_export_errors: BTreeSet<(Span, Span)>, arenas: &'a ResolverArenas<'a>, dummy_binding: &'a NameBinding<'a>, - crate_loader: &'a mut CrateLoader<'b>, + crate_loader: &'a mut CrateLoader<'a>, macro_names: FxHashSet, builtin_macros: FxHashMap>, macro_use_prelude: FxHashMap>, @@ -1588,7 +1605,6 @@ pub struct Resolver<'a, 'b: 'a> { macro_map: FxHashMap>, macro_defs: FxHashMap, local_macro_def_scopes: FxHashMap>, - pub found_unresolved_macro: bool, /// List of crate local macros that we need to warn about as being unused. /// Right now this only includes macro_rules! macros, and macros 2.0. @@ -1602,17 +1618,17 @@ pub struct Resolver<'a, 'b: 'a> { potentially_unused_imports: Vec<&'a ImportDirective<'a>>, - /// This table maps struct IDs into struct constructor IDs, + /// Table for mapping struct IDs into struct constructor IDs, /// it's not used during normal resolution, only for better error reporting. struct_constructors: DefIdMap<(Def, ty::Visibility)>, - /// Only used for better errors on `fn(): fn()` + /// Only used for better errors on `fn(): fn()`. current_type_ascription: Vec, injected_crate: Option>, } -/// Nothing really interesting here, it just provides memory for the rest of the crate. +/// Nothing really interesting here; it just provides memory for the rest of the crate. #[derive(Default)] pub struct ResolverArenas<'a> { modules: arena::TypedArena>, @@ -1632,14 +1648,14 @@ impl<'a> ResolverArenas<'a> { } module } - fn local_modules(&'a self) -> ::std::cell::Ref<'a, Vec>> { + fn local_modules(&'a self) -> std::cell::Ref<'a, Vec>> { self.local_modules.borrow() } fn alloc_name_binding(&'a self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> { self.name_bindings.alloc(name_binding) } fn alloc_import_directive(&'a self, import_directive: ImportDirective<'a>) - -> &'a ImportDirective { + -> &'a ImportDirective<'_> { self.import_directives.alloc(import_directive) } fn alloc_name_resolution(&'a self) -> &'a RefCell> { @@ -1654,7 +1670,7 @@ impl<'a> ResolverArenas<'a> { } } -impl<'a, 'b: 'a, 'cl: 'b> ty::DefIdTree for &'a Resolver<'b, 'cl> { +impl<'a, 'b: 'a> ty::DefIdTree for &'a Resolver<'b> { fn parent(self, id: DefId) -> Option { match id.krate { LOCAL_CRATE => self.definitions.def_key(id.index).parent, @@ -1665,7 +1681,7 @@ impl<'a, 'b: 'a, 'cl: 'b> ty::DefIdTree for &'a Resolver<'b, 'cl> { /// This interface is used through the AST→HIR step, to embed full paths into the HIR. After that /// the resolver is no longer needed as all the relevant information is inline. -impl<'a, 'cl> hir::lowering::Resolver for Resolver<'a, 'cl> { +impl<'a> hir::lowering::Resolver for Resolver<'a> { fn resolve_hir_path( &mut self, path: &ast::Path, @@ -1682,7 +1698,12 @@ impl<'a, 'cl> hir::lowering::Resolver for Resolver<'a, 'cl> { components: &[&str], is_value: bool ) -> hir::Path { - let segments = iter::once(keywords::PathRoot.ident()) + let root = if crate_root.is_some() { + keywords::PathRoot + } else { + keywords::Crate + }; + let segments = iter::once(root.ident()) .chain( crate_root.into_iter() .chain(components.iter().cloned()) @@ -1711,14 +1732,13 @@ impl<'a, 'cl> hir::lowering::Resolver for Resolver<'a, 'cl> { } } -impl<'a, 'crateloader> Resolver<'a, 'crateloader> { - /// Rustdoc uses this to resolve things in a recoverable way. ResolutionError<'a> +impl<'a> Resolver<'a> { + /// Rustdoc uses this to resolve things in a recoverable way. 
`ResolutionError<'a>` /// isn't something that can be returned because it can't be made to live that long, /// and also it's a private type. Fortunately rustdoc doesn't need to know the error, /// just that an error occurred. pub fn resolve_str_path_error(&mut self, span: Span, path_str: &str, is_value: bool) -> Result { - use std::iter; let mut errored = false; let path = if path_str.starts_with("::") { @@ -1749,20 +1769,20 @@ impl<'a, 'crateloader> Resolver<'a, 'crateloader> { } } - /// resolve_hir_path, but takes a callback in case there was an error + /// Like `resolve_hir_path`, but takes a callback in case there was an error. fn resolve_hir_path_cb( &mut self, path: &ast::Path, is_value: bool, error_callback: F, ) -> hir::Path - where F: for<'c, 'b> FnOnce(&'c mut Resolver, Span, ResolutionError<'b>) + where F: for<'c, 'b> FnOnce(&'c mut Resolver<'_>, Span, ResolutionError<'b>) { let namespace = if is_value { ValueNS } else { TypeNS }; let span = path.span; let segments = &path.segments; let path = Segment::from_path(&path); - // FIXME (Manishearth): Intra doc links won't get warned of epoch changes + // FIXME(Manishearth): intra-doc links won't get warned of epoch changes. let def = match self.resolve_path_without_parent_scope(&path, Some(namespace), true, span, CrateLint::No) { PathResult::Module(ModuleOrUniformRoot::Module(module)) => @@ -1770,13 +1790,18 @@ impl<'a, 'crateloader> Resolver<'a, 'crateloader> { PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => path_res.base_def(), PathResult::NonModule(..) => { - let msg = "type-relative paths are not supported in this context"; - error_callback(self, span, ResolutionError::FailedToResolve(msg)); + error_callback(self, span, ResolutionError::FailedToResolve { + label: String::from("type-relative paths are not supported in this context"), + suggestion: None, + }); Def::Err } PathResult::Module(..) | PathResult::Indeterminate => unreachable!(), - PathResult::Failed(span, msg, _) => { - error_callback(self, span, ResolutionError::FailedToResolve(&msg)); + PathResult::Failed { span, label, suggestion, .. 
} => { + error_callback(self, span, ResolutionError::FailedToResolve { + label, + suggestion, + }); Def::Err } }; @@ -1800,15 +1825,14 @@ impl<'a, 'crateloader> Resolver<'a, 'crateloader> { } } -impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { +impl<'a> Resolver<'a> { pub fn new(session: &'a Session, cstore: &'a CStore, krate: &Crate, crate_name: &str, - make_glob_map: MakeGlobMap, - crate_loader: &'a mut CrateLoader<'crateloader>, + crate_loader: &'a mut CrateLoader<'a>, arenas: &'a ResolverArenas<'a>) - -> Resolver<'a, 'crateloader> { + -> Resolver<'a> { let root_def_id = DefId::local(CRATE_DEF_INDEX); let root_module_kind = ModuleKind::Def(Def::Mod(root_def_id), keywords::Invalid.name()); let graph_root = arenas.alloc_module(ModuleData { @@ -1822,7 +1846,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { DefCollector::new(&mut definitions, Mark::root()) .collect_root(crate_name, session.local_crate_disambiguator()); - let mut extern_prelude: FxHashMap = + let mut extern_prelude: FxHashMap> = session.opts.externs.iter().map(|kv| (Ident::from_str(kv.0), Default::default())) .collect(); @@ -1889,7 +1913,6 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { extern_module_map: FxHashMap::default(), binding_parent_modules: FxHashMap::default(), - make_glob_map: make_glob_map == MakeGlobMap::Yes, glob_map: Default::default(), used_imports: FxHashSet::default(), @@ -1906,6 +1929,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { arenas, dummy_binding: arenas.alloc_name_binding(NameBinding { kind: NameBindingKind::Def(Def::Err, false), + ambiguity: None, expansion: Mark::root(), span: DUMMY_SP, vis: ty::Visibility::Public, @@ -1923,7 +1947,6 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { name_already_seen: FxHashMap::default(), potentially_unused_imports: Vec::new(), struct_constructors: Default::default(), - found_unresolved_macro: false, unused_macros: FxHashSet::default(), current_type_ascription: Vec::new(), injected_crate: None, @@ -1977,39 +2000,37 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { fn record_use(&mut self, ident: Ident, ns: Namespace, used_binding: &'a NameBinding<'a>, is_lexical_scope: bool) { - match used_binding.kind { - NameBindingKind::Import { directive, binding, ref used } if !used.get() => { - // Avoid marking `extern crate` items that refer to a name from extern prelude, - // but not introduce it, as used if they are accessed from lexical scope. - if is_lexical_scope { - if let Some(entry) = self.extern_prelude.get(&ident.modern()) { - if let Some(crate_item) = entry.extern_crate_item { - if ptr::eq(used_binding, crate_item) && !entry.introduced_by_item { - return; - } + if let Some((b2, kind)) = used_binding.ambiguity { + self.ambiguity_errors.push(AmbiguityError { + kind, ident, b1: used_binding, b2, + misc1: AmbiguityErrorMisc::None, + misc2: AmbiguityErrorMisc::None, + }); + } + if let NameBindingKind::Import { directive, binding, ref used } = used_binding.kind { + // Avoid marking `extern crate` items that refer to a name from extern prelude, + // but not introduce it, as used if they are accessed from lexical scope. 
+ if is_lexical_scope { + if let Some(entry) = self.extern_prelude.get(&ident.modern()) { + if let Some(crate_item) = entry.extern_crate_item { + if ptr::eq(used_binding, crate_item) && !entry.introduced_by_item { + return; } } } - used.set(true); - directive.used.set(true); - self.used_imports.insert((directive.id, ns)); - self.add_to_glob_map(directive.id, ident); - self.record_use(ident, ns, binding, false); - } - NameBindingKind::Ambiguity { kind, b1, b2 } => { - self.ambiguity_errors.push(AmbiguityError { - kind, ident, b1, b2, - misc1: AmbiguityErrorMisc::None, - misc2: AmbiguityErrorMisc::None, - }); } - _ => {} + used.set(true); + directive.used.set(true); + self.used_imports.insert((directive.id, ns)); + self.add_to_glob_map(&directive, ident); + self.record_use(ident, ns, binding, false); } } - fn add_to_glob_map(&mut self, id: NodeId, ident: Ident) { - if self.make_glob_map { - self.glob_map.entry(id).or_default().insert(ident.name); + #[inline] + fn add_to_glob_map(&mut self, directive: &ImportDirective<'_>, ident: Ident) { + if directive.is_glob() { + self.glob_map.entry(directive.id).or_default().insert(ident.name); } } @@ -2036,22 +2057,24 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { record_used_id: Option, path_span: Span) -> Option> { - let record_used = record_used_id.is_some(); assert!(ns == TypeNS || ns == ValueNS); - if ns == TypeNS { - ident.span = if ident.name == keywords::SelfUpper.name() { - // FIXME(jseyfried) improve `Self` hygiene - ident.span.with_ctxt(SyntaxContext::empty()) - } else { - ident.span.modern() - } + if ident.name == keywords::Invalid.name() { + return Some(LexicalScopeBinding::Def(Def::Err)); + } + ident.span = if ident.name == keywords::SelfUpper.name() { + // FIXME(jseyfried) improve `Self` hygiene + ident.span.with_ctxt(SyntaxContext::empty()) + } else if ns == TypeNS { + ident.span.modern() } else { - ident = ident.modern_and_legacy(); - } + ident.span.modern_and_legacy() + }; // Walk backwards up the ribs in scope. + let record_used = record_used_id.is_some(); let mut module = self.graph_root; for i in (0 .. self.ribs[ns].len()).rev() { + debug!("walk rib\n{:?}", self.ribs[ns][i].bindings); if let Some(def) = self.ribs[ns][i].bindings.get(&ident).cloned() { // The ident resolves to a type parameter or local variable. return Some(LexicalScopeBinding::Def( @@ -2320,7 +2343,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // implementations thus found, for compatibility with old resolve pass. pub fn with_scope(&mut self, id: NodeId, f: F) -> T - where F: FnOnce(&mut Resolver) -> T + where F: FnOnce(&mut Resolver<'_>) -> T { let id = self.definitions.local_def_id(id); let module = self.module_map.get(&id).cloned(); // clones a reference @@ -2342,12 +2365,12 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } } - /// Searches the current set of local scopes for labels. Returns the first non-None label that + /// Searches the current set of local scopes for labels. Returns the first non-`None` label that /// is returned by the given predicate function /// /// Stops after meeting a closure. 
fn search_label(&self, mut ident: Ident, pred: P) -> Option - where P: Fn(&Rib, Ident) -> Option + where P: Fn(&Rib<'_>, Ident) -> Option { for rib in self.label_ribs.iter().rev() { match rib.kind { @@ -2373,8 +2396,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn resolve_adt(&mut self, item: &Item, generics: &Generics) { + debug!("resolve_adt"); self.with_current_self_item(item, |this| { - this.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { + this.with_generic_param_rib(HasGenericParams(generics, ItemRibKind), |this| { let item_def_id = this.definitions.local_def_id(item.id); this.with_self_rib(Def::SelfTy(None, Some(item_def_id)), |this| { visit::walk_item(this, item); @@ -2395,11 +2419,27 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { ast::UseTreeKind::Simple(..) if segments.len() == 1 => &[TypeNS, ValueNS][..], _ => &[TypeNS], }; + let report_error = |this: &Self, ns| { + let what = if ns == TypeNS { "type parameters" } else { "local variables" }; + this.session.span_err(ident.span, &format!("imports cannot refer to {}", what)); + }; + for &ns in nss { - if let Some(LexicalScopeBinding::Def(..)) = - self.resolve_ident_in_lexical_scope(ident, ns, None, use_tree.prefix.span) { - let what = if ns == TypeNS { "type parameters" } else { "local variables" }; - self.session.span_err(ident.span, &format!("imports cannot refer to {}", what)); + match self.resolve_ident_in_lexical_scope(ident, ns, None, use_tree.prefix.span) { + Some(LexicalScopeBinding::Def(..)) => { + report_error(self, ns); + } + Some(LexicalScopeBinding::Item(binding)) => { + let orig_blacklisted_binding = + mem::replace(&mut self.blacklisted_binding, Some(binding)); + if let Some(LexicalScopeBinding::Def(..)) = + self.resolve_ident_in_lexical_scope(ident, ns, None, + use_tree.prefix.span) { + report_error(self, ns); + } + self.blacklisted_binding = orig_blacklisted_binding; + } + None => {} } } } else if let ast::UseTreeKind::Nested(use_trees) = &use_tree.kind { @@ -2411,13 +2451,13 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { fn resolve_item(&mut self, item: &Item) { let name = item.ident.name; - debug!("(resolving item) resolving {}", name); + debug!("(resolving item) resolving {} ({:?})", name, item.node); match item.node { ItemKind::Ty(_, ref generics) | ItemKind::Fn(_, _, ref generics, _) | ItemKind::Existential(_, ref generics) => { - self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), + self.with_generic_param_rib(HasGenericParams(generics, ItemRibKind), |this| visit::walk_item(this, item)); } @@ -2436,16 +2476,16 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { ItemKind::Trait(.., ref generics, ref bounds, ref trait_items) => { // Create a new rib for the trait-wide type parameters. 
- self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { + self.with_generic_param_rib(HasGenericParams(generics, ItemRibKind), |this| { let local_def_id = this.definitions.local_def_id(item.id); this.with_self_rib(Def::SelfTy(Some(local_def_id), None), |this| { this.visit_generics(generics); walk_list!(this, visit_param_bound, bounds); for trait_item in trait_items { - let type_parameters = HasTypeParameters(&trait_item.generics, + let generic_params = HasGenericParams(&trait_item.generics, TraitOrImplItemRibKind); - this.with_type_parameter_rib(type_parameters, |this| { + this.with_generic_param_rib(generic_params, |this| { match trait_item.node { TraitItemKind::Const(ref ty, ref default) => { this.visit_ty(ty); @@ -2477,7 +2517,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { ItemKind::TraitAlias(ref generics, ref bounds) => { // Create a new rib for the trait-wide type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { + self.with_generic_param_rib(HasGenericParams(generics, ItemRibKind), |this| { let local_def_id = this.definitions.local_def_id(item.id); this.with_self_rib(Def::SelfTy(Some(local_def_id), None), |this| { this.visit_generics(generics); @@ -2494,6 +2534,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { ItemKind::Static(ref ty, _, ref expr) | ItemKind::Const(ref ty, ref expr) => { + debug!("resolve_item ItemKind::Const"); self.with_item_rib(|this| { this.visit_ty(ty); this.with_constant_rib(|this| { @@ -2515,23 +2556,25 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } } - fn with_type_parameter_rib<'b, F>(&'b mut self, type_parameters: TypeParameters<'a, 'b>, f: F) - where F: FnOnce(&mut Resolver) + fn with_generic_param_rib<'b, F>(&'b mut self, generic_params: GenericParameters<'a, 'b>, f: F) + where F: FnOnce(&mut Resolver<'_>) { - match type_parameters { - HasTypeParameters(generics, rib_kind) => { + debug!("with_generic_param_rib"); + match generic_params { + HasGenericParams(generics, rib_kind) => { let mut function_type_rib = Rib::new(rib_kind); + let mut function_value_rib = Rib::new(rib_kind); let mut seen_bindings = FxHashMap::default(); for param in &generics.params { match param.kind { GenericParamKind::Lifetime { .. } => {} GenericParamKind::Type { .. } => { let ident = param.ident.modern(); - debug!("with_type_parameter_rib: {}", param.id); + debug!("with_generic_param_rib: {}", param.id); if seen_bindings.contains_key(&ident) { let span = seen_bindings.get(&ident).unwrap(); - let err = ResolutionError::NameAlreadyUsedInTypeParameterList( + let err = ResolutionError::NameAlreadyUsedInParameterList( ident.name, span, ); @@ -2544,25 +2587,45 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { function_type_rib.bindings.insert(ident, def); self.record_def(param.id, PathResolution::new(def)); } + GenericParamKind::Const { .. 
} => { + let ident = param.ident.modern(); + debug!("with_generic_param_rib: {}", param.id); + + if seen_bindings.contains_key(&ident) { + let span = seen_bindings.get(&ident).unwrap(); + let err = ResolutionError::NameAlreadyUsedInParameterList( + ident.name, + span, + ); + resolve_error(self, param.ident.span, err); + } + seen_bindings.entry(ident).or_insert(param.ident.span); + + let def = Def::ConstParam(self.definitions.local_def_id(param.id)); + function_value_rib.bindings.insert(ident, def); + self.record_def(param.id, PathResolution::new(def)); + } } } + self.ribs[ValueNS].push(function_value_rib); self.ribs[TypeNS].push(function_type_rib); } - NoTypeParameters => { + NoGenericParams => { // Nothing to do. } } f(self); - if let HasTypeParameters(..) = type_parameters { + if let HasGenericParams(..) = generic_params { self.ribs[TypeNS].pop(); + self.ribs[ValueNS].pop(); } } fn with_label_rib(&mut self, f: F) - where F: FnOnce(&mut Resolver) + where F: FnOnce(&mut Resolver<'_>) { self.label_ribs.push(Rib::new(NormalRibKind)); f(self); @@ -2570,7 +2633,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_item_rib(&mut self, f: F) - where F: FnOnce(&mut Resolver) + where F: FnOnce(&mut Resolver<'_>) { self.ribs[ValueNS].push(Rib::new(ItemRibKind)); self.ribs[TypeNS].push(Rib::new(ItemRibKind)); @@ -2580,8 +2643,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_constant_rib(&mut self, f: F) - where F: FnOnce(&mut Resolver) + where F: FnOnce(&mut Resolver<'_>) { + debug!("with_constant_rib"); self.ribs[ValueNS].push(Rib::new(ConstantItemRibKind)); self.label_ribs.push(Rib::new(ConstantItemRibKind)); f(self); @@ -2590,7 +2654,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_current_self_type(&mut self, self_type: &Ty, f: F) -> T - where F: FnOnce(&mut Resolver) -> T + where F: FnOnce(&mut Resolver<'_>) -> T { // Handle nested impls (inside fn bodies) let previous_value = replace(&mut self.current_self_type, Some(self_type.clone())); @@ -2600,7 +2664,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_current_self_item(&mut self, self_item: &Item, f: F) -> T - where F: FnOnce(&mut Resolver) -> T + where F: FnOnce(&mut Resolver<'_>) -> T { let previous_value = replace(&mut self.current_self_item, Some(self_item.id)); let result = f(self); @@ -2608,9 +2672,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { result } - /// This is called to resolve a trait reference from an `impl` (i.e., `impl Trait for Foo`) + /// This is called to resolve a trait reference from an `impl` (i.e., `impl Trait for Foo`). fn with_optional_trait_ref(&mut self, opt_trait_ref: Option<&TraitRef>, f: F) -> T - where F: FnOnce(&mut Resolver, Option) -> T + where F: FnOnce(&mut Resolver<'_>, Option) -> T { let mut new_val = None; let mut new_id = None; @@ -2647,11 +2711,11 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_self_rib(&mut self, self_def: Def, f: F) - where F: FnOnce(&mut Resolver) + where F: FnOnce(&mut Resolver<'_>) { let mut self_type_rib = Rib::new(NormalRibKind); - // plain insert (no renaming, types are not currently hygienic....) 
+ // Plain insert (no renaming, since types are not currently hygienic) self_type_rib.bindings.insert(keywords::SelfUpper.ident(), self_def); self.ribs[TypeNS].push(self_type_rib); f(self); @@ -2659,7 +2723,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn with_self_struct_ctor_rib(&mut self, impl_id: DefId, f: F) - where F: FnOnce(&mut Resolver) + where F: FnOnce(&mut Resolver<'_>) { let self_def = Def::SelfCtor(impl_id); let mut self_type_rib = Rib::new(NormalRibKind); @@ -2675,8 +2739,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { self_type: &Ty, item_id: NodeId, impl_items: &[ImplItem]) { + debug!("resolve_implementation"); // If applicable, create a rib for the type parameters. - self.with_type_parameter_rib(HasTypeParameters(generics, ItemRibKind), |this| { + self.with_generic_param_rib(HasGenericParams(generics, ItemRibKind), |this| { // Dummy self type for better errors if `Self` is used in the trait path. this.with_self_rib(Def::SelfTy(None, None), |this| { // Resolve the trait reference, if necessary. @@ -2689,30 +2754,37 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } // Resolve the self type. this.visit_ty(self_type); - // Resolve the type parameters. + // Resolve the generic parameters. this.visit_generics(generics); // Resolve the items within the impl. this.with_current_self_type(self_type, |this| { this.with_self_struct_ctor_rib(item_def_id, |this| { + debug!("resolve_implementation with_self_struct_ctor_rib"); for impl_item in impl_items { this.resolve_visibility(&impl_item.vis); // We also need a new scope for the impl item type parameters. - let type_parameters = HasTypeParameters(&impl_item.generics, - TraitOrImplItemRibKind); - this.with_type_parameter_rib(type_parameters, |this| { + let generic_params = HasGenericParams(&impl_item.generics, + TraitOrImplItemRibKind); + this.with_generic_param_rib(generic_params, |this| { use self::ResolutionError::*; match impl_item.node { ImplItemKind::Const(..) => { + debug!( + "resolve_implementation ImplItemKind::Const", + ); // If this is a trait impl, ensure the const // exists in trait - this.check_trait_item(impl_item.ident, - ValueNS, - impl_item.span, - |n, s| ConstNotMemberOfTrait(n, s)); - this.with_constant_rib(|this| - visit::walk_impl_item(this, impl_item) + this.check_trait_item( + impl_item.ident, + ValueNS, + impl_item.span, + |n, s| ConstNotMemberOfTrait(n, s), ); + + this.with_constant_rib(|this| { + visit::walk_impl_item(this, impl_item) + }); } ImplItemKind::Method(..) => { // If this is a trait impl, ensure the method @@ -2760,7 +2832,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } fn check_trait_item(&mut self, ident: Ident, ns: Namespace, span: Span, err: F) - where F: FnOnce(Name, &str) -> ResolutionError + where F: FnOnce(Name, &str) -> ResolutionError<'_> { // If there is a TraitRef in scope for an impl, then the method must be in the // trait. @@ -3022,16 +3094,14 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { let is_syntactic_ambiguity = opt_pat.is_none() && bmode == BindingMode::ByValue(Mutability::Immutable); match def { - Def::StructCtor(_, CtorKind::Const) | - Def::VariantCtor(_, CtorKind::Const) | + Def::Ctor(_, _, CtorKind::Const) | Def::Const(..) if is_syntactic_ambiguity => { // Disambiguate in favor of a unit struct/variant // or constant pattern. self.record_use(ident, ValueNS, binding.unwrap(), false); Some(PathResolution::new(def)) } - Def::StructCtor(..) | Def::VariantCtor(..) | - Def::Const(..) | Def::Static(..) 
=> { + Def::Ctor(..) | Def::Const(..) | Def::Static(..) => { // This is unambiguously a fresh binding, either syntactically // (e.g., `IDENT @ PAT` or `ref IDENT`) or because `IDENT` resolves // to something unusable as a pattern (e.g., constructor function), @@ -3091,7 +3161,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { id: NodeId, qself: Option<&QSelf>, path: &Path, - source: PathSource) + source: PathSource<'_>) -> PathResolution { self.smart_resolve_path_with_crate_lint(id, qself, path, source, CrateLint::SimplePath(id)) } @@ -3101,7 +3171,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { /// sometimes needed for the lint that recommends rewriting /// absolute paths to `crate`, so that it knows how to frame the /// suggestion. If you are just resolving a path like `foo::bar` - /// that appears...somewhere, though, then you just want + /// that appears in an arbitrary location, then you just want /// `CrateLint::SimplePath`, which is what `smart_resolve_path` /// already provides. fn smart_resolve_path_with_crate_lint( @@ -3109,7 +3179,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { id: NodeId, qself: Option<&QSelf>, path: &Path, - source: PathSource, + source: PathSource<'_>, crate_lint: CrateLint ) -> PathResolution { self.smart_resolve_path_fragment( @@ -3127,309 +3197,14 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { qself: Option<&QSelf>, path: &[Segment], span: Span, - source: PathSource, + source: PathSource<'_>, crate_lint: CrateLint) -> PathResolution { - let ident_span = path.last().map_or(span, |ident| ident.ident.span); let ns = source.namespace(); let is_expected = &|def| source.is_expected(def); - let is_enum_variant = &|def| if let Def::Variant(..) = def { true } else { false }; - // Base error is amended with one short label and possibly some longer helps/notes. let report_errors = |this: &mut Self, def: Option| { - // Make the base error. 
- let expected = source.descr_expected(); - let path_str = Segment::names_to_string(path); - let item_str = path.last().unwrap().ident; - let code = source.error_code(def.is_some()); - let (base_msg, fallback_label, base_span) = if let Some(def) = def { - (format!("expected {}, found {} `{}`", expected, def.kind_name(), path_str), - format!("not a {}", expected), - span) - } else { - let item_span = path.last().unwrap().ident.span; - let (mod_prefix, mod_str) = if path.len() == 1 { - (String::new(), "this scope".to_string()) - } else if path.len() == 2 && path[0].ident.name == keywords::PathRoot.name() { - (String::new(), "the crate root".to_string()) - } else { - let mod_path = &path[..path.len() - 1]; - let mod_prefix = match this.resolve_path_without_parent_scope( - mod_path, Some(TypeNS), false, span, CrateLint::No - ) { - PathResult::Module(ModuleOrUniformRoot::Module(module)) => - module.def(), - _ => None, - }.map_or(String::new(), |def| format!("{} ", def.kind_name())); - (mod_prefix, format!("`{}`", Segment::names_to_string(mod_path))) - }; - (format!("cannot find {} `{}` in {}{}", expected, item_str, mod_prefix, mod_str), - format!("not found in {}", mod_str), - item_span) - }; - - let code = DiagnosticId::Error(code.into()); - let mut err = this.session.struct_span_err_with_code(base_span, &base_msg, code); - - // Emit help message for fake-self from other languages like `this`(javascript) - if ["this", "my"].contains(&&*item_str.as_str()) - && this.self_value_is_available(path[0].ident.span, span) { - err.span_suggestion_with_applicability( - span, - "did you mean", - "self".to_string(), - Applicability::MaybeIncorrect, - ); - } - - // Emit special messages for unresolved `Self` and `self`. - if is_self_type(path, ns) { - __diagnostic_used!(E0411); - err.code(DiagnosticId::Error("E0411".into())); - err.span_label(span, format!("`Self` is only available in impls, traits, \ - and type definitions")); - return (err, Vec::new()); - } - if is_self_value(path, ns) { - debug!("smart_resolve_path_fragment E0424 source:{:?}", source); - - __diagnostic_used!(E0424); - err.code(DiagnosticId::Error("E0424".into())); - err.span_label(span, match source { - PathSource::Pat => { - format!("`self` value is a keyword \ - and may not be bound to \ - variables or shadowed") - } - _ => { - format!("`self` value is a keyword \ - only available in methods \ - with `self` parameter") - } - }); - return (err, Vec::new()); - } - - // Try to lookup the name in more relaxed fashion for better error reporting. 
- let ident = path.last().unwrap().ident; - let candidates = this.lookup_import_candidates(ident, ns, is_expected); - if candidates.is_empty() && is_expected(Def::Enum(DefId::local(CRATE_DEF_INDEX))) { - let enum_candidates = - this.lookup_import_candidates(ident, ns, is_enum_variant); - let mut enum_candidates = enum_candidates.iter() - .map(|suggestion| import_candidate_to_paths(&suggestion)).collect::>(); - enum_candidates.sort(); - for (sp, variant_path, enum_path) in enum_candidates { - if sp.is_dummy() { - let msg = format!("there is an enum variant `{}`, \ - try using `{}`?", - variant_path, - enum_path); - err.help(&msg); - } else { - err.span_suggestion_with_applicability( - span, - "you can try using the variant's enum", - enum_path, - Applicability::MachineApplicable, - ); - } - } - } - if path.len() == 1 && this.self_type_is_available(span) { - if let Some(candidate) = this.lookup_assoc_candidate(ident, ns, is_expected) { - let self_is_available = this.self_value_is_available(path[0].ident.span, span); - match candidate { - AssocSuggestion::Field => { - err.span_suggestion_with_applicability( - span, - "try", - format!("self.{}", path_str), - Applicability::MachineApplicable, - ); - if !self_is_available { - err.span_label(span, format!("`self` value is a keyword \ - only available in \ - methods with `self` parameter")); - } - } - AssocSuggestion::MethodWithSelf if self_is_available => { - err.span_suggestion_with_applicability( - span, - "try", - format!("self.{}", path_str), - Applicability::MachineApplicable, - ); - } - AssocSuggestion::MethodWithSelf | AssocSuggestion::AssocItem => { - err.span_suggestion_with_applicability( - span, - "try", - format!("Self::{}", path_str), - Applicability::MachineApplicable, - ); - } - } - return (err, candidates); - } - } - - let mut levenshtein_worked = false; - - // Try Levenshtein algorithm. - if let Some(candidate) = this.lookup_typo_candidate(path, ns, is_expected, span) { - err.span_label(ident_span, format!("did you mean `{}`?", candidate)); - levenshtein_worked = true; - } - - // Try context dependent help if relaxed lookup didn't work. - if let Some(def) = def { - match (def, source) { - (Def::Macro(..), _) => { - err.span_label(span, format!("did you mean `{}!(...)`?", path_str)); - return (err, candidates); - } - (Def::TyAlias(..), PathSource::Trait(_)) => { - err.span_label(span, "type aliases cannot be used as traits"); - if nightly_options::is_nightly_build() { - err.note("did you mean to use a trait alias?"); - } - return (err, candidates); - } - (Def::Mod(..), PathSource::Expr(Some(parent))) => match parent.node { - ExprKind::Field(_, ident) => { - err.span_label(parent.span, format!("did you mean `{}::{}`?", - path_str, ident)); - return (err, candidates); - } - ExprKind::MethodCall(ref segment, ..) 
=> { - err.span_label(parent.span, format!("did you mean `{}::{}(...)`?", - path_str, segment.ident)); - return (err, candidates); - } - _ => {} - }, - (Def::Enum(..), PathSource::TupleStruct) - | (Def::Enum(..), PathSource::Expr(..)) => { - if let Some(variants) = this.collect_enum_variants(def) { - err.note(&format!("did you mean to use one \ - of the following variants?\n{}", - variants.iter() - .map(|suggestion| path_names_to_string(suggestion)) - .map(|suggestion| format!("- `{}`", suggestion)) - .collect::>() - .join("\n"))); - - } else { - err.note("did you mean to use one of the enum's variants?"); - } - return (err, candidates); - }, - (Def::Struct(def_id), _) if ns == ValueNS => { - if let Some((ctor_def, ctor_vis)) - = this.struct_constructors.get(&def_id).cloned() { - let accessible_ctor = this.is_accessible(ctor_vis); - if is_expected(ctor_def) && !accessible_ctor { - err.span_label(span, format!("constructor is not visible \ - here due to private fields")); - } - } else { - // HACK(estebank): find a better way to figure out that this was a - // parser issue where a struct literal is being used on an expression - // where a brace being opened means a block is being started. Look - // ahead for the next text to see if `span` is followed by a `{`. - let sm = this.session.source_map(); - let mut sp = span; - loop { - sp = sm.next_point(sp); - match sm.span_to_snippet(sp) { - Ok(ref snippet) => { - if snippet.chars().any(|c| { !c.is_whitespace() }) { - break; - } - } - _ => break, - } - } - let followed_by_brace = match sm.span_to_snippet(sp) { - Ok(ref snippet) if snippet == "{" => true, - _ => false, - }; - match source { - PathSource::Expr(Some(parent)) => { - match parent.node { - ExprKind::MethodCall(ref path_assignment, _) => { - err.span_suggestion_with_applicability( - sm.start_point(parent.span) - .to(path_assignment.ident.span), - "use `::` to access an associated function", - format!("{}::{}", - path_str, - path_assignment.ident), - Applicability::MaybeIncorrect - ); - return (err, candidates); - }, - _ => { - err.span_label( - span, - format!("did you mean `{} {{ /* fields */ }}`?", - path_str), - ); - return (err, candidates); - }, - } - }, - PathSource::Expr(None) if followed_by_brace == true => { - err.span_label( - span, - format!("did you mean `({} {{ /* fields */ }})`?", - path_str), - ); - return (err, candidates); - }, - _ => { - err.span_label( - span, - format!("did you mean `{} {{ /* fields */ }}`?", - path_str), - ); - return (err, candidates); - }, - } - } - return (err, candidates); - } - (Def::Union(..), _) | - (Def::Variant(..), _) | - (Def::VariantCtor(_, CtorKind::Fictive), _) if ns == ValueNS => { - err.span_label(span, format!("did you mean `{} {{ /* fields */ }}`?", - path_str)); - return (err, candidates); - } - (Def::SelfTy(..), _) if ns == ValueNS => { - err.span_label(span, fallback_label); - err.note("can't use `Self` as a constructor, you must use the \ - implemented struct"); - return (err, candidates); - } - (Def::TyAlias(_), _) | (Def::AssociatedTy(..), _) if ns == ValueNS => { - err.note("can't use a type alias as a constructor"); - return (err, candidates); - } - _ => {} - } - } - - // Fallback label. 
- if !levenshtein_worked { - err.span_label(base_span, fallback_label); - this.type_ascription_suggestion(&mut err, base_span); - } - (err, candidates) - }; - let report_errors = |this: &mut Self, def: Option| { - let (err, candidates) = report_errors(this, def); + let (err, candidates) = this.smart_resolve_report_errors(path, span, source, def); let def_id = this.current_module.normal_ancestor_id; let node_id = this.definitions.as_local_node_id(def_id).unwrap(); let better = def.is_some(); @@ -3492,31 +3267,82 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { resolution } - fn type_ascription_suggestion(&self, - err: &mut DiagnosticBuilder, - base_span: Span) { + /// Only used in a specific case of type ascription suggestions + #[doc(hidden)] + fn get_colon_suggestion_span(&self, start: Span) -> Span { + let cm = self.session.source_map(); + start.to(cm.next_point(start)) + } + + fn type_ascription_suggestion( + &self, + err: &mut DiagnosticBuilder<'_>, + base_span: Span, + ) { debug!("type_ascription_suggetion {:?}", base_span); let cm = self.session.source_map(); + let base_snippet = cm.span_to_snippet(base_span); debug!("self.current_type_ascription {:?}", self.current_type_ascription); if let Some(sp) = self.current_type_ascription.last() { let mut sp = *sp; - loop { // try to find the `:`, bail on first non-':'/non-whitespace + loop { + // Try to find the `:`; bail on first non-':' / non-whitespace. sp = cm.next_point(sp); if let Ok(snippet) = cm.span_to_snippet(sp.to(cm.next_point(sp))) { - debug!("snippet {:?}", snippet); let line_sp = cm.lookup_char_pos(sp.hi()).line; let line_base_sp = cm.lookup_char_pos(base_span.lo()).line; - debug!("{:?} {:?}", line_sp, line_base_sp); if snippet == ":" { - err.span_label(base_span, - "expecting a type here because of type ascription"); + let mut show_label = true; if line_sp != line_base_sp { - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( sp, "did you mean to use `;` here instead?", ";".to_string(), Applicability::MaybeIncorrect, ); + } else { + let colon_sp = self.get_colon_suggestion_span(sp); + let after_colon_sp = self.get_colon_suggestion_span( + colon_sp.shrink_to_hi(), + ); + if !cm.span_to_snippet(after_colon_sp).map(|s| s == " ") + .unwrap_or(false) + { + err.span_suggestion( + colon_sp, + "maybe you meant to write a path separator here", + "::".to_string(), + Applicability::MaybeIncorrect, + ); + show_label = false; + } + if let Ok(base_snippet) = base_snippet { + let mut sp = after_colon_sp; + for _ in 0..100 { + // Try to find an assignment + sp = cm.next_point(sp); + let snippet = cm.span_to_snippet(sp.to(cm.next_point(sp))); + match snippet { + Ok(ref x) if x.as_str() == "=" => { + err.span_suggestion( + base_span, + "maybe you meant to write an assignment here", + format!("let {}", base_snippet), + Applicability::MaybeIncorrect, + ); + show_label = false; + break; + } + Ok(ref x) if x.as_str() == "\n" => break, + Err(_) => break, + Ok(_) => {} + } + } + } + } + if show_label { + err.span_label(base_span, + "expecting a type here because of type ascription"); } break; } else if !snippet.trim().is_empty() { @@ -3671,7 +3497,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // Such behavior is required for backward compatibility. // The same fallback is used when `a` resolves to nothing. PathResult::Module(ModuleOrUniformRoot::Module(_)) | - PathResult::Failed(..) + PathResult::Failed { .. 
} if (ns == TypeNS || path.len() > 1) && self.primitive_type_table.primitive_types .contains_key(&path[0].ident.name) => { @@ -3680,11 +3506,11 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } PathResult::Module(ModuleOrUniformRoot::Module(module)) => PathResolution::new(module.def().unwrap()), - PathResult::Failed(span, msg, false) => { - resolve_error(self, span, ResolutionError::FailedToResolve(&msg)); + PathResult::Failed { is_error_from_last_segment: false, span, label, suggestion } => { + resolve_error(self, span, ResolutionError::FailedToResolve { label, suggestion }); err_path_resolution() } - PathResult::Module(..) | PathResult::Failed(..) => return None, + PathResult::Module(..) | PathResult::Failed { .. } => return None, PathResult::Indeterminate => bug!("indetermined path result in resolve_qpath"), }; @@ -3792,7 +3618,12 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } } let msg = "there are too many initial `super`s.".to_string(); - return PathResult::Failed(ident.span, msg, false); + return PathResult::Failed { + span: ident.span, + label: msg, + suggestion: None, + is_error_from_last_segment: false, + }; } if i == 0 { if name == keywords::SelfLower.name() { @@ -3801,8 +3632,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { self.resolve_self(&mut ctxt, self.current_module))); continue; } - if name == keywords::Extern.name() || - name == keywords::PathRoot.name() && ident.span.rust_2018() { + if name == keywords::PathRoot.name() && ident.span.rust_2018() { module = Some(ModuleOrUniformRoot::ExternPrelude); continue; } @@ -3830,12 +3660,17 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } else { format!("`{}`", name) }; - let msg = if i == 1 && path[0].ident.name == keywords::PathRoot.name() { + let label = if i == 1 && path[0].ident.name == keywords::PathRoot.name() { format!("global paths cannot start with {}", name_str) } else { format!("{} in paths can only be used in start position", name_str) }; - return PathResult::Failed(ident.span, msg, false); + return PathResult::Failed { + span: ident.span, + label, + suggestion: None, + is_error_from_last_segment: false, + }; } let binding = if let Some(module) = module { @@ -3874,6 +3709,13 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { module = Some(ModuleOrUniformRoot::Module(next_module)); record_segment_def(self, def); } else if def == Def::ToolMod && i + 1 != path.len() { + if binding.is_import() { + self.session.struct_span_err( + ident.span, "cannot use a tool module through an import" + ).span_note( + binding.span, "the tool module imported here" + ).emit(); + } let def = Def::NonMacroAttr(NonMacroAttrKind::Tool); return PathResult::NonModule(PathResolution::new(def)); } else if def == Def::Err { @@ -3889,9 +3731,19 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { def, path.len() - i - 1 )); } else { - return PathResult::Failed(ident.span, - format!("not a module `{}`", ident), - is_last); + let label = format!( + "`{}` is {} {}, not a module", + ident, + def.article(), + def.kind_name(), + ); + + return PathResult::Failed { + span: ident.span, + label, + suggestion: None, + is_error_from_last_segment: is_last, + }; } } Err(Undetermined) => return PathResult::Indeterminate, @@ -3907,7 +3759,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { Some(ModuleOrUniformRoot::Module(module)) => module.def(), _ => None, }; - let msg = if module_def == self.graph_root.def() { + let (label, suggestion) = if module_def == self.graph_root.def() { let is_mod = 
|def| match def { Def::Mod(..) => true, _ => false }; let mut candidates = self.lookup_import_candidates(ident, TypeNS, is_mod); @@ -3915,16 +3767,32 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { (c.path.segments.len(), c.path.to_string()) }); if let Some(candidate) = candidates.get(0) { - format!("did you mean `{}`?", candidate.path) + ( + String::from("unresolved import"), + Some(( + ident.span, + String::from("a similar path exists"), + candidate.path.to_string(), + Applicability::MaybeIncorrect, + )), + ) + } else if !ident.is_reserved() { + (format!("maybe a missing `extern crate {};`?", ident), None) } else { - format!("maybe a missing `extern crate {};`?", ident) + // the parser will already have complained about the keyword being used + return PathResult::NonModule(err_path_resolution()); } } else if i == 0 { - format!("use of undeclared type or module `{}`", ident) + (format!("use of undeclared type or module `{}`", ident), None) } else { - format!("could not find `{}` in `{}`", ident, path[i - 1].ident) + (format!("could not find `{}` in `{}`", ident, path[i - 1].ident), None) + }; + return PathResult::Failed { + span: ident.span, + label, + suggestion, + is_error_from_last_segment: is_last, }; - return PathResult::Failed(ident.span, msg, is_last); } } } @@ -3943,7 +3811,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { crate_lint: CrateLint, path: &[Segment], path_span: Span, - second_binding: Option<&NameBinding>, + second_binding: Option<&NameBinding<'_>>, ) { let (diag_id, diag_span) = match crate_lint { CrateLint::No => return, @@ -3959,8 +3827,8 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { }; // We're only interested in `use` paths which should start with - // `{{root}}` or `extern` currently. - if first_name != keywords::Extern.name() && first_name != keywords::PathRoot.name() { + // `{{root}}` currently. + if first_name != keywords::PathRoot.name() { return } @@ -4005,6 +3873,7 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { mut def: Def, record_used: bool, span: Span) -> Def { + debug!("adjust_local_def"); let ribs = &self.ribs[ns][rib_index + 1..]; // An invalid forward use of a type parameter from a previous default. @@ -4021,6 +3890,9 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { span_bug!(span, "unexpected {:?} in bindings", def) } Def::Local(node_id) => { + use ResolutionError::*; + let mut res_err = None; + for rib in ribs { match rib.kind { NormalRibKind | ModuleRibKind(..) | MacroDefinition(..) | @@ -4051,26 +3923,31 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { seen.insert(node_id, depth); } } - ItemRibKind | TraitOrImplItemRibKind => { + ItemRibKind | FnItemRibKind | TraitOrImplItemRibKind => { // This was an attempt to access an upvar inside a // named function item. This is not allowed, so we // report an error. if record_used { - resolve_error(self, span, - ResolutionError::CannotCaptureDynamicEnvironmentInFnItem); + // We don't immediately trigger a resolve error, because + // we want certain other resolution errors (namely those + // emitted for `ConstantItemRibKind` below) to take + // precedence. 
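
For orientation, an illustrative self-contained sketch (the local `x` and closure `get` are invented names) of the case the `FnItemRibKind`/`ItemRibKind` arm above rejects: a nested `fn` item has no closure environment and so cannot capture an outer local, whereas a closure can.

    fn main() {
        let x = 5;

        // A nested `fn` item has no environment to capture `x` from, which is
        // the case reported as an error by the rib check:
        //
        //     fn get() -> i32 { x + 1 } // error: can't capture dynamic environment in a fn item
        //
        // A closure does capture its environment and is accepted.
        let get = || x + 1;
        assert_eq!(get(), 6);
    }
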
+ res_err = Some(CannotCaptureDynamicEnvironmentInFnItem); } - return Def::Err; } ConstantItemRibKind => { // Still doesn't deal with upvars if record_used { - resolve_error(self, span, - ResolutionError::AttemptToUseNonConstantValueInConstant); + resolve_error(self, span, AttemptToUseNonConstantValueInConstant); } return Def::Err; } } } + if let Some(res_err) = res_err { + resolve_error(self, span, res_err); + return Def::Err; + } } Def::TyParam(..) | Def::SelfTy(..) => { for rib in ribs { @@ -4080,18 +3957,42 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { ConstantItemRibKind => { // Nothing to do. Continue. } - ItemRibKind => { - // This was an attempt to use a type parameter outside - // its scope. + ItemRibKind | FnItemRibKind => { + // This was an attempt to use a type parameter outside its scope. if record_used { - resolve_error(self, span, - ResolutionError::TypeParametersFromOuterFunction(def)); + resolve_error( + self, + span, + ResolutionError::GenericParamsFromOuterFunction(def), + ); } return Def::Err; } } } } + Def::ConstParam(..) => { + let mut ribs = ribs.iter().peekable(); + if let Some(Rib { kind: FnItemRibKind, .. }) = ribs.peek() { + // When declaring const parameters inside function signatures, the first rib + // is always a `FnItemRibKind`. In this case, we can skip it, to avoid it + // (spuriously) conflicting with the const param. + ribs.next(); + } + for rib in ribs { + if let ItemRibKind | FnItemRibKind = rib.kind { + // This was an attempt to use a const parameter outside its scope. + if record_used { + resolve_error( + self, + span, + ResolutionError::GenericParamsFromOuterFunction(def), + ); + } + return Def::Err; + } + } + } _ => {} } def @@ -4159,19 +4060,25 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { None } - fn lookup_typo_candidate(&mut self, - path: &[Segment], - ns: Namespace, - filter_fn: FilterFn, - span: Span) - -> Option - where FilterFn: Fn(Def) -> bool + fn lookup_typo_candidate( + &mut self, + path: &[Segment], + ns: Namespace, + filter_fn: FilterFn, + span: Span, + ) -> Option + where + FilterFn: Fn(Def) -> bool, { - let add_module_candidates = |module: Module, names: &mut Vec| { + let add_module_candidates = |module: Module<'_>, names: &mut Vec| { for (&(ident, _), resolution) in module.resolutions.borrow().iter() { if let Some(binding) = resolution.borrow().binding { if filter_fn(binding.def()) { - names.push(ident.name); + names.push(TypoSuggestion { + candidate: ident.name, + article: binding.def().article(), + kind: binding.def().kind_name(), + }); } } } @@ -4185,7 +4092,11 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // Locals and type parameters for (ident, def) in &rib.bindings { if filter_fn(*def) { - names.push(ident.name); + names.push(TypoSuggestion { + candidate: ident.name, + article: def.article(), + kind: def.kind_name(), + }); } } // Items in scope @@ -4198,7 +4109,27 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { } else { // Items from the prelude if !module.no_implicit_prelude { - names.extend(self.extern_prelude.iter().map(|(ident, _)| ident.name)); + names.extend(self.extern_prelude.clone().iter().flat_map(|(ident, _)| { + self.crate_loader + .maybe_process_path_extern(ident.name, ident.span) + .and_then(|crate_id| { + let crate_mod = Def::Mod(DefId { + krate: crate_id, + index: CRATE_DEF_INDEX, + }); + + if filter_fn(crate_mod) { + Some(TypoSuggestion { + candidate: ident.name, + article: "a", + kind: "crate", + }) + } else { + None + } + }) + })); + if let Some(prelude) = 
self.prelude { add_module_candidates(prelude, &mut names); } @@ -4210,7 +4141,13 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { // Add primitive types to the mix if filter_fn(Def::PrimTy(Bool)) { names.extend( - self.primitive_type_table.primitive_types.iter().map(|(name, _)| name) + self.primitive_type_table.primitive_types.iter().map(|(name, _)| { + TypoSuggestion { + candidate: *name, + article: "a", + kind: "primitive type", + } + }) ) } } else { @@ -4227,15 +4164,22 @@ impl<'a, 'crateloader: 'a> Resolver<'a, 'crateloader> { let name = path[path.len() - 1].ident.name; // Make sure error reporting is deterministic. - names.sort_by_cached_key(|name| name.as_str()); - match find_best_match_for_name(names.iter(), &name.as_str(), None) { - Some(found) if found != name => Some(found), + names.sort_by_cached_key(|suggestion| suggestion.candidate.as_str()); + + match find_best_match_for_name( + names.iter().map(|suggestion| &suggestion.candidate), + &name.as_str(), + None, + ) { + Some(found) if found != name => names + .into_iter() + .find(|suggestion| suggestion.candidate == found), _ => None, } } fn with_resolved_label(&mut self, label: Option
SomeStruct { + // fn foo(...) + // } + // + // Here we can have a path like + // `a::b::SomeStruct::::foo::`, in which case parameters + // may appear in two places. The penultimate segment, + // `SomeStruct::`, contains parameters in TypeSpace, and the + // final segment, `foo::` contains parameters in fn space. + // + // 5. Reference to a local variable + // + // Local variables can't have any type parameters. + // + // The first step then is to categorize the segments appropriately. + + let tcx = self.tcx(); + + assert!(!segments.is_empty()); + let last = segments.len() - 1; + + let mut path_segs = vec![]; + + match def { + // Case 1. Reference to a struct constructor. + Def::Ctor(def_id, CtorOf::Struct, ..) | + Def::SelfCtor(.., def_id) => { + // Everything but the final segment should have no + // parameters at all. + let generics = tcx.generics_of(def_id); + // Variant and struct constructors use the + // generics of their parent type definition. + let generics_def_id = generics.parent.unwrap_or(def_id); + path_segs.push(PathSeg(generics_def_id, last)); + } + + // Case 2. Reference to a variant constructor. + Def::Ctor(def_id, CtorOf::Variant, ..) | Def::Variant(def_id, ..) => { + let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap()); + let (generics_def_id, index) = if let Some(adt_def) = adt_def { + debug_assert!(adt_def.is_enum()); + (adt_def.did, last) + } else if last >= 1 && segments[last - 1].args.is_some() { + // Everything but the penultimate segment should have no + // parameters at all. + let mut def_id = def_id; + + // `Def::Ctor` -> `Def::Variant` + if let Def::Ctor(..) = def { + def_id = tcx.parent(def_id).unwrap() + } + + // `Def::Variant` -> `Def::Item` (enum) + let enum_def_id = tcx.parent(def_id).unwrap(); + (enum_def_id, last - 1) + } else { + // FIXME: lint here recommending `Enum::<...>::Variant` form + // instead of `Enum::Variant::<...>` form. + + // Everything but the final segment should have no + // parameters at all. + let generics = tcx.generics_of(def_id); + // Variant and struct constructors use the + // generics of their parent type definition. + (generics.parent.unwrap_or(def_id), last) + }; + path_segs.push(PathSeg(generics_def_id, index)); + } + + // Case 3. Reference to a top-level value. + Def::Fn(def_id) | + Def::Const(def_id) | + Def::ConstParam(def_id) | + Def::Static(def_id, _) => { + path_segs.push(PathSeg(def_id, last)); + } + + // Case 4. Reference to a method or associated const. + Def::Method(def_id) | + Def::AssociatedConst(def_id) => { + if segments.len() >= 2 { + let generics = tcx.generics_of(def_id); + path_segs.push(PathSeg(generics.parent.unwrap(), last - 1)); + } + path_segs.push(PathSeg(def_id, last)); + } + + // Case 5. Local variable, no generics. + Def::Local(..) | Def::Upvar(..) => {} + + _ => bug!("unexpected definition: {:?}", def), + } + + debug!("path_segs = {:?}", path_segs); + + path_segs + } + // Check a type `Path` and convert it to a `Ty`. pub fn def_to_ty(&self, opt_self_ty: Option>, @@ -1469,46 +1701,57 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { self.prohibit_generics(path.segments.split_last().unwrap().1); self.ast_path_to_ty(span, did, path.segments.last().unwrap()) } - Def::Variant(did) if permit_variants => { + Def::Variant(_) if permit_variants => { // Convert "variant type" as if it were a real type. // The resulting `Ty` is type of the variant's enum for now. 
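
The path shape described in the comment above is of the form `a::b::SomeStruct::<T>::foo::<A>`, with generic arguments on two segments; an illustrative, compilable sketch (type and method names invented):

    struct SomeStruct<T>(T);

    impl<T> SomeStruct<T> {
        fn foo<A>(_arg: A) {}
    }

    fn main() {
        // "Case 4" above: generic arguments may appear on two segments.
        // `SomeStruct::<u32>` carries the type's parameters, while
        // `foo::<bool>` carries the function's own parameters.
        SomeStruct::<u32>::foo::<bool>(true);
    }
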
assert_eq!(opt_self_ty, None); - self.prohibit_generics(path.segments.split_last().unwrap().1); - self.ast_path_to_ty(span, - tcx.parent_def_id(did).unwrap(), - path.segments.last().unwrap()) + + let path_segs = self.def_ids_for_path_segments(&path.segments, None, path.def); + let generic_segs: FxHashSet<_> = + path_segs.iter().map(|PathSeg(_, index)| index).collect(); + self.prohibit_generics(path.segments.iter().enumerate().filter_map(|(index, seg)| { + if !generic_segs.contains(&index) { + Some(seg) + } else { + None + } + })); + + let PathSeg(def_id, index) = path_segs.last().unwrap(); + self.ast_path_to_ty(span, *def_id, &path.segments[*index]) } Def::TyParam(did) => { assert_eq!(opt_self_ty, None); self.prohibit_generics(&path.segments); - let node_id = tcx.hir().as_local_node_id(did).unwrap(); - let item_id = tcx.hir().get_parent_node(node_id); - let item_def_id = tcx.hir().local_def_id(item_id); + let hir_id = tcx.hir().as_local_hir_id(did).unwrap(); + let item_id = tcx.hir().get_parent_node_by_hir_id(hir_id); + let item_def_id = tcx.hir().local_def_id_from_hir_id(item_id); let generics = tcx.generics_of(item_def_id); - let index = generics.param_def_id_to_index[&tcx.hir().local_def_id(node_id)]; - tcx.mk_ty_param(index, tcx.hir().name(node_id).as_interned_str()) + let index = generics.param_def_id_to_index[ + &tcx.hir().local_def_id_from_hir_id(hir_id)]; + tcx.mk_ty_param(index, tcx.hir().name_by_hir_id(hir_id).as_interned_str()) } Def::SelfTy(_, Some(def_id)) => { - // `Self` in impl (we know the concrete type) - + // `Self` in impl (we know the concrete type). assert_eq!(opt_self_ty, None); self.prohibit_generics(&path.segments); - - tcx.at(span).type_of(def_id) + // Try to evaluate any array length constants + self.normalize_ty(span, tcx.at(span).type_of(def_id)) } Def::SelfTy(Some(_), None) => { - // `Self` in trait + // `Self` in trait. assert_eq!(opt_self_ty, None); self.prohibit_generics(&path.segments); tcx.mk_self_type() } Def::AssociatedTy(def_id) => { - self.prohibit_generics(&path.segments[..path.segments.len()-2]); + debug_assert!(path.segments.len() >= 2); + self.prohibit_generics(&path.segments[..path.segments.len() - 2]); self.qpath_to_ty(span, opt_self_ty, def_id, - &path.segments[path.segments.len()-2], + &path.segments[path.segments.len() - 2], path.segments.last().unwrap()) } Def::PrimTy(prim_ty) => { @@ -1535,7 +1778,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { /// internal notion of a type. 
pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> { debug!("ast_ty_to_ty(id={:?}, ast_ty={:?} ty_ty={:?})", - ast_ty.id, ast_ty, ast_ty.node); + ast_ty.hir_id, ast_ty, ast_ty.node); let tcx = self.tcx(); @@ -1562,7 +1805,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(&t))) } hir::TyKind::BareFn(ref bf) => { - require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); + require_c_abi_if_c_variadic(tcx, &bf.decl, bf.abi, ast_ty.span); tcx.mk_fn_ptr(self.ty_of_fn(bf.unsafety, bf.abi, &bf.decl)) } hir::TyKind::TraitObject(ref bounds, ref lifetime) => { @@ -1576,7 +1819,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { self.def_to_ty(opt_self_ty, path, false) } hir::TyKind::Def(item_id, ref lifetimes) => { - let did = tcx.hir().local_def_id(item_id.id); + let did = tcx.hir().local_def_id_from_hir_id(item_id.id); self.impl_trait_ty_to_ty(did, lifetimes) }, hir::TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => { @@ -1588,12 +1831,10 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { } else { Def::Err }; - self.associated_path_def_to_ty(ast_ty.id, ast_ty.span, ty, def, segment).0 + self.associated_path_to_ty(ast_ty.hir_id, ast_ty.span, ty, def, segment, false).0 } hir::TyKind::Array(ref ty, ref length) => { - let length_def_id = tcx.hir().local_def_id(length.id); - let substs = Substs::identity_for_item(tcx, length_def_id); - let length = ty::Const::unevaluated(tcx, length_def_id, substs, tcx.types.usize); + let length = self.ast_const_to_const(length, tcx.types.usize); let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(&ty), length)); self.normalize_ty(ast_ty.span, array_ty) } @@ -1615,12 +1856,57 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { hir::TyKind::Err => { tcx.types.err } + hir::TyKind::CVarArgs(lt) => { + let va_list_did = match tcx.lang_items().va_list() { + Some(did) => did, + None => span_bug!(ast_ty.span, + "`va_list` lang item required for variadics"), + }; + let region = self.ast_region_to_region(<, None); + tcx.type_of(va_list_did).subst(tcx, &[region.into()]) + } }; self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span); result_ty } + pub fn ast_const_to_const( + &self, + ast_const: &hir::AnonConst, + ty: Ty<'tcx> + ) -> &'tcx ty::Const<'tcx> { + debug!("ast_const_to_const(id={:?}, ast_const={:?})", ast_const.hir_id, ast_const); + + let tcx = self.tcx(); + let def_id = tcx.hir().local_def_id_from_hir_id(ast_const.hir_id); + + let mut const_ = ty::Const { + val: ConstValue::Unevaluated( + def_id, + InternalSubsts::identity_for_item(tcx, def_id), + ), + ty, + }; + + let expr = &tcx.hir().body(ast_const.body).value; + if let ExprKind::Path(ref qpath) = expr.node { + if let hir::QPath::Resolved(_, ref path) = qpath { + if let Def::ConstParam(def_id) = path.def { + let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let item_id = tcx.hir().get_parent_node(node_id); + let item_def_id = tcx.hir().local_def_id(item_id); + let generics = tcx.generics_of(item_def_id); + let index = generics.param_def_id_to_index[&tcx.hir().local_def_id(node_id)]; + let name = tcx.hir().name(node_id).as_interned_str(); + const_.val = ConstValue::Param(ty::ParamConst::new(index, name)); + } + } + }; + + tcx.mk_const(const_) + } + pub fn impl_trait_ty_to_ty( &self, def_id: DefId, @@ -1632,7 +1918,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { let generics = tcx.generics_of(def_id); debug!("impl_trait_ty_to_ty: generics={:?}", generics); - let substs = 
Substs::for_item(tcx, def_id, |param, _| { + let substs = InternalSubsts::for_item(tcx, def_id, |param, _| { if let Some(i) = (param.index as usize).checked_sub(generics.parent_count) { // Our own parameters are the resolved lifetimes. match param.kind { @@ -1697,7 +1983,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> dyn AstConv<'gcx, 'tcx> + 'o { let bare_fn_ty = ty::Binder::bind(tcx.mk_fn_sig( input_tys, output_ty, - decl.variadic, + decl.c_variadic, unsafety, abi )); diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 87ee903cf4302..032821e6d42f2 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -1,15 +1,7 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use check::{FnCtxt, Expectation, Diverges, Needs}; -use check::coercion::CoerceMany; +use crate::check::{FnCtxt, Expectation, Diverges, Needs}; +use crate::check::coercion::CoerceMany; +use crate::util::nodemap::FxHashMap; +use errors::Applicability; use rustc::hir::{self, PatKind}; use rustc::hir::def::{Def, CtorKind}; use rustc::hir::pat_util::EnumerateAndAdjustIterator; @@ -22,27 +14,40 @@ use syntax::source_map::Spanned; use syntax::ptr::P; use syntax::util::lev_distance::find_best_match_for_name; use syntax_pos::Span; -use util::nodemap::FxHashMap; use std::collections::hash_map::Entry::{Occupied, Vacant}; use std::cmp; +use super::report_unexpected_variant_def; + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { - /// The `is_arg` argument indicates whether this pattern is the - /// *outermost* pattern in an argument (e.g., in `fn foo(&x: - /// &u32)`, it is true for the `&x` pattern but not `x`). This is - /// used to tailor error reporting. + /// `match_discrim_span` argument having a `Span` indicates that this pattern is part of + /// a match expression arm guard, and it points to the match discriminant to add context + /// in type errors. In the folloowing example, `match_discrim_span` corresponds to the + /// `a + b` expression: + /// + /// ```text + /// error[E0308]: mismatched types + /// --> src/main.rs:5:9 + /// | + /// 4 | let temp: usize = match a + b { + /// | ----- this expression has type `usize` + /// 5 | Ok(num) => num, + /// | ^^^^^^^ expected usize, found enum `std::result::Result` + /// | + /// = note: expected type `usize` + /// found type `std::result::Result<_, _>` + /// ``` pub fn check_pat_walk( &self, pat: &'gcx hir::Pat, mut expected: Ty<'tcx>, mut def_bm: ty::BindingMode, - is_arg: bool) - { + match_discrim_span: Option, + ) { let tcx = self.tcx; - debug!("check_pat_walk(pat={:?},expected={:?},def_bm={:?},is_arg={})", - pat, expected, def_bm, is_arg); + debug!("check_pat_walk(pat={:?},expected={:?},def_bm={:?})", pat, expected, def_bm); let is_non_ref_pat = match pat.node { PatKind::Struct(..) | @@ -59,7 +64,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } PatKind::Path(ref qpath) => { - let (def, _, _) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + let (def, _, _) = self.resolve_ty_and_def_ufcs(qpath, pat.hir_id, pat.span); match def { Def::Const(..) | Def::AssociatedConst(..) 
=> false, _ => true, @@ -83,7 +88,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // See the examples in `run-pass/match-defbm*.rs`. let mut pat_adjustments = vec![]; while let ty::Ref(_, inner_ty, inner_mutability) = exp_ty.sty { - debug!("inspecting {:?} with type {:?}", exp_ty, exp_ty.sty); + debug!("inspecting {:?}", exp_ty); debug!("current discriminant is Ref, inserting implicit deref"); // Preserve the reference type. We'll need it later during HAIR lowering. @@ -178,7 +183,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let rhs_ty = self.check_expr(end); // Check that both end-points are of numeric or char type. - let numeric_or_char = |ty: Ty| ty.is_numeric() || ty.is_char(); + let numeric_or_char = |ty: Ty<'_>| ty.is_numeric() || ty.is_char(); let lhs_compat = numeric_or_char(lhs_ty); let rhs_compat = numeric_or_char(rhs_ty); @@ -218,8 +223,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let common_type = self.resolve_type_vars_if_possible(&lhs_ty); // subtyping doesn't matter here, as the value is some kind of scalar - self.demand_eqtype(pat.span, expected, lhs_ty); - self.demand_eqtype(pat.span, expected, rhs_ty); + self.demand_eqtype_pat(pat.span, expected, lhs_ty, match_discrim_span); + self.demand_eqtype_pat(pat.span, expected, rhs_ty, match_discrim_span); common_type } PatKind::Binding(ba, var_id, _, ref sub) => { @@ -234,7 +239,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .pat_binding_modes_mut() .insert(pat.hir_id, bm); debug!("check_pat_walk: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); - let local_ty = self.local_ty(pat.span, pat.id).decl_ty; + let local_ty = self.local_ty(pat.span, pat.hir_id).decl_ty; match bm { ty::BindByReference(mutbl) => { // if the binding is like @@ -248,42 +253,50 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)` is // required. However, we use equality, which is stronger. See (*) for // an explanation. - self.demand_eqtype(pat.span, region_ty, local_ty); + self.demand_eqtype_pat(pat.span, region_ty, local_ty, match_discrim_span); } // otherwise the type of x is the expected type T ty::BindByValue(_) => { // As above, `T <: typeof(x)` is required but we // use equality, see (*) below. 
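
An illustrative sketch (values invented) of the default-binding-mode adjustment the walk above records when the discriminant is a reference:

    fn main() {
        let opt = Some(String::from("hi"));

        // The discriminant has type `&Option<String>`, so the walk records an
        // implicit deref and shifts the default binding mode to by-reference:
        // `s` is bound as `&String` without writing `Some(ref s)`.
        match &opt {
            Some(s) => println!("{}", s.len()),
            None => {}
        }
    }
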
- self.demand_eqtype(pat.span, expected, local_ty); + self.demand_eqtype_pat(pat.span, expected, local_ty, match_discrim_span); } } // if there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be - if var_id != pat.id { + if var_id != pat.hir_id { let vt = self.local_ty(pat.span, var_id).decl_ty; - self.demand_eqtype(pat.span, vt, local_ty); + self.demand_eqtype_pat(pat.span, vt, local_ty, match_discrim_span); } if let Some(ref p) = *sub { - self.check_pat_walk(&p, expected, def_bm, true); + self.check_pat_walk(&p, expected, def_bm, match_discrim_span); } local_ty } PatKind::TupleStruct(ref qpath, ref subpats, ddpos) => { - self.check_pat_tuple_struct(pat, qpath, &subpats, ddpos, expected, def_bm) + self.check_pat_tuple_struct( + pat, + qpath, + &subpats, + ddpos, + expected, + def_bm, + match_discrim_span, + ) } PatKind::Path(ref qpath) => { self.check_pat_path(pat, qpath, expected) } PatKind::Struct(ref qpath, ref fields, etc) => { - self.check_pat_struct(pat, qpath, fields, etc, expected, def_bm) + self.check_pat_struct(pat, qpath, fields, etc, expected, def_bm, match_discrim_span) } PatKind::Tuple(ref elements, ddpos) => { let mut expected_len = elements.len(); if ddpos.is_some() { - // Require known type only when `..` is present + // Require known type only when `..` is present. if let ty::Tuple(ref tys) = self.structurally_resolved_type(pat.span, expected).sty { expected_len = tys.len(); @@ -292,8 +305,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let max_len = cmp::max(expected_len, elements.len()); let element_tys_iter = (0..max_len).map(|_| self.next_ty_var( - // FIXME: MiscVariable for now, obtaining the span and name information - // from all tuple elements isn't trivial. + // FIXME: `MiscVariable` for now -- obtaining the span and name information + // from all tuple elements isn't trivial. TypeVariableOrigin::TypeInference(pat.span))); let element_tys = tcx.mk_type_list(element_tys_iter); let pat_ty = tcx.mk_ty(ty::Tuple(element_tys)); @@ -303,12 +316,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // further errors being emitted when using the bindings. #50333 let element_tys_iter = (0..max_len).map(|_| tcx.types.err); for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { - self.check_pat_walk(elem, &tcx.types.err, def_bm, true); + self.check_pat_walk(elem, &tcx.types.err, def_bm, match_discrim_span); } tcx.mk_tup(element_tys_iter) } else { for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) { - self.check_pat_walk(elem, &element_tys[i], def_bm, true); + self.check_pat_walk(elem, &element_tys[i], def_bm, match_discrim_span); } pat_ty } @@ -321,11 +334,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Here, `demand::subtype` is good enough, but I don't // think any errors can be introduced by using // `demand::eqtype`. - self.demand_eqtype(pat.span, expected, uniq_ty); - self.check_pat_walk(&inner, inner_ty, def_bm, true); + self.demand_eqtype_pat(pat.span, expected, uniq_ty, match_discrim_span); + self.check_pat_walk(&inner, inner_ty, def_bm, match_discrim_span); uniq_ty } else { - self.check_pat_walk(&inner, tcx.types.err, def_bm, true); + self.check_pat_walk(&inner, tcx.types.err, def_bm, match_discrim_span); tcx.types.err } } @@ -357,15 +370,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Look for a case like `fn foo(&foo: u32)` and suggest // `fn foo(foo: &u32)` if let Some(mut err) = err { - if is_arg { - if let PatKind::Binding(..) 
= inner.node { - if let Ok(snippet) = tcx.sess.source_map() - .span_to_snippet(pat.span) - { - err.help(&format!("did you mean `{}: &{}`?", - &snippet[1..], - expected)); - } + if let PatKind::Binding(..) = inner.node { + if let Ok(snippet) = tcx.sess.source_map() + .span_to_snippet(pat.span) + { + err.help(&format!("did you mean `{}: &{}`?", + &snippet[1..], + expected)); } } err.emit(); @@ -374,10 +385,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }; - self.check_pat_walk(&inner, inner_ty, def_bm, true); + self.check_pat_walk(&inner, inner_ty, def_bm, match_discrim_span); rptr_ty } else { - self.check_pat_walk(&inner, tcx.types.err, def_bm, true); + self.check_pat_walk(&inner, tcx.types.err, def_bm, match_discrim_span); tcx.types.err } } @@ -435,13 +446,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; for elt in before { - self.check_pat_walk(&elt, inner_ty, def_bm, true); + self.check_pat_walk(&elt, inner_ty, def_bm, match_discrim_span); } if let Some(ref slice) = *slice { - self.check_pat_walk(&slice, slice_ty, def_bm, true); + self.check_pat_walk(&slice, slice_ty, def_bm, match_discrim_span); } for elt in after { - self.check_pat_walk(&elt, inner_ty, def_bm, true); + self.check_pat_walk(&elt, inner_ty, def_bm, match_discrim_span); } expected_ty } @@ -532,12 +543,14 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); true } - pub fn check_match(&self, - expr: &'gcx hir::Expr, - discrim: &'gcx hir::Expr, - arms: &'gcx [hir::Arm], - expected: Expectation<'tcx>, - match_src: hir::MatchSource) -> Ty<'tcx> { + pub fn check_match( + &self, + expr: &'gcx hir::Expr, + discrim: &'gcx hir::Expr, + arms: &'gcx [hir::Arm], + expected: Expectation<'tcx>, + match_src: hir::MatchSource, + ) -> Ty<'tcx> { let tcx = self.tcx; // Not entirely obvious: if matches may create ref bindings, we want to @@ -617,7 +630,7 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); if self.diverges.get().always() { for arm in arms { - self.warn_if_unreachable(arm.body.id, arm.body.span, "arm"); + self.warn_if_unreachable(arm.body.hir_id, arm.body.span, "arm"); } } @@ -632,8 +645,12 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); let mut all_pats_diverge = Diverges::WarnedAlways; for p in &arm.pats { self.diverges.set(Diverges::Maybe); - self.check_pat_walk(&p, discrim_ty, - ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), true); + self.check_pat_walk( + &p, + discrim_ty, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), + Some(discrim.span), + ); all_pats_diverge &= self.diverges.get(); } @@ -672,6 +689,8 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); CoerceMany::with_coercion_sites(coerce_first, arms) }; + let mut other_arms = vec![]; // used only for diagnostics + let mut prior_arm_ty = None; for (i, (arm, pats_diverge)) in arms.iter().zip(all_arm_pats_diverge).enumerate() { if let Some(ref g) = arm.guard { self.diverges.set(pats_diverge); @@ -692,17 +711,37 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); _ => false }; + let arm_span = if let hir::ExprKind::Block(ref blk, _) = arm.body.node { + // Point at the block expr instead of the entire block + blk.expr.as_ref().map(|e| e.span).unwrap_or(arm.body.span) + } else { + arm.body.span + }; if is_if_let_fallback { let cause = self.cause(expr.span, ObligationCauseCode::IfExpressionWithNoElse); assert!(arm_ty.is_unit()); coercion.coerce_forced_unit(self, &cause, &mut |_| (), true); } else { - let cause = self.cause(expr.span, 
ObligationCauseCode::MatchExpressionArm { - arm_span: arm.body.span, - source: match_src - }); + let cause = if i == 0 { + // The reason for the first arm to fail is not that the match arms diverge, + // but rather that there's a prior obligation that doesn't hold. + self.cause(arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)) + } else { + self.cause(expr.span, ObligationCauseCode::MatchExpressionArm { + arm_span, + source: match_src, + prior_arms: other_arms.clone(), + last_ty: prior_arm_ty.unwrap(), + discrim_hir_id: discrim.hir_id, + }) + }; coercion.coerce(self, &cause, &arm.body, arm_ty); } + other_arms.push(arm_span); + if other_arms.len() > 5 { + other_arms.remove(0); + } + prior_arm_ty = Some(arm_ty); } // We won't diverge unless the discriminant or all arms diverge. @@ -711,84 +750,94 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); coercion.complete(self) } - fn check_pat_struct(&self, - pat: &'gcx hir::Pat, - qpath: &hir::QPath, - fields: &'gcx [Spanned], - etc: bool, - expected: Ty<'tcx>, - def_bm: ty::BindingMode) -> Ty<'tcx> + fn check_pat_struct( + &self, + pat: &'gcx hir::Pat, + qpath: &hir::QPath, + fields: &'gcx [Spanned], + etc: bool, + expected: Ty<'tcx>, + def_bm: ty::BindingMode, + match_discrim_span: Option, + ) -> Ty<'tcx> { // Resolve the path and check the definition for errors. - let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(qpath, pat.id) { + let (variant, pat_ty) = if let Some(variant_ty) = self.check_struct_path(qpath, pat.hir_id) + { variant_ty } else { for field in fields { - self.check_pat_walk(&field.node.pat, self.tcx.types.err, def_bm, true); + self.check_pat_walk( + &field.node.pat, + self.tcx.types.err, + def_bm, + match_discrim_span, + ); } return self.tcx.types.err; }; // Type-check the path. - self.demand_eqtype(pat.span, expected, pat_ty); + self.demand_eqtype_pat(pat.span, expected, pat_ty, match_discrim_span); // Type-check subpatterns. - if self.check_struct_pat_fields(pat_ty, pat.id, pat.span, variant, fields, etc, def_bm) { + if self.check_struct_pat_fields(pat_ty, pat.hir_id, pat.span, variant, fields, etc, def_bm) + { pat_ty } else { self.tcx.types.err } } - fn check_pat_path(&self, - pat: &hir::Pat, - qpath: &hir::QPath, - expected: Ty<'tcx>) -> Ty<'tcx> - { + fn check_pat_path( + &self, + pat: &hir::Pat, + qpath: &hir::QPath, + expected: Ty<'tcx>, + ) -> Ty<'tcx> { let tcx = self.tcx; - let report_unexpected_def = |def: Def| { - span_err!(tcx.sess, pat.span, E0533, - "expected unit struct/variant or constant, found {} `{}`", - def.kind_name(), - hir::print::to_string(tcx.hir(), |s| s.print_qpath(qpath, false))); - }; // Resolve the path and check the definition for errors. - let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.hir_id, pat.span); match def { Def::Err => { self.set_tainted_by_errors(); return tcx.types.err; } Def::Method(..) => { - report_unexpected_def(def); + report_unexpected_variant_def(tcx, &def, pat.span, qpath); return tcx.types.err; } - Def::VariantCtor(_, CtorKind::Const) | - Def::StructCtor(_, CtorKind::Const) | - Def::SelfCtor(..) | + Def::Ctor(_, _, CtorKind::Fictive) | + Def::Ctor(_, _, CtorKind::Fn) => { + report_unexpected_variant_def(tcx, &def, pat.span, qpath); + return tcx.types.err; + } + Def::Ctor(_, _, CtorKind::Const) | Def::SelfCtor(..) | Def::Const(..) | Def::AssociatedConst(..) 
=> {} // OK _ => bug!("unexpected pattern definition: {:?}", def) } // Type-check the path. - let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id).0; + let pat_ty = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.hir_id).0; self.demand_suptype(pat.span, expected, pat_ty); pat_ty } - fn check_pat_tuple_struct(&self, - pat: &hir::Pat, - qpath: &hir::QPath, - subpats: &'gcx [P], - ddpos: Option, - expected: Ty<'tcx>, - def_bm: ty::BindingMode) -> Ty<'tcx> - { + fn check_pat_tuple_struct( + &self, + pat: &hir::Pat, + qpath: &hir::QPath, + subpats: &'gcx [P], + ddpos: Option, + expected: Ty<'tcx>, + def_bm: ty::BindingMode, + match_arm_pat_span: Option, + ) -> Ty<'tcx> { let tcx = self.tcx; let on_error = || { for pat in subpats { - self.check_pat_walk(&pat, tcx.types.err, def_bm, true); + self.check_pat_walk(&pat, tcx.types.err, def_bm, match_arm_pat_span); } }; let report_unexpected_def = |def: Def| { @@ -801,7 +850,7 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); }; // Resolve the path and check the definition for errors. - let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.id, pat.span); + let (def, opt_ty, segments) = self.resolve_ty_and_def_ufcs(qpath, pat.hir_id, pat.span); if def == Def::Err { self.set_tainted_by_errors(); on_error(); @@ -809,7 +858,8 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); } // Type-check the path. - let (pat_ty, def) = self.instantiate_value_path(segments, opt_ty, def, pat.span, pat.id); + let (pat_ty, def) = self.instantiate_value_path(segments, opt_ty, def, pat.span, + pat.hir_id); if !pat_ty.is_fn() { report_unexpected_def(def); return self.tcx.types.err; @@ -825,8 +875,7 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); report_unexpected_def(def); return tcx.types.err; } - Def::VariantCtor(_, CtorKind::Fn) | - Def::StructCtor(_, CtorKind::Fn) => { + Def::Ctor(_, _, CtorKind::Fn) => { tcx.expect_variant_def(def) } _ => bug!("unexpected pattern definition: {:?}", def) @@ -836,20 +885,20 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); let pat_ty = pat_ty.fn_sig(tcx).output(); let pat_ty = pat_ty.no_bound_vars().expect("expected fn type"); - self.demand_eqtype(pat.span, expected, pat_ty); + self.demand_eqtype_pat(pat.span, expected, pat_ty, match_arm_pat_span); // Type-check subpatterns. 
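
An illustrative, self-contained sketch (enum and function names invented) of the tuple-struct patterns handled here, where each subpattern is checked against the corresponding constructor field type:

    enum Shape {
        Circle(f64),
        Rect(f64, f64),
    }

    fn area(shape: &Shape) -> f64 {
        match shape {
            // Each subpattern (`r`, `w`, `h`) is type-checked against the
            // field type of the variant's constructor.
            Shape::Circle(r) => std::f64::consts::PI * r * r,
            Shape::Rect(w, h) => w * h,
        }
    }

    fn main() {
        assert_eq!(area(&Shape::Rect(2.0, 3.0)), 6.0);
    }
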
if subpats.len() == variant.fields.len() || subpats.len() < variant.fields.len() && ddpos.is_some() { let substs = match pat_ty.sty { ty::Adt(_, substs) => substs, - ref ty => bug!("unexpected pattern type {:?}", ty), + _ => bug!("unexpected pattern type {:?}", pat_ty), }; for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) { let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs); - self.check_pat_walk(&subpat, field_ty, def_bm, true); + self.check_pat_walk(&subpat, field_ty, def_bm, match_arm_pat_span); - self.tcx.check_stability(variant.fields[i].did, Some(pat.id), subpat.span); + self.tcx.check_stability(variant.fields[i].did, Some(pat.hir_id), subpat.span); } } else { let subpats_ending = if subpats.len() == 1 { "" } else { "s" }; @@ -867,14 +916,16 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); pat_ty } - fn check_struct_pat_fields(&self, - adt_ty: Ty<'tcx>, - pat_id: ast::NodeId, - span: Span, - variant: &'tcx ty::VariantDef, - fields: &'gcx [Spanned], - etc: bool, - def_bm: ty::BindingMode) -> bool { + fn check_struct_pat_fields( + &self, + adt_ty: Ty<'tcx>, + pat_id: hir::HirId, + span: Span, + variant: &'tcx ty::VariantDef, + fields: &'gcx [Spanned], + etc: bool, + def_bm: ty::BindingMode, + ) -> bool { let tcx = self.tcx; let (substs, adt) = match adt_ty.sty { @@ -897,7 +948,7 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); let mut inexistent_fields = vec![]; // Typecheck each field. for &Spanned { node: ref field, span } in fields { - let ident = tcx.adjust_ident(field.ident, variant.did, self.body_id).0; + let ident = tcx.adjust_ident(field.ident, variant.def_id, self.body_id).0; let field_ty = match used_fields.entry(ident) { Occupied(occupied) => { struct_span_err!(tcx.sess, span, E0025, @@ -915,56 +966,62 @@ https://doc.rust-lang.org/reference/types.html#trait-objects"); vacant.insert(span); field_map.get(&ident) .map(|(i, f)| { - self.write_field_index(field.id, *i); + self.write_field_index(field.hir_id, *i); self.tcx.check_stability(f.did, Some(pat_id), span); self.field_ty(span, f, substs) }) .unwrap_or_else(|| { - inexistent_fields.push((span, field.ident)); + inexistent_fields.push(field.ident); no_field_errors = false; tcx.types.err }) } }; - self.check_pat_walk(&field.pat, field_ty, def_bm, true); + self.check_pat_walk(&field.pat, field_ty, def_bm, None); } let mut unmentioned_fields = variant.fields .iter() .map(|field| field.ident.modern()) .filter(|ident| !used_fields.contains_key(&ident)) .collect::>(); - if inexistent_fields.len() > 0 { + if inexistent_fields.len() > 0 && !variant.recovered { let (field_names, t, plural) = if inexistent_fields.len() == 1 { - (format!("a field named `{}`", inexistent_fields[0].1), "this", "") + (format!("a field named `{}`", inexistent_fields[0]), "this", "") } else { (format!("fields named {}", inexistent_fields.iter() - .map(|(_, name)| format!("`{}`", name)) + .map(|ident| format!("`{}`", ident)) .collect::>() .join(", ")), "these", "s") }; - let spans = inexistent_fields.iter().map(|(span, _)| *span).collect::>(); + let spans = inexistent_fields.iter().map(|ident| ident.span).collect::>(); let mut err = struct_span_err!(tcx.sess, spans, E0026, "{} `{}` does not have {}", kind_name, - tcx.item_path_str(variant.did), + tcx.def_path_str(variant.def_id), field_names); - if let Some((span, ident)) = inexistent_fields.last() { - err.span_label(*span, + if let Some(ident) = inexistent_fields.last() { + err.span_label(ident.span, format!("{} `{}` 
does not have {} field{}", kind_name, - tcx.item_path_str(variant.did), + tcx.def_path_str(variant.def_id), t, plural)); if plural == "" { let input = unmentioned_fields.iter().map(|field| &field.name); let suggested_name = - find_best_match_for_name(input, &ident.name.as_str(), None); + find_best_match_for_name(input, &ident.as_str(), None); if let Some(suggested_name) = suggested_name { - err.span_suggestion(*span, "did you mean", suggested_name.to_string()); + err.span_suggestion( + ident.span, + "a field with a similar name exists", + suggested_name.to_string(), + Applicability::MaybeIncorrect, + ); + // we don't want to throw `E0027` in case we have thrown `E0026` for them unmentioned_fields.retain(|&x| x.as_str() != suggested_name.as_str()); } diff --git a/src/librustc_typeck/check/autoderef.rs b/src/librustc_typeck/check/autoderef.rs index 1b594342c9a71..f863cfe1887db 100644 --- a/src/librustc_typeck/check/autoderef.rs +++ b/src/librustc_typeck/check/autoderef.rs @@ -1,22 +1,11 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use astconv::AstConv; - use super::{FnCtxt, PlaceOp, Needs}; use super::method::MethodCallee; -use rustc::infer::InferOk; +use rustc::hir; +use rustc::infer::{InferCtxt, InferOk}; use rustc::session::DiagnosticMessageId; -use rustc::traits; -use rustc::ty::{self, Ty, TraitRef}; +use rustc::traits::{self, TraitEngine}; +use rustc::ty::{self, Ty, TyCtxt, TraitRef}; use rustc::ty::{ToPredicate, TypeFoldable}; use rustc::ty::adjustment::{Adjustment, Adjust, OverloadedDeref}; @@ -32,20 +21,24 @@ enum AutoderefKind { } pub struct Autoderef<'a, 'gcx: 'tcx, 'tcx: 'a> { - fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, + infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + body_id: hir::HirId, + param_env: ty::ParamEnv<'tcx>, steps: Vec<(Ty<'tcx>, AutoderefKind)>, cur_ty: Ty<'tcx>, obligations: Vec>, at_start: bool, include_raw_pointers: bool, span: Span, + silence_errors: bool, + reached_recursion_limit: bool } impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { type Item = (Ty<'tcx>, usize); fn next(&mut self) -> Option { - let tcx = self.fcx.tcx; + let tcx = self.infcx.tcx; debug!("autoderef: steps={:?}, cur_ty={:?}", self.steps, @@ -57,24 +50,10 @@ impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { } if self.steps.len() >= *tcx.sess.recursion_limit.get() { - // We've reached the recursion limit, error gracefully. 
- let suggested_limit = *tcx.sess.recursion_limit.get() * 2; - let msg = format!("reached the recursion limit while auto-dereferencing `{:?}`", - self.cur_ty); - let error_id = (DiagnosticMessageId::ErrorId(55), Some(self.span), msg); - let fresh = tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id); - if fresh { - struct_span_err!(tcx.sess, - self.span, - E0055, - "reached the recursion limit while auto-dereferencing `{:?}`", - self.cur_ty) - .span_label(self.span, "deref recursion limit reached") - .help(&format!( - "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate", - suggested_limit)) - .emit(); + if !self.silence_errors { + report_autoderef_recursion_limit_error(tcx, self.span, self.cur_ty); } + self.reached_recursion_limit = true; return None; } @@ -107,10 +86,32 @@ impl<'a, 'gcx, 'tcx> Iterator for Autoderef<'a, 'gcx, 'tcx> { } impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { + pub fn new(infcx: &'a InferCtxt<'a, 'gcx, 'tcx>, + param_env: ty::ParamEnv<'tcx>, + body_id: hir::HirId, + span: Span, + base_ty: Ty<'tcx>) + -> Autoderef<'a, 'gcx, 'tcx> + { + Autoderef { + infcx, + body_id, + param_env, + steps: vec![], + cur_ty: infcx.resolve_type_vars_if_possible(&base_ty), + obligations: vec![], + at_start: true, + include_raw_pointers: false, + silence_errors: false, + reached_recursion_limit: false, + span, + } + } + fn overloaded_deref_ty(&mut self, ty: Ty<'tcx>) -> Option> { debug!("overloaded_deref_ty({:?})", ty); - let tcx = self.fcx.tcx(); + let tcx = self.infcx.tcx; // let trait_ref = TraitRef { @@ -118,43 +119,52 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { substs: tcx.mk_substs_trait(self.cur_ty, &[]), }; - let cause = traits::ObligationCause::misc(self.span, self.fcx.body_id); + let cause = traits::ObligationCause::misc(self.span, self.body_id); let obligation = traits::Obligation::new(cause.clone(), - self.fcx.param_env, + self.param_env, trait_ref.to_predicate()); - if !self.fcx.predicate_may_hold(&obligation) { + if !self.infcx.predicate_may_hold(&obligation) { debug!("overloaded_deref_ty: cannot match obligation"); return None; } - let mut selcx = traits::SelectionContext::new(self.fcx); - let normalized_ty = traits::normalize_projection_type(&mut selcx, - self.fcx.param_env, - ty::ProjectionTy::from_ref_and_name( - tcx, - trait_ref, - Ident::from_str("Target"), - ), - cause, - 0, - &mut self.obligations); - - debug!("overloaded_deref_ty({:?}) = {:?}", ty, normalized_ty); + let mut fulfillcx = traits::FulfillmentContext::new_in_snapshot(); + let normalized_ty = fulfillcx.normalize_projection_type( + &self.infcx, + self.param_env, + ty::ProjectionTy::from_ref_and_name( + tcx, + trait_ref, + Ident::from_str("Target"), + ), + cause); + if let Err(e) = fulfillcx.select_where_possible(&self.infcx) { + // This shouldn't happen, except for evaluate/fulfill mismatches, + // but that's not a reason for an ICE (`predicate_may_hold` is conservative + // by design). + debug!("overloaded_deref_ty: encountered errors {:?} while fulfilling", + e); + return None; + } + let obligations = fulfillcx.pending_obligations(); + debug!("overloaded_deref_ty({:?}) = ({:?}, {:?})", + ty, normalized_ty, obligations); + self.obligations.extend(obligations); - Some(self.fcx.resolve_type_vars_if_possible(&normalized_ty)) + Some(self.infcx.resolve_type_vars_if_possible(&normalized_ty)) } /// Returns the final type, generating an error if it is an /// unresolved inference variable. 
- pub fn unambiguous_final_ty(&self) -> Ty<'tcx> { - self.fcx.structurally_resolved_type(self.span, self.cur_ty) + pub fn unambiguous_final_ty(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { + fcx.structurally_resolved_type(self.span, self.cur_ty) } /// Returns the final type we ended up with, which may well be an /// inference variable (we will resolve it first, if possible). pub fn maybe_ambiguous_final_ty(&self) -> Ty<'tcx> { - self.fcx.resolve_type_vars_if_possible(&self.cur_ty) + self.infcx.resolve_type_vars_if_possible(&self.cur_ty) } pub fn step_count(&self) -> usize { @@ -162,19 +172,19 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { } /// Returns the adjustment steps. - pub fn adjust_steps(&self, needs: Needs) + pub fn adjust_steps(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, needs: Needs) -> Vec> { - self.fcx.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(needs)) + fcx.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(fcx, needs)) } - pub fn adjust_steps_as_infer_ok(&self, needs: Needs) + pub fn adjust_steps_as_infer_ok(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, needs: Needs) -> InferOk<'tcx, Vec>> { let mut obligations = vec![]; let targets = self.steps.iter().skip(1).map(|&(ty, _)| ty) .chain(iter::once(self.cur_ty)); let steps: Vec<_> = self.steps.iter().map(|&(source, kind)| { if let AutoderefKind::Overloaded = kind { - self.fcx.try_overloaded_deref(self.span, source, needs) + fcx.try_overloaded_deref(self.span, source, needs) .and_then(|InferOk { value: method, obligations: o }| { obligations.extend(o); if let ty::Ref(region, _, mutbl) = method.sig.output().sty { @@ -211,8 +221,16 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { self } - pub fn finalize(self) { - let fcx = self.fcx; + pub fn silence_errors(mut self) -> Self { + self.silence_errors = true; + self + } + + pub fn reached_recursion_limit(&self) -> bool { + self.reached_recursion_limit + } + + pub fn finalize(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) { fcx.register_predicates(self.into_obligations()); } @@ -221,17 +239,32 @@ impl<'a, 'gcx, 'tcx> Autoderef<'a, 'gcx, 'tcx> { } } +pub fn report_autoderef_recursion_limit_error<'a, 'gcx, 'tcx>( + tcx: TyCtxt<'a, 'gcx, 'tcx>, span: Span, ty: Ty<'tcx>) +{ + // We've reached the recursion limit, error gracefully. 
+ let suggested_limit = *tcx.sess.recursion_limit.get() * 2; + let msg = format!("reached the recursion limit while auto-dereferencing `{:?}`", + ty); + let error_id = (DiagnosticMessageId::ErrorId(55), Some(span), msg); + let fresh = tcx.sess.one_time_diagnostics.borrow_mut().insert(error_id); + if fresh { + struct_span_err!(tcx.sess, + span, + E0055, + "reached the recursion limit while auto-dereferencing `{:?}`", + ty) + .span_label(span, "deref recursion limit reached") + .help(&format!( + "consider adding a `#![recursion_limit=\"{}\"]` attribute to your crate", + suggested_limit)) + .emit(); + } +} + impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn autoderef(&'a self, span: Span, base_ty: Ty<'tcx>) -> Autoderef<'a, 'gcx, 'tcx> { - Autoderef { - fcx: self, - steps: vec![], - cur_ty: self.resolve_type_vars_if_possible(&base_ty), - obligations: vec![], - at_start: true, - include_raw_pointers: false, - span, - } + Autoderef::new(self, self.param_env, self.body_id, span, base_ty) } pub fn try_overloaded_deref(&self, diff --git a/src/librustc_typeck/check/callee.rs b/src/librustc_typeck/check/callee.rs index 75ae868883484..15ae39600f6b4 100644 --- a/src/librustc_typeck/check/callee.rs +++ b/src/librustc_typeck/check/callee.rs @@ -1,33 +1,24 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use super::{Expectation, FnCtxt, Needs, TupleArgumentsFlag}; use super::autoderef::Autoderef; use super::method::MethodCallee; +use super::{Expectation, FnCtxt, Needs, TupleArgumentsFlag}; +use errors::{Applicability, DiagnosticBuilder}; use hir::def::Def; use hir::def_id::{DefId, LOCAL_CRATE}; +use rustc::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; +use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; use rustc::{infer, traits}; -use rustc::ty::{self, TyCtxt, TypeFoldable, Ty}; -use rustc::ty::adjustment::{Adjustment, Adjust, AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; +use rustc::infer::type_variable::TypeVariableOrigin; use rustc_target::spec::abi; use syntax::ast::Ident; use syntax_pos::Span; -use errors::Applicability; use rustc::hir; -/// Check that it is legal to call methods of the trait corresponding +/// Checks that it is legal to call methods of the trait corresponding /// to `trait_id` (this only cares about the trait, not the specific -/// method that is called) -pub fn check_legal_trait_for_method_call(tcx: TyCtxt, span: Span, trait_id: DefId) { +/// method that is called). +pub fn check_legal_trait_for_method_call(tcx: TyCtxt<'_, '_, '_>, span: Span, trait_id: DefId) { if tcx.lang_items().drop_trait() == Some(trait_id) { struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method") .span_label(span, "explicit destructor calls not allowed") @@ -38,26 +29,27 @@ pub fn check_legal_trait_for_method_call(tcx: TyCtxt, span: Span, trait_id: DefI enum CallStep<'tcx> { Builtin(Ty<'tcx>), DeferredClosure(ty::FnSig<'tcx>), - /// e.g., enum variant constructors + /// E.g., enum variant constructors. 
Overloaded(MethodCallee<'tcx>), } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { - pub fn check_call(&self, - call_expr: &'gcx hir::Expr, - callee_expr: &'gcx hir::Expr, - arg_exprs: &'gcx [hir::Expr], - expected: Expectation<'tcx>) - -> Ty<'tcx> { + pub fn check_call( + &self, + call_expr: &'gcx hir::Expr, + callee_expr: &'gcx hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { let original_callee_ty = self.check_expr(callee_expr); let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty); let mut autoderef = self.autoderef(callee_expr.span, expr_ty); let mut result = None; while result.is_none() && autoderef.next().is_some() { - result = self.try_overloaded_call_step(call_expr, callee_expr, &autoderef); + result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef); } - autoderef.finalize(); + autoderef.finalize(self); let output = match result { None => { @@ -84,20 +76,23 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { output } - fn try_overloaded_call_step(&self, - call_expr: &'gcx hir::Expr, - callee_expr: &'gcx hir::Expr, - autoderef: &Autoderef<'a, 'gcx, 'tcx>) - -> Option> { - let adjusted_ty = autoderef.unambiguous_final_ty(); - debug!("try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})", - call_expr, - adjusted_ty); + fn try_overloaded_call_step( + &self, + call_expr: &'gcx hir::Expr, + callee_expr: &'gcx hir::Expr, + arg_exprs: &'gcx [hir::Expr], + autoderef: &Autoderef<'a, 'gcx, 'tcx>, + ) -> Option> { + let adjusted_ty = autoderef.unambiguous_final_ty(self); + debug!( + "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})", + call_expr, adjusted_ty + ); // If the callee is a bare function or a closure, then we're all set. match adjusted_ty.sty { ty::FnDef(..) | ty::FnPtr(_) => { - let adjustments = autoderef.adjust_steps(Needs::None); + let adjustments = autoderef.adjust_steps(self, Needs::None); self.apply_adjustments(callee_expr, adjustments); return Some(CallStep::Builtin(adjusted_ty)); } @@ -110,21 +105,26 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // fnmut vs fnonce. If so, we have to defer further processing. if self.closure_kind(def_id, substs).is_none() { let closure_ty = self.closure_sig(def_id, substs); - let fn_sig = self.replace_bound_vars_with_fresh_vars( - call_expr.span, - infer::FnCall, - &closure_ty - ).0; - let adjustments = autoderef.adjust_steps(Needs::None); - self.record_deferred_call_resolution(def_id, DeferredCallResolution { - call_expr, - callee_expr, - adjusted_ty, - adjustments, - fn_sig, - closure_def_id: def_id, - closure_substs: substs, - }); + let fn_sig = self + .replace_bound_vars_with_fresh_vars( + call_expr.span, + infer::FnCall, + &closure_ty, + ) + .0; + let adjustments = autoderef.adjust_steps(self, Needs::None); + self.record_deferred_call_resolution( + def_id, + DeferredCallResolution { + call_expr, + callee_expr, + adjusted_ty, + adjustments, + fn_sig, + closure_def_id: def_id, + closure_substs: substs, + }, + ); return Some(CallStep::DeferredClosure(fn_sig)); } } @@ -144,34 +144,68 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => {} } - self.try_overloaded_call_traits(call_expr, adjusted_ty).map(|(autoref, method)| { - let mut adjustments = autoderef.adjust_steps(Needs::None); - adjustments.extend(autoref); - self.apply_adjustments(callee_expr, adjustments); - CallStep::Overloaded(method) - }) + // Now, we look for the implementation of a Fn trait on the object's type. 
+ // We first do it with the explicit instruction to look for an impl of + // `Fn`, with the tuple `Tuple` having an arity corresponding + // to the number of call parameters. + // If that fails (or_else branch), we try again without specifying the + // shape of the tuple (hence the None). This allows to detect an Fn trait + // is implemented, and use this information for diagnostic. + self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs)) + .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None)) + .map(|(autoref, method)| { + let mut adjustments = autoderef.adjust_steps(self, Needs::None); + adjustments.extend(autoref); + self.apply_adjustments(callee_expr, adjustments); + CallStep::Overloaded(method) + }) } - fn try_overloaded_call_traits(&self, - call_expr: &hir::Expr, - adjusted_ty: Ty<'tcx>) - -> Option<(Option>, - MethodCallee<'tcx>)> { + fn try_overloaded_call_traits( + &self, + call_expr: &hir::Expr, + adjusted_ty: Ty<'tcx>, + opt_arg_exprs: Option<&'gcx [hir::Expr]>, + ) -> Option<(Option>, MethodCallee<'tcx>)> { // Try the options that are least restrictive on the caller first. - for &(opt_trait_def_id, method_name, borrow) in - &[(self.tcx.lang_items().fn_trait(), Ident::from_str("call"), true), - (self.tcx.lang_items().fn_mut_trait(), Ident::from_str("call_mut"), true), - (self.tcx.lang_items().fn_once_trait(), Ident::from_str("call_once"), false)] { + for &(opt_trait_def_id, method_name, borrow) in &[ + ( + self.tcx.lang_items().fn_trait(), + Ident::from_str("call"), + true, + ), + ( + self.tcx.lang_items().fn_mut_trait(), + Ident::from_str("call_mut"), + true, + ), + ( + self.tcx.lang_items().fn_once_trait(), + Ident::from_str("call_once"), + false, + ), + ] { let trait_def_id = match opt_trait_def_id { Some(def_id) => def_id, None => continue, }; - if let Some(ok) = self.lookup_method_in_trait(call_expr.span, - method_name, - trait_def_id, - adjusted_ty, - None) { + let opt_input_types = opt_arg_exprs.map(|arg_exprs| [self.tcx.mk_tup( + arg_exprs + .iter() + .map(|e| self.next_ty_var( + TypeVariableOrigin::TypeInference(e.span) + )) + )]); + let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref); + + if let Some(ok) = self.lookup_method_in_trait( + call_expr.span, + method_name, + trait_def_id, + adjusted_ty, + opt_input_types, + ) { let method = self.register_infer_ok_obligations(ok); let mut autoref = None; if borrow { @@ -183,11 +217,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // deployment, conservatively omit // overloaded function call ops. allow_two_phase_borrow: AllowTwoPhase::No, - } + }, }; autoref = Some(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)), - target: method.sig.inputs()[0] + target: method.sig.inputs()[0], }); } } @@ -198,23 +232,51 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { None } - fn confirm_builtin_call(&self, - call_expr: &hir::Expr, - callee_ty: Ty<'tcx>, - arg_exprs: &'gcx [hir::Expr], - expected: Expectation<'tcx>) - -> Ty<'tcx> { + /// Give appropriate suggestion when encountering `||{/* not callable */}()`, where the + /// likely intention is to call the closure, suggest `(||{})()`. (#55851) + fn identify_bad_closure_def_and_call( + &self, + err: &mut DiagnosticBuilder<'a>, + hir_id: hir::HirId, + callee_node: &hir::ExprKind, + callee_span: Span, + ) { + let hir_id = self.tcx.hir().get_parent_node_by_hir_id(hir_id); + let parent_node = self.tcx.hir().get_by_hir_id(hir_id); + if let ( + hir::Node::Expr(hir::Expr { node: hir::ExprKind::Closure(_, _, _, sp, ..), .. 
}), + hir::ExprKind::Block(..), + ) = (parent_node, callee_node) { + let start = sp.shrink_to_lo(); + let end = self.tcx.sess.source_map().next_point(callee_span); + err.multipart_suggestion( + "if you meant to create this closure and immediately call it, surround the \ + closure with parenthesis", + vec![(start, "(".to_string()), (end, ")".to_string())], + Applicability::MaybeIncorrect, + ); + } + } + + fn confirm_builtin_call( + &self, + call_expr: &hir::Expr, + callee_ty: Ty<'tcx>, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { let (fn_sig, def_span) = match callee_ty.sty { - ty::FnDef(def_id, _) => { - (callee_ty.fn_sig(self.tcx), self.tcx.hir().span_if_local(def_id)) - } + ty::FnDef(def_id, _) => ( + callee_ty.fn_sig(self.tcx), + self.tcx.hir().span_if_local(def_id), + ), ty::FnPtr(sig) => (sig, None), ref t => { let mut unit_variant = None; if let &ty::Adt(adt_def, ..) = t { if adt_def.is_enum() { if let hir::ExprKind::Call(ref expr, _) = call_expr.node { - unit_variant = Some(self.tcx.hir().node_to_pretty_string(expr.id)) + unit_variant = Some(self.tcx.hir().hir_to_pretty_string(expr.hir_id)) } } } @@ -229,15 +291,26 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match unit_variant { Some(ref path) => format!("enum variant `{}`", path), None => format!("`{}`", callee_ty), - }); + } + ); + + self.identify_bad_closure_def_and_call( + &mut err, + call_expr.hir_id, + &callee.node, + callee.span, + ); if let Some(ref path) = unit_variant { - err.span_suggestion_with_applicability( + err.span_suggestion( call_expr.span, - &format!("`{}` is a unit variant, you need to write it \ - without the parenthesis", path), + &format!( + "`{}` is a unit variant, you need to write it \ + without the parenthesis", + path + ), path.to_string(), - Applicability::MachineApplicable + Applicability::MachineApplicable, ); } @@ -245,48 +318,50 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let def = match callee.node { hir::ExprKind::Path(ref qpath) => { self.tables.borrow().qpath_def(qpath, callee.hir_id) - }, + } hir::ExprKind::Call(ref inner_callee, _) => { // If the call spans more than one line and the callee kind is // itself another `ExprCall`, that's a clue that we might just be // missing a semicolon (Issue #51055) - let call_is_multiline = self.tcx.sess.source_map() - .is_multiline(call_expr.span); + let call_is_multiline = + self.tcx.sess.source_map().is_multiline(call_expr.span); if call_is_multiline { let span = self.tcx.sess.source_map().next_point(callee.span); - err.span_suggestion_with_applicability( + err.span_suggestion( span, "try adding a semicolon", ";".to_owned(), - Applicability::MaybeIncorrect + Applicability::MaybeIncorrect, ); } if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.node { inner_callee_path = Some(inner_qpath); - self.tables.borrow().qpath_def(inner_qpath, inner_callee.hir_id) + self.tables + .borrow() + .qpath_def(inner_qpath, inner_callee.hir_id) } else { Def::Err } - }, - _ => { - Def::Err } + _ => Def::Err, }; err.span_label(call_expr.span, "call expression requires function"); let def_span = match def { Def::Err => None, - Def::Local(id) | Def::Upvar(id, ..) => { - Some(self.tcx.hir().span(id)) - } - _ => def.opt_def_id().and_then(|did| self.tcx.hir().span_if_local(did)), + Def::Local(id) | Def::Upvar(id, ..) 
=> Some(self.tcx.hir().span(id)), + _ => def + .opt_def_id() + .and_then(|did| self.tcx.hir().span_if_local(did)), }; if let Some(span) = def_span { let label = match (unit_variant, inner_callee_path) { (Some(path), _) => format!("`{}` defined here", path), (_, Some(hir::QPath::Resolved(_, path))) => format!( - "`{}` defined here returns `{}`", path, callee_ty.to_string() + "`{}` defined here returns `{}`", + path, + callee_ty.to_string() ), _ => format!("`{}` defined here", callee_ty.to_string()), }; @@ -294,19 +369,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } err.emit(); } else { - bug!("call_expr.node should be an ExprKind::Call, got {:?}", call_expr.node); + bug!( + "call_expr.node should be an ExprKind::Call, got {:?}", + call_expr.node + ); } // This is the "default" function signature, used in case of error. // In that case, we check each argument against "error" in order to // set up all the node type bindings. - (ty::Binder::bind(self.tcx.mk_fn_sig( - self.err_args(arg_exprs.len()).into_iter(), - self.tcx.types.err, - false, - hir::Unsafety::Normal, - abi::Abi::Rust - )), None) + ( + ty::Binder::bind(self.tcx.mk_fn_sig( + self.err_args(arg_exprs.len()).into_iter(), + self.tcx.types.err, + false, + hir::Unsafety::Normal, + abi::Abi::Rust, + )), + None, + ) } }; @@ -315,69 +396,90 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // renormalize the associated types at this point, since they // previously appeared within a `Binder<>` and hence would not // have been normalized before. - let fn_sig = - self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, &fn_sig) - .0; + let fn_sig = self + .replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, &fn_sig) + .0; let fn_sig = self.normalize_associated_types_in(call_expr.span, &fn_sig); + let inputs = if fn_sig.c_variadic { + if fn_sig.inputs().len() > 1 { + &fn_sig.inputs()[..fn_sig.inputs().len() - 1] + } else { + span_bug!(call_expr.span, + "C-variadic functions are only valid with one or more fixed arguments"); + } + } else { + &fn_sig.inputs()[..] + }; // Call the generic checker. - let expected_arg_tys = - self.expected_inputs_for_expected_output(call_expr.span, - expected, - fn_sig.output(), - fn_sig.inputs()); - self.check_argument_types(call_expr.span, - call_expr.span, - fn_sig.inputs(), - &expected_arg_tys[..], - arg_exprs, - fn_sig.variadic, - TupleArgumentsFlag::DontTupleArguments, - def_span); + let expected_arg_tys = self.expected_inputs_for_expected_output( + call_expr.span, + expected, + fn_sig.output(), + inputs, + ); + self.check_argument_types( + call_expr.span, + call_expr.span, + inputs, + &expected_arg_tys[..], + arg_exprs, + fn_sig.c_variadic, + TupleArgumentsFlag::DontTupleArguments, + def_span, + ); fn_sig.output() } - fn confirm_deferred_closure_call(&self, - call_expr: &hir::Expr, - arg_exprs: &'gcx [hir::Expr], - expected: Expectation<'tcx>, - fn_sig: ty::FnSig<'tcx>) - -> Ty<'tcx> { + fn confirm_deferred_closure_call( + &self, + call_expr: &hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + fn_sig: ty::FnSig<'tcx>, + ) -> Ty<'tcx> { // `fn_sig` is the *signature* of the cosure being called. We // don't know the full details yet (`Fn` vs `FnMut` etc), but we // do know the types expected for each argument and the return // type. 
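The deferral described in the comment above shows up with ordinary closures; a small sketch in user code (the names are illustrative, not taken from this change):

fn main() {
    let mut log = Vec::new();
    // When `record(1)` is type-checked, the closure's kind (`Fn`, `FnMut`
    // or `FnOnce`) is not yet settled -- it depends on how `log` is
    // captured -- so resolution of the call is deferred. The argument type
    // `i32` and the return type `()` are already known from the signature.
    let mut record = |event: i32| log.push(event);
    record(1);
    record(2);
    assert_eq!(log, [1, 2]);
}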
- let expected_arg_tys = self.expected_inputs_for_expected_output(call_expr.span, - expected, - fn_sig.output().clone(), - fn_sig.inputs()); - - self.check_argument_types(call_expr.span, - call_expr.span, - fn_sig.inputs(), - &expected_arg_tys, - arg_exprs, - fn_sig.variadic, - TupleArgumentsFlag::TupleArguments, - None); + let expected_arg_tys = self.expected_inputs_for_expected_output( + call_expr.span, + expected, + fn_sig.output().clone(), + fn_sig.inputs(), + ); + + self.check_argument_types( + call_expr.span, + call_expr.span, + fn_sig.inputs(), + &expected_arg_tys, + arg_exprs, + fn_sig.c_variadic, + TupleArgumentsFlag::TupleArguments, + None, + ); fn_sig.output() } - fn confirm_overloaded_call(&self, - call_expr: &hir::Expr, - arg_exprs: &'gcx [hir::Expr], - expected: Expectation<'tcx>, - method_callee: MethodCallee<'tcx>) - -> Ty<'tcx> { - let output_type = self.check_method_argument_types(call_expr.span, - call_expr.span, - Ok(method_callee), - arg_exprs, - TupleArgumentsFlag::TupleArguments, - expected); + fn confirm_overloaded_call( + &self, + call_expr: &hir::Expr, + arg_exprs: &'gcx [hir::Expr], + expected: Expectation<'tcx>, + method_callee: MethodCallee<'tcx>, + ) -> Ty<'tcx> { + let output_type = self.check_method_argument_types( + call_expr.span, + call_expr.span, + Ok(method_callee), + arg_exprs, + TupleArgumentsFlag::TupleArguments, + expected, + ); self.write_method_call(call_expr.hir_id, method_callee); output_type @@ -401,11 +503,12 @@ impl<'a, 'gcx, 'tcx> DeferredCallResolution<'gcx, 'tcx> { // we should not be invoked until the closure kind has been // determined by upvar inference - assert!(fcx.closure_kind(self.closure_def_id, self.closure_substs).is_some()); + assert!(fcx + .closure_kind(self.closure_def_id, self.closure_substs) + .is_some()); // We may now know enough to figure out fn vs fnmut etc. - match fcx.try_overloaded_call_traits(self.call_expr, - self.adjusted_ty) { + match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) { Some((autoref, method_callee)) => { // One problem is that when we get here, we are going // to have a newly instantiated function signature @@ -420,22 +523,28 @@ impl<'a, 'gcx, 'tcx> DeferredCallResolution<'gcx, 'tcx> { debug!("attempt_resolution: method_callee={:?}", method_callee); for (method_arg_ty, self_arg_ty) in - method_sig.inputs().iter().skip(1).zip(self.fn_sig.inputs()) { + method_sig.inputs().iter().skip(1).zip(self.fn_sig.inputs()) + { fcx.demand_eqtype(self.call_expr.span, &self_arg_ty, &method_arg_ty); } - fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output()); + fcx.demand_eqtype( + self.call_expr.span, + method_sig.output(), + self.fn_sig.output(), + ); let mut adjustments = self.adjustments; adjustments.extend(autoref); fcx.apply_adjustments(self.callee_expr, adjustments); - fcx.write_method_call(self.call_expr.hir_id, - method_callee); + fcx.write_method_call(self.call_expr.hir_id, method_callee); } None => { - span_bug!(self.call_expr.span, - "failed to find an overloaded call trait for closure call"); + span_bug!( + self.call_expr.span, + "failed to find an overloaded call trait for closure call" + ); } } } diff --git a/src/librustc_typeck/check/cast.rs b/src/librustc_typeck/check/cast.rs index 51271f0f35120..cad9e73bd2ac9 100644 --- a/src/librustc_typeck/check/cast.rs +++ b/src/librustc_typeck/check/cast.rs @@ -1,13 +1,3 @@ -// Copyright 2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code for type-checking cast expressions. //! //! A cast `e as U` is valid if one of the following holds: @@ -41,19 +31,19 @@ use super::FnCtxt; use errors::{DiagnosticBuilder,Applicability}; -use hir::def_id::DefId; -use lint; +use crate::hir::def_id::DefId; +use crate::lint; use rustc::hir; use rustc::session::Session; use rustc::traits; use rustc::ty::{self, Ty, TypeFoldable, TypeAndMut}; +use rustc::ty::subst::SubstsRef; use rustc::ty::adjustment::AllowTwoPhase; use rustc::ty::cast::{CastKind, CastTy}; -use rustc::ty::subst::Substs; use rustc::middle::lang_items; use syntax::ast; use syntax_pos::Span; -use util::common::ErrorReported; +use crate::util::common::ErrorReported; /// Reifies a cast check to be checked once we have full type information for /// a function context. @@ -73,13 +63,13 @@ enum PointerKind<'tcx> { /// No metadata attached, ie pointer to sized type or foreign type Thin, /// A trait object - Vtable(DefId), + Vtable(Option), /// Slice Length, /// The unsize info of this projection OfProjection(&'tcx ty::ProjectionTy<'tcx>), /// The unsize info of this opaque ty - OfOpaque(DefId, &'tcx Substs<'tcx>), + OfOpaque(DefId, SubstsRef<'tcx>), /// The unsize info of this parameter OfParam(&'tcx ty::ParamTy), } @@ -98,14 +88,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return Err(ErrorReported); } - if self.type_is_known_to_be_sized(t, span) { + if self.type_is_known_to_be_sized_modulo_regions(t, span) { return Ok(Some(PointerKind::Thin)); } Ok(match t.sty { ty::Slice(_) | ty::Str => Some(PointerKind::Length), ty::Dynamic(ref tty, ..) => - Some(PointerKind::Vtable(tty.principal().def_id())), + Some(PointerKind::Vtable(tty.principal_def_id())), ty::Adt(def, substs) if def.is_struct() => { match def.non_enum_variant().fields.last() { None => Some(PointerKind::Thin), @@ -150,7 +140,7 @@ enum CastError { CastToBool, CastToChar, DifferingKinds, - /// Cast of thin to fat raw ptr (eg. `*const () as *const [u8]`) + /// Cast of thin to fat raw ptr (e.g., `*const () as *const [u8]`). 
SizedUnsizedCast, IllegalCast, NeedDeref, @@ -223,8 +213,14 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { fcx.ty_to_string(self.expr_ty), cast_ty)); if let Ok(snippet) = fcx.sess().source_map().span_to_snippet(self.expr.span) { - err.span_help(self.expr.span, - &format!("did you mean `*{}`?", snippet)); + err.span_suggestion( + self.expr.span, + "dereference the expression", + format!("*{}", snippet), + Applicability::MaybeIncorrect, + ); + } else { + err.span_help(self.expr.span, "dereference the expression with `*`"); } err.emit(); } @@ -261,10 +257,28 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { .emit(); } CastError::CastToBool => { - struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`") - .span_label(self.span, "unsupported cast") - .help("compare with zero instead") - .emit(); + let mut err = + struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`"); + + if self.expr_ty.is_numeric() { + match fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) { + Ok(snippet) => { + err.span_suggestion( + self.span, + "compare with zero instead", + format!("{} != 0", snippet), + Applicability::MachineApplicable, + ); + } + Err(_) => { + err.span_help(self.span, "compare with zero instead"); + } + } + } else { + err.span_label(self.span, "unsupported cast"); + } + + err.emit(); } CastError::CastToChar => { type_error_struct!(fcx.tcx.sess, self.span, self.expr_ty, E0604, @@ -280,7 +294,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { .emit(); } CastError::SizedUnsizedCast => { - use structured_errors::{SizedUnsizedCastError, StructuredDiagnostic}; + use crate::structured_errors::{SizedUnsizedCastError, StructuredDiagnostic}; SizedUnsizedCastError::new(&fcx.tcx.sess, self.span, self.expr_ty, @@ -300,7 +314,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { err.note("The type information given here is insufficient to check whether \ the pointer cast is valid"); if unknown_cast_to { - err.span_suggestion_short_with_applicability( + err.span_suggestion_short( self.cast_span, "consider giving more type information", String::new(), @@ -331,7 +345,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { if self.cast_ty.is_trait() { match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) { Ok(s) => { - err.span_suggestion_with_applicability( + err.span_suggestion( self.cast_span, "try casting to a reference instead", format!("&{}{}", mtstr, s), @@ -353,7 +367,7 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { ty::Adt(def, ..) 
if def.is_box() => { match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) { Ok(s) => { - err.span_suggestion_with_applicability( + err.span_suggestion( self.cast_span, "try casting to a `Box` instead", format!("Box<{}>", s), @@ -385,9 +399,9 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { } else { ("", lint::builtin::TRIVIAL_CASTS) }; - let mut err = fcx.tcx.struct_span_lint_node( + let mut err = fcx.tcx.struct_span_lint_hir( lint, - self.expr.id, + self.expr.hir_id, self.span, &format!("trivial {}cast: `{}` as `{}`", adjective, @@ -403,31 +417,30 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { self.cast_ty = fcx.structurally_resolved_type(self.span, self.cast_ty); debug!("check_cast({}, {:?} as {:?})", - self.expr.id, + self.expr.hir_id, self.expr_ty, self.cast_ty); - if !fcx.type_is_known_to_be_sized(self.cast_ty, self.span) { + if !fcx.type_is_known_to_be_sized_modulo_regions(self.cast_ty, self.span) { self.report_cast_to_unsized_type(fcx); } else if self.expr_ty.references_error() || self.cast_ty.references_error() { // No sense in giving duplicate error messages } else if self.try_coercion_cast(fcx) { self.trivial_cast_lint(fcx); debug!(" -> CoercionCast"); - fcx.tables.borrow_mut().cast_kinds_mut().insert(self.expr.hir_id, - CastKind::CoercionCast); + fcx.tables.borrow_mut().set_coercion_cast(self.expr.hir_id.local_id); + } else { match self.do_check(fcx) { Ok(k) => { debug!(" -> {:?}", k); - fcx.tables.borrow_mut().cast_kinds_mut().insert(self.expr.hir_id, k); } Err(e) => self.report_cast_error(fcx, e), }; } } - /// Check a cast, and report an error if one exists. In some cases, this + /// Checks a cast, and report an error if one exists. In some cases, this /// can return Ok and create type errors in the fcx rather than returning /// directly. coercion-cast is handled in check instead of here. fn do_check(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Result { @@ -628,8 +641,8 @@ impl<'a, 'gcx, 'tcx> CastCheck<'tcx> { } impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { - fn type_is_known_to_be_sized(&self, ty: Ty<'tcx>, span: Span) -> bool { + fn type_is_known_to_be_sized_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool { let lang_item = self.tcx.require_lang_item(lang_items::SizedTraitLangItem); - traits::type_known_to_meet_bound(self, self.param_env, ty, lang_item, span) + traits::type_known_to_meet_bound_modulo_regions(self, self.param_env, ty, lang_item, span) } } diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index be15503e47906..f7396cbd42f2f 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -1,28 +1,18 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code for type-checking closure expressions. 
use super::{check_fn, Expectation, FnCtxt, GeneratorTypes}; -use astconv::AstConv; -use middle::region; +use crate::astconv::AstConv; +use crate::middle::region; use rustc::hir::def_id::DefId; use rustc::infer::{InferOk, InferResult}; use rustc::infer::LateBoundRegionConversionTime; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::traits::Obligation; use rustc::traits::error_reporting::ArgKind; -use rustc::ty::{self, ToPolyTraitRef, Ty, GenericParamDefKind}; +use rustc::ty::{self, Ty, GenericParamDefKind}; use rustc::ty::fold::TypeFoldable; -use rustc::ty::subst::Substs; +use rustc::ty::subst::InternalSubsts; use std::cmp; use std::iter; use rustc_target::spec::abi::Abi; @@ -82,7 +72,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { opt_kind, expected_sig ); - let expr_def_id = self.tcx.hir().local_def_id(expr.id); + let expr_def_id = self.tcx.hir().local_def_id_from_hir_id(expr.hir_id); let ClosureSignatures { bound_sig, @@ -96,7 +86,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.param_env, liberated_sig, decl, - expr.id, + expr.hir_id, body, gen, ).1; @@ -105,15 +95,17 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // types of upvars. These will be unified during the upvar // inference phase (`upvar.rs`). let base_substs = - Substs::identity_for_item(self.tcx, self.tcx.closure_base_def_id(expr_def_id)); + InternalSubsts::identity_for_item(self.tcx, self.tcx.closure_base_def_id(expr_def_id)); let substs = base_substs.extend_to(self.tcx,expr_def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => { - span_bug!(expr.span, "closure has region param") + span_bug!(expr.span, "closure has lifetime param") + } + GenericParamDefKind::Type { .. } => { + self.infcx.next_ty_var(TypeVariableOrigin::ClosureSynthetic(expr.span)).into() } - GenericParamDefKind::Type {..} => { - self.infcx - .next_ty_var(TypeVariableOrigin::ClosureSynthetic(expr.span)).into() + GenericParamDefKind::Const => { + span_bug!(expr.span, "closure has const param") } } }); @@ -141,8 +133,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let closure_type = self.tcx.mk_closure(expr_def_id, substs); debug!( - "check_closure: expr.id={:?} closure_type={:?}", - expr.id, closure_type + "check_closure: expr.hir_id={:?} closure_type={:?}", + expr.hir_id, closure_type ); // Tuple up the arguments and insert the resulting function type into @@ -151,7 +143,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.mk_fn_sig( iter::once(self.tcx.intern_tup(sig.inputs())), sig.output(), - sig.variadic, + sig.c_variadic, sig.unsafety, sig.abi, ) @@ -200,7 +192,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.deduce_sig_from_projection(None, &pb) }) .next(); - let kind = self.tcx.lang_items().fn_trait_kind(object_type.principal().def_id()); + let kind = object_type.principal_def_id().and_then(|did| { + self.tcx.lang_items().fn_trait_kind(did) + }); (sig, kind) } ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid), @@ -219,13 +213,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { &self, expected_vid: ty::TyVid, ) -> (Option>, Option) { - let fulfillment_cx = self.fulfillment_cx.borrow(); - // Here `expected_ty` is known to be a type inference variable. 
- - let expected_sig = fulfillment_cx - .pending_obligations() - .iter() - .filter_map(|obligation| { + let expected_sig = self.obligations_for_self_ty(expected_vid) + .find_map(|(_, obligation)| { debug!( "deduce_expectations_from_obligations: obligation.predicate={:?}", obligation.predicate @@ -234,52 +223,21 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let ty::Predicate::Projection(ref proj_predicate) = obligation.predicate { // Given a Projection predicate, we can potentially infer // the complete signature. - let trait_ref = proj_predicate.to_poly_trait_ref(self.tcx); - self.self_type_matches_expected_vid(trait_ref, expected_vid) - .and_then(|_| { - self.deduce_sig_from_projection( - Some(obligation.cause.span), - proj_predicate - ) - }) + self.deduce_sig_from_projection( + Some(obligation.cause.span), + proj_predicate + ) } else { None } - }) - .next(); + }); // Even if we can't infer the full signature, we may be able to // infer the kind. This can occur if there is a trait-reference // like `F : Fn`. Note that due to subtyping we could encounter // many viable options, so pick the most restrictive. - let expected_kind = fulfillment_cx - .pending_obligations() - .iter() - .filter_map(|obligation| { - let opt_trait_ref = match obligation.predicate { - ty::Predicate::Projection(ref data) => Some(data.to_poly_trait_ref(self.tcx)), - ty::Predicate::Trait(ref data) => Some(data.to_poly_trait_ref()), - ty::Predicate::Subtype(..) => None, - ty::Predicate::RegionOutlives(..) => None, - ty::Predicate::TypeOutlives(..) => None, - ty::Predicate::WellFormed(..) => None, - ty::Predicate::ObjectSafe(..) => None, - ty::Predicate::ConstEvaluatable(..) => None, - - // N.B., this predicate is created by breaking down a - // `ClosureType: FnFoo()` predicate, where - // `ClosureType` represents some `Closure`. It can't - // possibly be referring to the current closure, - // because we haven't produced the `Closure` for - // this closure yet; this is exactly why the other - // code is looking for a self type of a unresolved - // inference variable. - ty::Predicate::ClosureKind(..) => None, - }; - opt_trait_ref - .and_then(|tr| self.self_type_matches_expected_vid(tr, expected_vid)) - .and_then(|tr| self.tcx.lang_items().fn_trait_kind(tr.def_id())) - }) + let expected_kind = self.obligations_for_self_ty(expected_vid) + .filter_map(|(tr, _)| self.tcx.lang_items().fn_trait_kind(tr.def_id())) .fold(None, |best, cur| { Some(best.map_or(cur, |best| cmp::min(best, cur))) }); @@ -339,22 +297,6 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Some(ExpectedSig { cause_span, sig }) } - fn self_type_matches_expected_vid( - &self, - trait_ref: ty::PolyTraitRef<'tcx>, - expected_vid: ty::TyVid, - ) -> Option> { - let self_ty = self.shallow_resolve(trait_ref.self_ty()); - debug!( - "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?})", - trait_ref, self_ty - ); - match self_ty.sty { - ty::Infer(ty::TyVar(v)) if expected_vid == v => Some(trait_ref), - _ => None, - } - } - fn sig_of_closure( &self, expr_def_id: DefId, @@ -425,7 +367,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// /// # Arguments /// - /// - `expr_def_id`: the def-id of the closure expression + /// - `expr_def_id`: the `DefId` of the closure expression /// - `decl`: the HIR declaration of the closure /// - `body`: the body of the closure /// - `expected_sig`: the expected signature (if any). 
Note that @@ -446,7 +388,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Watch out for some surprises and just ignore the // expectation if things don't see to match up with what we // expect. - if expected_sig.sig.variadic != decl.variadic { + if expected_sig.sig.c_variadic != decl.c_variadic { return self.sig_of_closure_no_expectation(expr_def_id, decl, body); } else if expected_sig.sig.inputs_and_output.len() != decl.inputs.len() + 1 { return self.sig_of_closure_with_mismatched_number_of_arguments( @@ -464,7 +406,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let bound_sig = ty::Binder::bind(self.tcx.mk_fn_sig( expected_sig.sig.inputs().iter().cloned(), expected_sig.sig.output(), - decl.variadic, + decl.c_variadic, hir::Unsafety::Normal, Abi::RustCall, )); @@ -518,7 +460,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.closure_sigs(expr_def_id, body, error_sig) } - /// Enforce the user's types against the expectation. See + /// Enforce the user's types against the expectation. See /// `sig_of_closure_with_expectation` for details on the overall /// strategy. fn check_supplied_sig_against_expectation( @@ -556,7 +498,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.infcx.commit_if_ok(|_| { let mut all_obligations = vec![]; - // The liberated version of this signature should be be a subtype + // The liberated version of this signature should be a subtype // of the liberated form of the expectation. for ((hir_ty, &supplied_ty), expected_ty) in decl.inputs.iter() .zip(*supplied_sig.inputs().skip_binder()) // binder moved to (*) below @@ -634,7 +576,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr_def_id: DefId, decl: &hir::FnDecl, ) -> ty::PolyFnSig<'tcx> { - let astconv: &dyn AstConv = self; + let astconv: &dyn AstConv<'_, '_> = self; // First, convert the types that the user supplied (if any). let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a)); @@ -646,7 +588,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let result = ty::Binder::bind(self.tcx.mk_fn_sig( supplied_arguments, supplied_return, - decl.variadic, + decl.c_variadic, hir::Unsafety::Normal, Abi::RustCall, )); @@ -666,7 +608,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// so should yield an error, but returns back a signature where /// all parameters are of type `TyErr`. fn error_sig_of_closure(&self, decl: &hir::FnDecl) -> ty::PolyFnSig<'tcx> { - let astconv: &dyn AstConv = self; + let astconv: &dyn AstConv<'_, '_> = self; let supplied_arguments = decl.inputs.iter().map(|a| { // Convert the types that the user supplied (if any), but ignore them. @@ -681,7 +623,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let result = ty::Binder::bind(self.tcx.mk_fn_sig( supplied_arguments, self.tcx.types.err, - decl.variadic, + decl.c_variadic, hir::Unsafety::Normal, Abi::RustCall, )); @@ -701,7 +643,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .liberate_late_bound_regions(expr_def_id, &bound_sig); let liberated_sig = self.inh.normalize_associated_types_in( body.value.span, - body.value.id, + body.value.hir_id, self.param_env, &liberated_sig, ); diff --git a/src/librustc_typeck/check/coercion.rs b/src/librustc_typeck/check/coercion.rs index 8d844fe3a69e4..c470bc09e8cd0 100644 --- a/src/librustc_typeck/check/coercion.rs +++ b/src/librustc_typeck/check/coercion.rs @@ -1,17 +1,7 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! # Type Coercion //! //! Under certain circumstances we will coerce from one type to another, -//! for example by auto-borrowing. This occurs in situations where the +//! for example by auto-borrowing. This occurs in situations where the //! compiler has a firm 'expected type' that was supplied from the user, //! and where the actual type is similar to that expected type in purpose //! but not in representation (so actual subtyping is inappropriate). @@ -19,24 +9,24 @@ //! ## Reborrowing //! //! Note that if we are expecting a reference, we will *reborrow* -//! even if the argument provided was already a reference. This is +//! even if the argument provided was already a reference. This is //! useful for freezing mut/const things (that is, when the expected is &T //! but you have &const T or &mut T) and also for avoiding the linearity -//! of mut things (when the expected is &mut T and you have &mut T). See +//! of mut things (when the expected is &mut T and you have &mut T). See //! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for //! examples of where this is useful. //! //! ## Subtle note //! //! When deciding what type coercions to consider, we do not attempt to -//! resolve any type variables we may encounter. This is because `b` +//! resolve any type variables we may encounter. This is because `b` //! represents the expected type "as the user wrote it", meaning that if //! the user defined a generic function like //! //! fn foo(a: A, b: A) { ... } //! //! and then we wrote `foo(&1, @2)`, we will not auto-borrow -//! either argument. In older code we went to some lengths to +//! either argument. In older code we went to some lengths to //! resolve the `b` variable, which could mean that we'd //! auto-borrow later arguments but not earlier ones, which //! seems very confusing. @@ -49,18 +39,18 @@ //! foo::<&int>(@1, @2) //! //! then we *will* auto-borrow, because we can't distinguish this from a -//! function that declared `&int`. This is inconsistent but it's easiest +//! function that declared `&int`. This is inconsistent but it's easiest //! at the moment. The right thing to do, I think, is to consider the //! *unsubstituted* type when deciding whether to auto-borrow, but the //! *substituted* type when considering the bounds and so forth. But most //! of our methods don't give access to the unsubstituted type, and -//! rightly so because they'd be error-prone. So maybe the thing to do is +//! rightly so because they'd be error-prone. So maybe the thing to do is //! to actually determine the kind of coercions that should occur -//! separately and pass them in. Or maybe it's ok as is. Anyway, it's -//! sort of a minor point so I've opted to leave it for later---after all +//! separately and pass them in. Or maybe it's ok as is. Anyway, it's +//! sort of a minor point so I've opted to leave it for later -- after all, //! we may want to adjust precisely when coercions occur. 
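To make the reborrowing and call-site coercions described in these module docs concrete, a small stand-alone example (the function and variable names are mine, not part of the patch):

fn word_count(text: &str) -> usize {
    text.split_whitespace().count()
}

fn main() {
    let mut buf = String::from("one two three");
    let exclusive: &mut String = &mut buf;
    // Reborrow: the expected type is firm, so `&mut String` coerces to
    // `&String` even though `exclusive` is already a reference.
    let shared: &String = exclusive;
    // Deref coercion at the call site: `&String` becomes `&str`.
    assert_eq!(word_count(shared), 3);
}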
-use check::{FnCtxt, Needs}; +use crate::check::{FnCtxt, Needs}; use errors::DiagnosticBuilder; use rustc::hir; use rustc::hir::def_id::DefId; @@ -111,7 +101,7 @@ fn coerce_mutbls<'tcx>(from_mutbl: hir::Mutability, } } -fn identity(_: Ty) -> Vec { vec![] } +fn identity(_: Ty<'_>) -> Vec> { vec![] } fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec> { move |target| vec![Adjustment { kind, target }] @@ -235,7 +225,8 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { } ty::Closure(def_id_a, substs_a) => { // Non-capturing closures are coercible to - // function pointers + // function pointers or unsafe function pointers. + // It cannot convert closures that require unsafe. self.coerce_closure_to_fn(a, def_id_a, substs_a, b) } _ => { @@ -419,7 +410,7 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { let needs = Needs::maybe_mut_place(mt_b.mutbl); let InferOk { value: mut adjustments, obligations: o } - = autoderef.adjust_steps_as_infer_ok(needs); + = autoderef.adjust_steps_as_infer_ok(self, needs); obligations.extend(o); obligations.extend(autoderef.into_obligations()); @@ -579,7 +570,33 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { }; match selcx.select(&obligation.with(trait_ref)) { // Uncertain or unimplemented. - Ok(None) | + Ok(None) => { + if trait_ref.def_id() == unsize_did { + let trait_ref = self.resolve_type_vars_if_possible(&trait_ref); + let self_ty = trait_ref.skip_binder().self_ty(); + let unsize_ty = trait_ref.skip_binder().input_types().nth(1).unwrap(); + debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_ref); + match (&self_ty.sty, &unsize_ty.sty) { + (ty::Infer(ty::TyVar(v)), + ty::Dynamic(..)) if self.type_var_is_sized(*v) => { + debug!("coerce_unsized: have sized infer {:?}", v); + coercion.obligations.push(obligation); + // `$0: Unsize` where we know that `$0: Sized`, try going + // for unsizing. + } + _ => { + // Some other case for `$0: Unsize`. Note that we + // hit this case even if `Something` is a sized type, so just + // don't do the coercion. + debug!("coerce_unsized: ambiguous unsize"); + return Err(TypeError::Mismatch); + } + } + } else { + debug!("coerce_unsized: early return - ambiguous"); + return Err(TypeError::Mismatch); + } + } Err(traits::Unimplemented) => { debug!("coerce_unsized: early return - can't prove obligation"); return Err(TypeError::Mismatch); @@ -696,18 +713,21 @@ impl<'f, 'gcx, 'tcx> Coerce<'f, 'gcx, 'tcx> { let b = self.shallow_resolve(b); - let node_id_a = self.tcx.hir().as_local_node_id(def_id_a).unwrap(); + let hir_id_a = self.tcx.hir().as_local_hir_id(def_id_a).unwrap(); match b.sty { - ty::FnPtr(_) if self.tcx.with_freevars(node_id_a, |v| v.is_empty()) => { + ty::FnPtr(fn_ty) if self.tcx.with_freevars(hir_id_a, |v| v.is_empty()) => { // We coerce the closure, which has fn type // `extern "rust-call" fn((arg0,arg1,...)) -> _` // to // `fn(arg0,arg1,...) -> _` + // or + // `unsafe fn(arg0,arg1,...) 
-> _` let sig = self.closure_sig(def_id_a, substs_a); - let pointer_ty = self.tcx.coerce_closure_fn_ty(sig); + let unsafety = fn_ty.unsafety(); + let pointer_ty = self.tcx.coerce_closure_fn_ty(sig, unsafety); debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty); - self.unify_and(pointer_ty, b, simple(Adjust::ClosureFnPointer)) + self.unify_and(pointer_ty, b, simple(Adjust::ClosureFnPointer(unsafety))) } _ => self.unify_and(a, b, identity), } @@ -807,7 +827,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("coercion::try_find_coercion_lub({:?}, {:?})", prev_ty, new_ty); // Special-case that coercion alone cannot handle: - // Two function item types of differing IDs or Substs. + // Two function item types of differing IDs or InternalSubsts. if let (&ty::FnDef(..), &ty::FnDef(..)) = (&prev_ty.sty, &new_ty.sty) { // Don't reify if the function types have a LUB, i.e., they // are the same function and their parameters have a LUB. @@ -1015,8 +1035,8 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> } } - /// Return the "expected type" with which this coercion was - /// constructed. This represents the "downward propagated" type + /// Returns the "expected type" with which this coercion was + /// constructed. This represents the "downward propagated" type /// that was given to us at the start of typing whatever construct /// we are typing (e.g., the match expression). /// @@ -1068,7 +1088,7 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> pub fn coerce_forced_unit<'a>(&mut self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, cause: &ObligationCause<'tcx>, - augment_error: &mut dyn FnMut(&mut DiagnosticBuilder), + augment_error: &mut dyn FnMut(&mut DiagnosticBuilder<'_>), label_unit_as_expected: bool) { self.coerce_inner(fcx, @@ -1087,7 +1107,7 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> cause: &ObligationCause<'tcx>, expression: Option<&'gcx hir::Expr>, mut expression_ty: Ty<'tcx>, - augment_error: Option<&mut dyn FnMut(&mut DiagnosticBuilder)>, + augment_error: Option<&mut dyn FnMut(&mut DiagnosticBuilder<'_>)>, label_expression_as_expected: bool) { // Incorporate whatever type inference information we have @@ -1143,7 +1163,6 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> // `expression_ty` will be unit). // // Another example is `break` with no argument expression. 
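Returning to the `coerce_closure_to_fn` hunk above, which now threads the target pointer's `unsafety` through the coercion: a self-contained sketch of the two coercions the updated comment describes, assuming the behaviour stated there (this is not code from the patch):

fn main() {
    // A non-capturing closure has long coerced to a safe `fn` pointer:
    let double: fn(i32) -> i32 = |x| x * 2;
    assert_eq!(double(21), 42);

    // With the change above it also coerces to an `unsafe fn` pointer, so
    // the call site needs an `unsafe` block even though the body is safe.
    let double_unchecked: unsafe fn(i32) -> i32 = |x| x * 2;
    assert_eq!(unsafe { double_unchecked(21) }, 42);
}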
- assert!(expression_ty.is_unit()); assert!(expression_ty.is_unit(), "if let hack without unit type"); fcx.at(cause, fcx.param_env) .eq_exp(label_expression_as_expected, expression_ty, self.merged_ty()) @@ -1162,7 +1181,8 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> Expressions::UpFront(coercion_sites) => { // if the user gave us an array to validate, check that we got // the next expression in the list, as expected - assert_eq!(coercion_sites[self.pushed].as_coercion_site().id, e.id); + assert_eq!(coercion_sites[self.pushed].as_coercion_site().hir_id, + e.hir_id); } } self.pushed += 1; @@ -1190,25 +1210,24 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> db = struct_span_err!( fcx.tcx.sess, cause.span, E0069, "`return;` in a function whose return type is not `()`"); - db.span_label(cause.span, "return type is not ()"); + db.span_label(cause.span, "return type is not `()`"); } ObligationCauseCode::BlockTailExpression(blk_id) => { - db = fcx.report_mismatched_types(cause, expected, found, err); - - let expr = expression.unwrap_or_else(|| { - span_bug!(cause.span, - "supposed to be part of a block tail expression, but the \ - expression is empty"); - }); - fcx.suggest_mismatched_types_on_tail( - &mut db, - expr, + let parent_id = fcx.tcx.hir().get_parent_node_by_hir_id(blk_id); + db = self.report_return_mismatched_types( + cause, expected, found, - cause.span, - blk_id, + err, + fcx, + parent_id, + expression.map(|expr| (expr, blk_id)), ); } + ObligationCauseCode::ReturnType(id) => { + db = self.report_return_mismatched_types( + cause, expected, found, err, fcx, id, None); + } _ => { db = fcx.report_mismatched_types(cause, expected, found, err); } @@ -1218,13 +1237,71 @@ impl<'gcx, 'tcx, 'exprs, E> CoerceMany<'gcx, 'tcx, 'exprs, E> augment_error(&mut db); } - db.emit(); + if expression.filter(|e| fcx.is_assign_to_bool(e, expected)).is_some() { + // Error reported in `check_assign` so avoid emitting error again. + db.delay_as_bug(); + } else { + db.emit(); + } self.final_ty = Some(fcx.tcx.types.err); } } } + fn report_return_mismatched_types<'a>( + &self, + cause: &ObligationCause<'tcx>, + expected: Ty<'tcx>, + found: Ty<'tcx>, + err: TypeError<'tcx>, + fcx: &FnCtxt<'a, 'gcx, 'tcx>, + id: hir::HirId, + expression: Option<(&'gcx hir::Expr, hir::HirId)>, + ) -> DiagnosticBuilder<'a> { + let mut db = fcx.report_mismatched_types(cause, expected, found, err); + + let mut pointing_at_return_type = false; + let mut return_sp = None; + + // Verify that this is a tail expression of a function, otherwise the + // label pointing out the cause for the type coercion will be wrong + // as prior return coercions would not be relevant (#57664). 
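For context, one shape of code that reaches this reporting path: the block's tail expression disagrees with the declared return type, so the mismatch should be labelled against the return type rather than an earlier coercion (illustrative error example, not from the patch):

fn join_names(names: &[&str]) -> String {
    // error[E0308]: the tail expression has type `usize`, but the
    // signature promises `String`.
    names.len()
}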
+ let parent_id = fcx.tcx.hir().get_parent_node_by_hir_id(id); + let fn_decl = if let Some((expr, blk_id)) = expression { + pointing_at_return_type = fcx.suggest_mismatched_types_on_tail( + &mut db, + expr, + expected, + found, + cause.span, + blk_id, + ); + let parent = fcx.tcx.hir().get_by_hir_id(parent_id); + fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main)) + } else { + fcx.get_fn_decl(parent_id) + }; + + if let (Some((fn_decl, can_suggest)), _) = (fn_decl, pointing_at_return_type) { + if expression.is_none() { + pointing_at_return_type |= fcx.suggest_missing_return_type( + &mut db, &fn_decl, expected, found, can_suggest); + } + if !pointing_at_return_type { + return_sp = Some(fn_decl.output.span()); // `impl Trait` return type + } + } + if let (Some(sp), Some(return_sp)) = (fcx.ret_coercion_span.borrow().as_ref(), return_sp) { + db.span_label(return_sp, "expected because this return type..."); + db.span_label( *sp, format!( + "...is found to be `{}` here", + fcx.resolve_type_vars_with_obligations(expected), + )); + } + db + } + pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { if let Some(final_ty) = self.final_ty { final_ty diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index 984f1ca65ce44..e6e5c46c473d5 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -1,22 +1,12 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::{self, GenericParamKind, ImplItemKind, TraitItemKind}; use rustc::infer::{self, InferOk}; use rustc::ty::{self, TyCtxt, GenericParamDefKind}; use rustc::ty::util::ExplicitSelf; use rustc::traits::{self, ObligationCause, ObligationCauseCode, Reveal}; use rustc::ty::error::{ExpectedFound, TypeError}; -use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::subst::{Subst, InternalSubsts, SubstsRef}; use rustc::util::common::ErrorReported; -use errors::Applicability; +use errors::{Applicability, DiagnosticId}; use syntax_pos::Span; @@ -27,10 +17,10 @@ use super::{Inherited, FnCtxt, potentially_plural_count}; /// /// # Parameters /// -/// - impl_m: type of the method we are checking -/// - impl_m_span: span to use for reporting errors -/// - trait_m: the method in the trait -/// - impl_trait_ref: the TraitRef corresponding to the trait implementation +/// - `impl_m`: type of the method we are checking +/// - `impl_m_span`: span to use for reporting errors +/// - `trait_m`: the method in the trait +/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation pub fn compare_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_m: &ty::AssociatedItem, @@ -93,11 +83,11 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // This node-id should be used for the `body_id` field on each // `ObligationCause` (and the `FnCtxt`). This is what // `regionck_item` expects. 
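As an illustration of what this entailment check rejects, a minimal trait/impl pair whose method signatures disagree (my example; rustc reports it as E0053):

trait Speak {
    fn speak(&self, volume: u32);
}

struct Dog;

impl Speak for Dog {
    // error[E0053]: method `speak` has an incompatible type for trait
    // (the parameter types differ: `u32` in the trait, `i32` here)
    fn speak(&self, volume: i32) {
        println!("woof at volume {}", volume);
    }
}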
- let impl_m_node_id = tcx.hir().as_local_node_id(impl_m.def_id).unwrap(); + let impl_m_hir_id = tcx.hir().as_local_hir_id(impl_m.def_id).unwrap(); let cause = ObligationCause { span: impl_m_span, - body_id: impl_m_node_id, + body_id: impl_m_hir_id, code: ObligationCauseCode::CompareImplMethodObligation { item_name: impl_m.ident.name, impl_item_def_id: impl_m.def_id, @@ -169,7 +159,7 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // a fresh FulfillmentCtxt, and invoke select_all_or_error. // Create mapping from impl to placeholder. - let impl_to_skol_substs = Substs::identity_for_item(tcx, impl_m.def_id); + let impl_to_skol_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id); // Create mapping from trait to placeholder. let trait_to_skol_substs = impl_to_skol_substs.rebase_onto(tcx, @@ -215,9 +205,12 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Construct trait parameter environment and then shift it into the placeholder viewpoint. // The key step here is to update the caller_bounds's predicates to be // the new hybrid bounds we computed. - let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_node_id); - let param_env = ty::ParamEnv::new(tcx.intern_predicates(&hybrid_preds.predicates), - Reveal::UserFacing); + let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id); + let param_env = ty::ParamEnv::new( + tcx.intern_predicates(&hybrid_preds.predicates), + Reveal::UserFacing, + None + ); let param_env = traits::normalize_param_env_or_error(tcx, impl_m.def_id, param_env, @@ -269,7 +262,7 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ); let impl_sig = inh.normalize_associated_types_in(impl_m_span, - impl_m_node_id, + impl_m_hir_id, param_env, &impl_sig); let impl_fty = tcx.mk_fn_ptr(ty::Binder::bind(impl_sig)); @@ -282,7 +275,7 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_sig.subst(tcx, trait_to_skol_substs); let trait_sig = inh.normalize_associated_types_in(impl_m_span, - impl_m_node_id, + impl_m_hir_id, param_env, &trait_sig); let trait_fty = tcx.mk_fn_ptr(ty::Binder::bind(trait_sig)); @@ -323,7 +316,7 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if let Some(trait_err_span) = trait_err_span { if let Ok(trait_err_str) = tcx.sess.source_map() .span_to_snippet(trait_err_span) { - diag.span_suggestion_with_applicability( + diag.span_suggestion( impl_err_span, "consider change the type to match the mutability in trait", trait_err_str, @@ -354,8 +347,8 @@ fn compare_predicate_entailment<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Finally, resolve all regions. This catches wily misuses of // lifetime parameters. 
- let fcx = FnCtxt::new(&inh, param_env, impl_m_node_id); - fcx.regionck_item(impl_m_node_id, impl_m_span, &[]); + let fcx = FnCtxt::new(&inh, param_env, impl_m_hir_id); + fcx.regionck_item(impl_m_hir_id, impl_m_span, &[]); Ok(()) }) @@ -367,7 +360,7 @@ fn check_region_bounds_on_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_m: &ty::AssociatedItem, trait_generics: &ty::Generics, impl_generics: &ty::Generics, - trait_to_skol_substs: &Substs<'tcx>) + trait_to_skol_substs: SubstsRef<'tcx>) -> Result<(), ErrorReported> { let trait_params = trait_generics.own_counts().lifetimes; let impl_params = impl_generics.own_counts().lifetimes; @@ -414,7 +407,7 @@ fn check_region_bounds_on_impl_method<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - terr: &TypeError, + terr: &TypeError<'_>, cause: &ObligationCause<'tcx>, impl_m: &ty::AssociatedItem, impl_sig: ty::FnSig<'tcx>, @@ -422,8 +415,10 @@ fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a trait_sig: ty::FnSig<'tcx>) -> (Span, Option) { let tcx = infcx.tcx; - let impl_m_node_id = tcx.hir().as_local_node_id(impl_m.def_id).unwrap(); - let (impl_m_output, impl_m_iter) = match tcx.hir().expect_impl_item(impl_m_node_id).node { + let impl_m_hir_id = tcx.hir().as_local_hir_id(impl_m.def_id).unwrap(); + let (impl_m_output, impl_m_iter) = match tcx.hir() + .expect_impl_item(impl_m_hir_id) + .node { ImplItemKind::Method(ref impl_m_sig, _) => { (&impl_m_sig.decl.output, impl_m_sig.decl.inputs.iter()) } @@ -432,8 +427,10 @@ fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a match *terr { TypeError::Mutability => { - if let Some(trait_m_node_id) = tcx.hir().as_local_node_id(trait_m.def_id) { - let trait_m_iter = match tcx.hir().expect_trait_item(trait_m_node_id).node { + if let Some(trait_m_hir_id) = tcx.hir().as_local_hir_id(trait_m.def_id) { + let trait_m_iter = match tcx.hir() + .expect_trait_item(trait_m_hir_id) + .node { TraitItemKind::Method(ref trait_m_sig, _) => { trait_m_sig.decl.inputs.iter() } @@ -457,9 +454,9 @@ fn extract_spans_for_error_reporting<'a, 'gcx, 'tcx>(infcx: &infer::InferCtxt<'a } } TypeError::Sorts(ExpectedFound { .. 
}) => { - if let Some(trait_m_node_id) = tcx.hir().as_local_node_id(trait_m.def_id) { + if let Some(trait_m_hir_id) = tcx.hir().as_local_hir_id(trait_m.def_id) { let (trait_m_output, trait_m_iter) = - match tcx.hir().expect_trait_item(trait_m_node_id).node { + match tcx.hir().expect_trait_item(trait_m_hir_id).node { TraitItemKind::Method(ref trait_m_sig, _) => { (&trait_m_sig.decl.output, trait_m_sig.decl.inputs.iter()) } @@ -582,55 +579,78 @@ fn compare_self_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, Ok(()) } -fn compare_number_of_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - impl_m: &ty::AssociatedItem, - impl_m_span: Span, - trait_m: &ty::AssociatedItem, - trait_item_span: Option) - -> Result<(), ErrorReported> { - let impl_m_generics = tcx.generics_of(impl_m.def_id); - let trait_m_generics = tcx.generics_of(trait_m.def_id); - let num_impl_m_type_params = impl_m_generics.own_counts().types; - let num_trait_m_type_params = trait_m_generics.own_counts().types; - - if num_impl_m_type_params != num_trait_m_type_params { - let impl_m_node_id = tcx.hir().as_local_node_id(impl_m.def_id).unwrap(); - let impl_m_item = tcx.hir().expect_impl_item(impl_m_node_id); - let span = if impl_m_item.generics.params.is_empty() - || impl_m_item.generics.span.is_dummy() // impl Trait in argument position (#55374) - { - impl_m_span - } else { - impl_m_item.generics.span - }; - - let mut err = struct_span_err!(tcx.sess, span, E0049, - "method `{}` has {} but its trait declaration has {}", - trait_m.ident, - potentially_plural_count(num_impl_m_type_params, "type parameter"), - potentially_plural_count(num_trait_m_type_params, "type parameter") - ); +fn compare_number_of_generics<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + impl_: &ty::AssociatedItem, + impl_span: Span, + trait_: &ty::AssociatedItem, + trait_span: Option, +) -> Result<(), ErrorReported> { + let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts(); + let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts(); + + let matchings = [ + ("type", trait_own_counts.types, impl_own_counts.types), + ("const", trait_own_counts.consts, impl_own_counts.consts), + ]; + + let mut err_occurred = false; + for &(kind, trait_count, impl_count) in &matchings { + if impl_count != trait_count { + err_occurred = true; + + let impl_hir_id = tcx.hir().as_local_hir_id(impl_.def_id).unwrap(); + let impl_item = tcx.hir().expect_impl_item(impl_hir_id); + let span = if impl_item.generics.params.is_empty() + || impl_item.generics.span.is_dummy() { // argument position impl Trait (#55374) + impl_span + } else { + impl_item.generics.span + }; - let mut suffix = None; + let mut err = tcx.sess.struct_span_err_with_code( + span, + &format!( + "method `{}` has {} {kind} parameter{} but its trait \ + declaration has {} {kind} parameter{}", + trait_.ident, + impl_count, + if impl_count != 1 { "s" } else { "" }, + trait_count, + if trait_count != 1 { "s" } else { "" }, + kind = kind, + ), + DiagnosticId::Error("E0049".into()), + ); - if let Some(span) = trait_item_span { - err.span_label(span, format!("expected {}", - potentially_plural_count(num_trait_m_type_params, "type parameter"))); - } else { - suffix = Some(format!(", expected {}", num_trait_m_type_params)); - } + let mut suffix = None; - err.span_label(span, - format!("found {}{}", - potentially_plural_count(num_impl_m_type_params, "type parameter"), - suffix.as_ref().map(|s| &s[..]).unwrap_or(""))); + if let Some(span) = trait_span { + err.span_label( + span, + format!("expected {} {} parameter{}", 
trait_count, kind, + if trait_count != 1 { "s" } else { "" }) + ); + } else { + suffix = Some(format!(", expected {}", trait_count)); + } - err.emit(); + err.span_label( + span, + format!("found {} {} parameter{}{}", impl_count, kind, + if impl_count != 1 { "s" } else { "" }, + suffix.unwrap_or_else(|| String::new())), + ); - return Err(ErrorReported); + err.emit(); + } } - Ok(()) + if err_occurred { + Err(ErrorReported) + } else { + Ok(()) + } } fn compare_number_of_method_arguments<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -644,8 +664,8 @@ fn compare_number_of_method_arguments<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let trait_number_args = trait_m_fty.inputs().skip_binder().len(); let impl_number_args = impl_m_fty.inputs().skip_binder().len(); if trait_number_args != impl_number_args { - let trait_m_node_id = tcx.hir().as_local_node_id(trait_m.def_id); - let trait_span = if let Some(trait_id) = trait_m_node_id { + let trait_m_hir_id = tcx.hir().as_local_hir_id(trait_m.def_id); + let trait_span = if let Some(trait_id) = trait_m_hir_id { match tcx.hir().expect_trait_item(trait_id).node { TraitItemKind::Method(ref trait_m_sig, _) => { let pos = if trait_number_args > 0 { @@ -670,8 +690,8 @@ fn compare_number_of_method_arguments<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } else { trait_item_span }; - let impl_m_node_id = tcx.hir().as_local_node_id(impl_m.def_id).unwrap(); - let impl_span = match tcx.hir().expect_impl_item(impl_m_node_id).node { + let impl_m_hir_id = tcx.hir().as_local_hir_id(impl_m.def_id).unwrap(); + let impl_span = match tcx.hir().expect_impl_item(impl_m_hir_id).node { ImplItemKind::Method(ref impl_m_sig, _) => { let pos = if impl_number_args > 0 { impl_number_args - 1 @@ -699,7 +719,7 @@ fn compare_number_of_method_arguments<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait `{}` has {}", trait_m.ident, potentially_plural_count(impl_number_args, "parameter"), - tcx.item_path_str(trait_m.def_id), + tcx.def_path_str(trait_m.def_id), trait_number_args); if let Some(trait_span) = trait_span { err.span_label(trait_span, format!("trait requires {}", @@ -731,20 +751,20 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let trait_m_generics = tcx.generics_of(trait_m.def_id); let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind { GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)), - GenericParamDefKind::Lifetime => None, + GenericParamDefKind::Lifetime | GenericParamDefKind::Const => None, }); let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| { match param.kind { GenericParamDefKind::Type { synthetic, .. 
} => Some((param.def_id, synthetic)), - GenericParamDefKind::Lifetime => None, + GenericParamDefKind::Lifetime | GenericParamDefKind::Const => None, } }); for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in impl_m_type_params.zip(trait_m_type_params) { if impl_synthetic != trait_synthetic { - let impl_node_id = tcx.hir().as_local_node_id(impl_def_id).unwrap(); - let impl_span = tcx.hir().span(impl_node_id); + let impl_hir_id = tcx.hir().as_local_hir_id(impl_def_id).unwrap(); + let impl_span = tcx.hir().span_by_hir_id(impl_hir_id); let trait_span = tcx.def_span(trait_def_id); let mut err = struct_span_err!(tcx.sess, impl_span, @@ -766,11 +786,11 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .source_map() .span_to_snippet(trait_span) .ok()?; - let trait_m = tcx.hir().as_local_node_id(trait_m.def_id)?; - let trait_m = tcx.hir().trait_item(hir::TraitItemId { node_id: trait_m }); + let trait_m = tcx.hir().as_local_hir_id(trait_m.def_id)?; + let trait_m = tcx.hir().trait_item(hir::TraitItemId { hir_id: trait_m }); - let impl_m = tcx.hir().as_local_node_id(impl_m.def_id)?; - let impl_m = tcx.hir().impl_item(hir::ImplItemId { node_id: impl_m }); + let impl_m = tcx.hir().as_local_hir_id(impl_m.def_id)?; + let impl_m = tcx.hir().impl_item(hir::ImplItemId { hir_id: impl_m }); // in case there are no generics, take the spot between the function name // and the opening paren of the argument list @@ -791,7 +811,7 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .span_to_snippet(trait_m.generics.span) .ok()?; - err.multipart_suggestion_with_applicability( + err.multipart_suggestion( "try changing the `impl Trait` argument to a generic parameter", vec![ // replace `impl Trait` with `T` @@ -811,8 +831,8 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, (None, Some(hir::SyntheticTyParamKind::ImplTrait)) => { err.span_label(impl_span, "expected `impl Trait`, found generic parameter"); (|| { - let impl_m = tcx.hir().as_local_node_id(impl_m.def_id)?; - let impl_m = tcx.hir().impl_item(hir::ImplItemId { node_id: impl_m }); + let impl_m = tcx.hir().as_local_hir_id(impl_m.def_id)?; + let impl_m = tcx.hir().impl_item(hir::ImplItemId { hir_id: impl_m }); let input_tys = match impl_m.node { hir::ImplItemKind::Method(ref sig, _) => &sig.decl.inputs, _ => unreachable!(), @@ -846,8 +866,9 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let bounds = impl_m.generics.params.iter().find_map(|param| { match param.kind { GenericParamKind::Lifetime { .. } => None, - GenericParamKind::Type { .. } => { - if param.id == impl_node_id { + GenericParamKind::Type { .. } | + GenericParamKind::Const { .. 
} => { + if param.hir_id == impl_hir_id { Some(¶m.bounds) } else { None @@ -862,7 +883,7 @@ fn compare_synthetic_generics<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .span_to_snippet(bounds) .ok()?; - err.multipart_suggestion_with_applicability( + err.multipart_suggestion( "try removing the generic parameter and using `impl Trait` instead", vec![ // delete generic parameters @@ -896,7 +917,7 @@ pub fn compare_const_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref); tcx.infer_ctxt().enter(|infcx| { - let param_env = ty::ParamEnv::empty(); + let param_env = tcx.param_env(impl_c.def_id); let inh = Inherited::new(infcx, impl_c.def_id); let infcx = &inh.infcx; @@ -909,23 +930,23 @@ pub fn compare_const_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Create a parameter environment that represents the implementation's // method. - let impl_c_node_id = tcx.hir().as_local_node_id(impl_c.def_id).unwrap(); + let impl_c_hir_id = tcx.hir().as_local_hir_id(impl_c.def_id).unwrap(); // Compute placeholder form of impl and trait const tys. let impl_ty = tcx.type_of(impl_c.def_id); let trait_ty = tcx.type_of(trait_c.def_id).subst(tcx, trait_to_impl_substs); - let mut cause = ObligationCause::misc(impl_c_span, impl_c_node_id); + let mut cause = ObligationCause::misc(impl_c_span, impl_c_hir_id); // There is no "body" here, so just pass dummy id. let impl_ty = inh.normalize_associated_types_in(impl_c_span, - impl_c_node_id, + impl_c_hir_id, param_env, &impl_ty); debug!("compare_const_impl: impl_ty={:?}", impl_ty); let trait_ty = inh.normalize_associated_types_in(impl_c_span, - impl_c_node_id, + impl_c_hir_id, param_env, &trait_ty); @@ -941,7 +962,7 @@ pub fn compare_const_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_ty); // Locate the Span containing just the type of the offending impl - match tcx.hir().expect_impl_item(impl_c_node_id).node { + match tcx.hir().expect_impl_item(impl_c_hir_id).node { ImplItemKind::Const(ref ty, _) => cause.span = ty.span, _ => bug!("{:?} is not a impl const", impl_c), } @@ -953,10 +974,10 @@ pub fn compare_const_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait", trait_c.ident); - let trait_c_node_id = tcx.hir().as_local_node_id(trait_c.def_id); - let trait_c_span = trait_c_node_id.map(|trait_c_node_id| { + let trait_c_hir_id = tcx.hir().as_local_hir_id(trait_c.def_id); + let trait_c_span = trait_c_hir_id.map(|trait_c_hir_id| { // Add a label to the Span containing just the type of the const - match tcx.hir().expect_trait_item(trait_c_node_id).node { + match tcx.hir().expect_trait_item(trait_c_hir_id).node { TraitItemKind::Const(ref ty, _) => ty.span, _ => bug!("{:?} is not a trait const", trait_c), } @@ -980,7 +1001,7 @@ pub fn compare_const_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return; } - let fcx = FnCtxt::new(&inh, param_env, impl_c_node_id); - fcx.regionck_item(impl_c_node_id, impl_c_span, &[]); + let fcx = FnCtxt::new(&inh, param_env, impl_c_hir_id); + fcx.regionck_item(impl_c_hir_id, impl_c_span, &[]); }); } diff --git a/src/librustc_typeck/check/demand.rs b/src/librustc_typeck/check/demand.rs index db4b68611c51b..8739147c621e6 100644 --- a/src/librustc_typeck/check/demand.rs +++ b/src/librustc_typeck/check/demand.rs @@ -1,18 +1,7 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - -use check::FnCtxt; +use crate::check::FnCtxt; use rustc::infer::InferOk; -use rustc::traits::ObligationCause; +use rustc::traits::{ObligationCause, ObligationCauseCode}; -use syntax::ast; use syntax::util::parser::PREC_POSTFIX; use syntax_pos::Span; use rustc::hir; @@ -21,7 +10,7 @@ use rustc::hir::Node; use rustc::hir::{Item, ItemKind, print}; use rustc::ty::{self, Ty, AssociatedItem}; use rustc::ty::adjustment::AllowTwoPhase; -use errors::{Applicability, DiagnosticBuilder, SourceMapper}; +use errors::{Applicability, DiagnosticBuilder}; use super::method::probe; @@ -76,6 +65,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } + pub fn demand_eqtype_pat( + &self, + cause_span: Span, + expected: Ty<'tcx>, + actual: Ty<'tcx>, + match_expr_span: Option, + ) { + let cause = if let Some(span) = match_expr_span { + self.cause( + cause_span, + ObligationCauseCode::MatchExpressionArmPattern { span, ty: expected }, + ) + } else { + self.misc(cause_span) + }; + self.demand_eqtype_with_origin(&cause, expected, actual).map(|mut err| err.emit()); + } + + pub fn demand_coerce(&self, expr: &hir::Expr, checked_ty: Ty<'tcx>, @@ -111,50 +119,72 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let expr_ty = self.resolve_type_vars_with_obligations(checked_ty); let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e); - // If the expected type is an enum (Issue #55250) with any variants whose - // sole field is of the found type, suggest such variants. (Issue #42764) - if let ty::Adt(expected_adt, substs) = expected.sty { - if expected_adt.is_enum() { - let mut compatible_variants = expected_adt.variants - .iter() - .filter(|variant| variant.fields.len() == 1) - .filter_map(|variant| { - let sole_field = &variant.fields[0]; - let sole_field_ty = sole_field.ty(self.tcx, substs); - if self.can_coerce(expr_ty, sole_field_ty) { - let variant_path = self.tcx.item_path_str(variant.did); - Some(variant_path.trim_start_matches("std::prelude::v1::").to_string()) - } else { - None - } - }).peekable(); - - if compatible_variants.peek().is_some() { - let expr_text = print::to_string(print::NO_ANN, |s| s.print_expr(expr)); - let suggestions = compatible_variants - .map(|v| format!("{}({})", v, expr_text)); - err.span_suggestions_with_applicability( - expr.span, - "try using a variant of the expected type", - suggestions, - Applicability::MaybeIncorrect, - ); - } - } + if self.is_assign_to_bool(expr, expected) { + // Error reported in `check_assign` so avoid emitting error again. + err.delay_as_bug(); + return (expected, None) } + self.suggest_compatible_variants(&mut err, expr, expected, expr_ty); self.suggest_ref_or_into(&mut err, expr, expected, expr_ty); (expected, Some(err)) } + /// Returns whether the expected type is `bool` and the expression is `x = y`. + pub fn is_assign_to_bool(&self, expr: &hir::Expr, expected: Ty<'tcx>) -> bool { + if let hir::ExprKind::Assign(..) = expr.node { + return expected == self.tcx.types.bool; + } + false + } + + /// If the expected type is an enum (Issue #55250) with any variants whose + /// sole field is of the found type, suggest such variants. 
(Issue #42764) + fn suggest_compatible_variants( + &self, + err: &mut DiagnosticBuilder<'_>, + expr: &hir::Expr, + expected: Ty<'tcx>, + expr_ty: Ty<'tcx>, + ) { + if let ty::Adt(expected_adt, substs) = expected.sty { + if !expected_adt.is_enum() { + return; + } + + let mut compatible_variants = expected_adt.variants + .iter() + .filter(|variant| variant.fields.len() == 1) + .filter_map(|variant| { + let sole_field = &variant.fields[0]; + let sole_field_ty = sole_field.ty(self.tcx, substs); + if self.can_coerce(expr_ty, sole_field_ty) { + let variant_path = self.tcx.def_path_str(variant.def_id); + // FIXME #56861: DRYer prelude filtering + Some(variant_path.trim_start_matches("std::prelude::v1::").to_string()) + } else { + None + } + }).peekable(); + + if compatible_variants.peek().is_some() { + let expr_text = print::to_string(print::NO_ANN, |s| s.print_expr(expr)); + let suggestions = compatible_variants + .map(|v| format!("{}({})", v, expr_text)); + let msg = "try using a variant of the expected type"; + err.span_suggestions(expr.span, msg, suggestions, Applicability::MaybeIncorrect); + } + } + } + pub fn get_conversion_methods(&self, span: Span, expected: Ty<'tcx>, checked_ty: Ty<'tcx>) -> Vec { let mut methods = self.probe_for_return_type(span, probe::Mode::MethodCall, expected, checked_ty, - ast::DUMMY_NODE_ID); + hir::DUMMY_HIR_ID); methods.retain(|m| { self.has_no_input_arg(m) && self.tcx.get_attrs(m.def_id).iter() @@ -200,21 +230,24 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// ``` /// opt.map(|arg| { takes_ref(arg) }); /// ``` - fn can_use_as_ref(&self, expr: &hir::Expr) -> Option<(Span, &'static str, String)> { + fn can_use_as_ref( + &self, + expr: &hir::Expr, + ) -> Option<(Span, &'static str, String)> { if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = expr.node { if let hir::def::Def::Local(id) = path.def { let parent = self.tcx.hir().get_parent_node(id); if let Some(Node::Expr(hir::Expr { - id, + hir_id, node: hir::ExprKind::Closure(_, decl, ..), .. })) = self.tcx.hir().find(parent) { - let parent = self.tcx.hir().get_parent_node(*id); + let parent = self.tcx.hir().get_parent_node_by_hir_id(*hir_id); if let (Some(Node::Expr(hir::Expr { node: hir::ExprKind::MethodCall(path, span, expr), .. 
- })), 1) = (self.tcx.hir().find(parent), decl.inputs.len()) { - let self_ty = self.tables.borrow().node_id_to_type(expr[0].hir_id); + })), 1) = (self.tcx.hir().find_by_hir_id(parent), decl.inputs.len()) { + let self_ty = self.tables.borrow().node_type(expr[0].hir_id); let self_ty = format!("{:?}", self_ty); let name = path.ident.as_str(); let is_as_ref_able = ( @@ -223,10 +256,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self_ty.starts_with("std::option::Option") || self_ty.starts_with("std::result::Result") ) && (name == "map" || name == "and_then"); - if is_as_ref_able { - return Some((span.shrink_to_lo(), - "consider using `as_ref` instead", - "as_ref().".into())); + match (is_as_ref_able, self.sess().source_map().span_to_snippet(*span)) { + (true, Ok(src)) => { + return Some((*span, "consider using `as_ref` instead", + format!("as_ref().{}", src))); + }, + _ => () } } } @@ -235,6 +270,26 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { None } + fn is_hir_id_from_struct_pattern_shorthand_field(&self, hir_id: hir::HirId, sp: Span) -> bool { + let cm = self.sess().source_map(); + let parent_id = self.tcx.hir().get_parent_node_by_hir_id(hir_id); + if let Some(parent) = self.tcx.hir().find_by_hir_id(parent_id) { + // Account for fields + if let Node::Expr(hir::Expr { + node: hir::ExprKind::Struct(_, fields, ..), .. + }) = parent { + if let Ok(src) = cm.span_to_snippet(sp) { + for field in fields { + if field.ident.as_str() == src.as_str() && field.is_shorthand { + return true; + } + } + } + } + } + false + } + /// This function is used to determine potential "simple" improvements or users' errors and /// provide them useful help. For example: /// @@ -257,12 +312,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expected: Ty<'tcx>) -> Option<(Span, &'static str, String)> { let cm = self.sess().source_map(); - // Use the callsite's span if this is a macro call. #41858 - let sp = cm.call_span_if_macro(expr.span); + let sp = expr.span; if !cm.span_to_filename(sp).is_real() { + // Ignore if span is from within a macro #41858, #58298. We previously used the macro + // call span, but that breaks down when the type error comes from multiple calls down. 
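The `can_use_as_ref` change above now splices the original method call into the suggestion, so it reads `as_ref().map(...)` rather than inserting a bare `as_ref().` at the call site. A compiling sketch of the pattern it targets (the `takes_ref` helper is invented, following the doc example earlier in this file):

    fn takes_ref(s: &str) -> usize {
        s.len()
    }

    fn main() {
        let opt: Option<String> = Some(String::from("hi"));
        // Accepted: `as_ref()` turns `Option<String>` into `Option<&String>`,
        // so the `String` is borrowed rather than moved into the closure.
        let n = opt.as_ref().map(|arg| takes_ref(arg));
        // Writing `opt.map(|arg| takes_ref(arg))` without `as_ref()` is the
        // case the "consider using `as_ref` instead" suggestion above is for.
        println!("{:?}", n);
    }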
return None; } + let is_struct_pat_shorthand_field = self.is_hir_id_from_struct_pattern_shorthand_field( + expr.hir_id, + sp, + ); + match (&expected.sty, &checked_ty.sty) { (&ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (&exp.sty, &check.sty) { (&ty::Str, &ty::Array(arr, _)) | @@ -301,12 +362,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // bar(&x); // error, expected &mut // ``` let ref_ty = match mutability { - hir::Mutability::MutMutable => self.tcx.mk_mut_ref( - self.tcx.mk_region(ty::ReStatic), - checked_ty), - hir::Mutability::MutImmutable => self.tcx.mk_imm_ref( - self.tcx.mk_region(ty::ReStatic), - checked_ty), + hir::Mutability::MutMutable => { + self.tcx.mk_mut_ref(self.tcx.mk_region(ty::ReStatic), checked_ty) + } + hir::Mutability::MutImmutable => { + self.tcx.mk_imm_ref(self.tcx.mk_region(ty::ReStatic), checked_ty) + } }; if self.can_coerce(ref_ty, expected) { if let Ok(src) = cm.span_to_snippet(sp) { @@ -327,14 +388,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(sugg) = self.can_use_as_ref(expr) { return Some(sugg); } + let field_name = if is_struct_pat_shorthand_field { + format!("{}: ", sugg_expr) + } else { + String::new() + }; return Some(match mutability { - hir::Mutability::MutMutable => { - (sp, "consider mutably borrowing here", format!("&mut {}", - sugg_expr)) - } - hir::Mutability::MutImmutable => { - (sp, "consider borrowing here", format!("&{}", sugg_expr)) - } + hir::Mutability::MutMutable => ( + sp, + "consider mutably borrowing here", + format!("{}&mut {}", field_name, sugg_expr), + ), + hir::Mutability::MutImmutable => ( + sp, + "consider borrowing here", + format!("{}&{}", field_name, sugg_expr), + ), }); } } @@ -344,7 +413,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // we may want to suggest adding a `*`, or removing // a `&`. // - // (But, also check check the `expn_info()` to see if this is + // (But, also check the `expn_info()` to see if this is // a macro; if so, it's hard to extract the text and make a good // suggestion, so don't bother.) if self.infcx.can_sub(self.param_env, checked, &expected).is_ok() && @@ -353,6 +422,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Maybe remove `&`? hir::ExprKind::AddrOf(_, ref expr) => { if !cm.span_to_filename(expr.span).is_real() { + if let Ok(code) = cm.span_to_snippet(sp) { + if code.chars().next() == Some('&') { + return Some(( + sp, + "consider removing the borrow", + code[1..].to_string()), + ); + } + } return None; } if let Ok(code) = cm.span_to_snippet(expr.span) { @@ -362,16 +440,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Maybe add `*`? Only if `T: Copy`. 
_ => { - if !self.infcx.type_moves_by_default(self.param_env, - checked, - sp) { + if self.infcx.type_is_copy_modulo_regions(self.param_env, + checked, + sp) { // do not suggest if the span comes from a macro (#52783) - if let (Ok(code), - true) = (cm.span_to_snippet(sp), sp == expr.span) { + if let (Ok(code), true) = ( + cm.span_to_snippet(sp), + sp == expr.span, + ) { return Some(( sp, "consider dereferencing the borrow", - format!("*{}", code), + if is_struct_pat_shorthand_field { + format!("{}: *{}", code, code) + } else { + format!("*{}", code) + }, )); } } @@ -420,7 +504,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match expr.node { // All built-in range literals but `..=` and `..` desugar to Structs - ExprKind::Struct(QPath::Resolved(None, ref path), _, _) | + ExprKind::Struct(ref qpath, _, _) => { + if let QPath::Resolved(None, ref path) = **qpath { + return is_range_path(&path) && span_is_range_literal(&expr.span); + } + } // `..` desugars to its struct path ExprKind::Path(QPath::Resolved(None, ref path)) => { return is_range_path(&path) && span_is_range_literal(&expr.span); @@ -444,14 +532,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { false } - pub fn check_for_cast(&self, - err: &mut DiagnosticBuilder<'tcx>, - expr: &hir::Expr, - checked_ty: Ty<'tcx>, - expected_ty: Ty<'tcx>) - -> bool { - let parent_id = self.tcx.hir().get_parent_node(expr.id); - if let Some(parent) = self.tcx.hir().find(parent_id) { + pub fn check_for_cast( + &self, + err: &mut DiagnosticBuilder<'tcx>, + expr: &hir::Expr, + checked_ty: Ty<'tcx>, + expected_ty: Ty<'tcx>, + ) -> bool { + let parent_id = self.tcx.hir().get_parent_node_by_hir_id(expr.hir_id); + if let Some(parent) = self.tcx.hir().find_by_hir_id(parent_id) { // Shouldn't suggest `.into()` on `const`s. if let Node::Item(Item { node: ItemKind::Const(_, _), .. }) = parent { // FIXME(estebank): modify once we decide to suggest `as` casts @@ -477,17 +566,40 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // For now, don't suggest casting with `as`. let can_cast = false; + let mut prefix = String::new(); + if let Some(hir::Node::Expr(hir::Expr { + node: hir::ExprKind::Struct(_, fields, _), + .. + })) = self.tcx.hir().find_by_hir_id(self.tcx.hir().get_parent_node_by_hir_id(expr.hir_id)) { + // `expr` is a literal field for a struct, only suggest if appropriate + for field in fields { + if field.expr.hir_id == expr.hir_id && field.is_shorthand { + // This is a field literal + prefix = format!("{}: ", field.ident); + break; + } + } + if &prefix == "" { + // Likely a field was meant, but this field wasn't found. Do not suggest anything. 
+ return false; + } + } + let needs_paren = expr.precedence().order() < (PREC_POSTFIX as i8); if let Ok(src) = self.tcx.sess.source_map().span_to_snippet(expr.span) { let msg = format!("you can cast an `{}` to `{}`", checked_ty, expected_ty); - let cast_suggestion = format!("{}{}{} as {}", - if needs_paren { "(" } else { "" }, - src, - if needs_paren { ")" } else { "" }, - expected_ty); + let cast_suggestion = format!( + "{}{}{}{} as {}", + prefix, + if needs_paren { "(" } else { "" }, + src, + if needs_paren { ")" } else { "" }, + expected_ty, + ); let into_suggestion = format!( - "{}{}{}.into()", + "{}{}{}{}.into()", + prefix, if needs_paren { "(" } else { "" }, src, if needs_paren { ")" } else { "" }, @@ -501,7 +613,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; let into_sugg = into_suggestion.clone(); - let suggest_to_change_suffix_or_into = |err: &mut DiagnosticBuilder, + let suggest_to_change_suffix_or_into = |err: &mut DiagnosticBuilder<'_>, note: Option<&str>| { let suggest_msg = if literal_is_ty_suffixed(expr) { format!( @@ -524,7 +636,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if needs_paren { ")" } else { "" }, ); - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &suggest_msg, if literal_is_ty_suffixed(expr) { @@ -541,7 +653,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match (found.bit_width(), exp.bit_width()) { (Some(found), Some(exp)) if found > exp => { if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_truncate), cast_suggestion, @@ -551,7 +663,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } (None, _) | (_, None) => { if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_isize), cast_suggestion, @@ -572,7 +684,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { match (found.bit_width(), exp.bit_width()) { (Some(found), Some(exp)) if found > exp => { if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_truncate), cast_suggestion, @@ -582,7 +694,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } (None, _) | (_, None) => { if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_usize), cast_suggestion, @@ -603,7 +715,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if can_cast { match (found.bit_width(), exp.bit_width()) { (Some(found), Some(exp)) if found > exp - 1 => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_truncate), cast_suggestion, @@ -611,7 +723,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (None, None) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_truncate), cast_suggestion, @@ -619,7 +731,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (None, _) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_isize), cast_suggestion, @@ -627,7 +739,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (_, None) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_usize), cast_suggestion, @@ -635,7 +747,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } _ => { - err.span_suggestion_with_applicability( + err.span_suggestion( 
expr.span, &format!("{}, which {}", msg, will_zero_extend), cast_suggestion, @@ -650,7 +762,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if can_cast { match (found.bit_width(), exp.bit_width()) { (Some(found), Some(exp)) if found - 1 > exp => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_truncate), cast_suggestion, @@ -658,7 +770,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (None, None) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_sign_extend), cast_suggestion, @@ -666,7 +778,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (None, _) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_usize), cast_suggestion, @@ -674,7 +786,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } (_, None) => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, depending_on_isize), cast_suggestion, @@ -682,7 +794,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); } _ => { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, which {}", msg, will_sign_extend), cast_suggestion, @@ -700,7 +812,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { None, ); } else if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, producing the closest possible value", msg), cast_suggestion, @@ -711,7 +823,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } (&ty::Uint(_), &ty::Float(_)) | (&ty::Int(_), &ty::Float(_)) => { if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, rounding the float towards zero", msg), cast_suggestion, @@ -726,7 +838,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (&ty::Float(ref exp), &ty::Uint(ref found)) => { // if `found` is `None` (meaning found is `usize`), don't suggest `.into()` if exp.bit_width() > found.bit_width().unwrap_or(256) { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, producing the floating point representation of the \ integer", @@ -735,7 +847,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Applicability::MachineApplicable ); } else if can_cast { - err.span_suggestion_with_applicability(expr.span, + err.span_suggestion( + expr.span, &format!("{}, producing the floating point representation of the \ integer, rounded if necessary", msg), @@ -748,7 +861,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (&ty::Float(ref exp), &ty::Int(ref found)) => { // if `found` is `None` (meaning found is `isize`), don't suggest `.into()` if exp.bit_width() > found.bit_width().unwrap_or(256) { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, producing the floating point representation of the \ integer", @@ -757,7 +870,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Applicability::MachineApplicable ); } else if can_cast { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, &format!("{}, producing the floating point representation of the \ integer, rounded if necessary", diff --git a/src/librustc_typeck/check/dropck.rs b/src/librustc_typeck/check/dropck.rs index f59bc2d0c2dca..2184555a07d34 100644 --- a/src/librustc_typeck/check/dropck.rs +++ b/src/librustc_typeck/check/dropck.rs @@ -1,28 +1,18 @@ -// Copyright 2014-2015 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +use crate::check::regionck::RegionCtxt; -use check::regionck::RegionCtxt; - -use hir::def_id::DefId; +use crate::hir; +use crate::hir::def_id::DefId; use rustc::infer::outlives::env::OutlivesEnvironment; use rustc::infer::{self, InferOk, SuppressRegionErrors}; use rustc::middle::region; use rustc::traits::{ObligationCause, TraitEngine, TraitEngineExt}; -use rustc::ty::subst::{Subst, Substs, UnpackedKind}; +use rustc::ty::subst::{Subst, SubstsRef, UnpackedKind}; use rustc::ty::{self, Ty, TyCtxt}; -use util::common::ErrorReported; +use crate::util::common::ErrorReported; -use syntax::ast; use syntax_pos::Span; -/// check_drop_impl confirms that the Drop implementation identified by +/// This function confirms that the `Drop` implementation identified by /// `drop_impl_did` is not any more specialized than the type it is /// attached to (Issue #8142). /// @@ -31,7 +21,7 @@ use syntax_pos::Span; /// 1. The self type must be nominal (this is already checked during /// coherence), /// -/// 2. The generic region/type parameters of the impl's self-type must +/// 2. The generic region/type parameters of the impl's self type must /// all be parameters of the Drop impl itself (i.e., no /// specialization like `impl Drop for Foo`), and, /// @@ -80,7 +70,7 @@ fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( drop_impl_ty: Ty<'tcx>, self_type_did: DefId, ) -> Result<(), ErrorReported> { - let drop_impl_node_id = tcx.hir().as_local_node_id(drop_impl_did).unwrap(); + let drop_impl_hir_id = tcx.hir().as_local_hir_id(drop_impl_did).unwrap(); // check that the impl type can be made to match the trait type. @@ -95,7 +85,7 @@ fn ensure_drop_params_and_item_params_correspond<'a, 'tcx>( let fresh_impl_substs = infcx.fresh_substs_for_item(drop_impl_span, drop_impl_did); let fresh_impl_self_ty = drop_impl_ty.subst(tcx, fresh_impl_substs); - let cause = &ObligationCause::misc(drop_impl_span, drop_impl_node_id); + let cause = &ObligationCause::misc(drop_impl_span, drop_impl_hir_id); match infcx .at(cause, impl_param_env) .eq(named_type, fresh_impl_self_ty) @@ -155,7 +145,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( drop_impl_did: DefId, dtor_predicates: &ty::GenericPredicates<'tcx>, self_type_did: DefId, - self_to_impl_substs: &Substs<'tcx>, + self_to_impl_substs: SubstsRef<'tcx>, ) -> Result<(), ErrorReported> { let mut result = Ok(()); @@ -194,7 +184,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( // absent. So we report an error that the Drop impl injected a // predicate that is not present on the struct definition. - let self_type_node_id = tcx.hir().as_local_node_id(self_type_did).unwrap(); + let self_type_hir_id = tcx.hir().as_local_hir_id(self_type_did).unwrap(); let drop_impl_span = tcx.def_span(drop_impl_did); @@ -226,7 +216,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( // repeated `contains` calls. 
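The dropck edits here are mostly the `NodeId`-to-`HirId` migration; the two checks in this file (parameter correspondence and predicate entailment) still enforce the same rules. As a reminder of what they enforce, a small sketch with invented names:

    struct Wrapper<T> {
        value: T,
    }

    impl<T> Drop for Wrapper<T> {
        // Accepted: the impl is exactly as generic as `Wrapper` and adds no
        // predicates beyond those on the struct definition.
        fn drop(&mut self) {
            // cleanup would go here
        }
    }

    // `impl Drop for Wrapper<bool>` (a specialized self type) or
    // `impl<T: Clone> Drop for Wrapper<T>` (an extra `T: Clone` predicate)
    // would be rejected by these checks (E0366 and E0367, respectively).

    fn main() {
        let _w = Wrapper { value: 42 };
    }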
if !assumptions_in_impl_context.contains(&predicate) { - let item_span = tcx.hir().span(self_type_node_id); + let item_span = tcx.hir().span_by_hir_id(self_type_hir_id); struct_span_err!( tcx.sess, drop_impl_span, @@ -246,9 +236,9 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( result } -/// check_safety_of_destructor_if_necessary confirms that the type +/// This function confirms that the type /// expression `typ` conforms to the "Drop Check Rule" from the Sound -/// Generic Drop (RFC 769). +/// Generic Drop RFC (#769). /// /// ---- /// @@ -286,7 +276,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( /// expected to break the needed parametricity property beyond /// repair.) /// -/// Therefore we have scaled back Drop-Check to a more conservative +/// Therefore, we have scaled back Drop-Check to a more conservative /// rule that does not attempt to deduce whether a `Drop` /// implementation could not possible access data of a given lifetime; /// instead Drop-Check now simply assumes that if a destructor has @@ -297,12 +287,11 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'a, 'tcx>( /// this conservative assumption (and thus assume the obligation of /// ensuring that they do not access data nor invoke methods of /// values that have been previously dropped). -/// pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( rcx: &mut RegionCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>, span: Span, - body_id: ast::NodeId, + body_id: hir::HirId, scope: region::Scope, ) -> Result<(), ErrorReported> { debug!("check_safety_of_destructor_if_necessary typ: {:?} scope: {:?}", @@ -324,6 +313,9 @@ pub fn check_safety_of_destructor_if_necessary<'a, 'gcx, 'tcx>( match kind.unpack() { UnpackedKind::Lifetime(r) => rcx.sub_regions(origin(), parent_scope, r), UnpackedKind::Type(ty) => rcx.type_must_outlive(origin(), ty, parent_scope), + UnpackedKind::Const(_) => { + // Generic consts don't add constraints. + } } } Ok(()) diff --git a/src/librustc_typeck/check/generator_interior.rs b/src/librustc_typeck/check/generator_interior.rs index 55fceda1a5151..7f4b0a96a15ab 100644 --- a/src/librustc_typeck/check/generator_interior.rs +++ b/src/librustc_typeck/check/generator_interior.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This calculates the types which has storage which lives across a suspension point in a //! generator from the perspective of typeck. The actual types used at runtime //! is calculated in `rustc_mir::transform::generator` and may be a subset of the @@ -21,7 +11,7 @@ use rustc::ty::{self, Ty}; use rustc_data_structures::sync::Lrc; use syntax_pos::Span; use super::FnCtxt; -use util::nodemap::FxHashMap; +use crate::util::nodemap::FxHashMap; struct InteriorVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs index a40e56d68ae8b..40c60caffa42d 100644 --- a/src/librustc_typeck/check/intrinsic.rs +++ b/src/librustc_typeck/check/intrinsic.rs @@ -1,27 +1,13 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Type-checking for the rust-intrinsic and platform-intrinsic //! intrinsics that the compiler exposes. -use intrinsics; use rustc::traits::{ObligationCause, ObligationCauseCode}; use rustc::ty::{self, TyCtxt, Ty}; use rustc::ty::subst::Subst; -use rustc::util::nodemap::FxHashMap; -use require_same_types; +use crate::require_same_types; use rustc_target::spec::abi::Abi; -use syntax::ast; use syntax::symbol::Symbol; -use syntax_pos::Span; use rustc::hir; @@ -36,7 +22,7 @@ fn equate_intrinsic_type<'a, 'tcx>( inputs: Vec<Ty<'tcx>>, output: Ty<'tcx>, ) { - let def_id = tcx.hir().local_def_id(it.id); + let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); match it.node { hir::ForeignItemKind::Fn(..) => {} @@ -72,16 +58,30 @@ fn equate_intrinsic_type<'a, 'tcx>( safety, abi ))); - let cause = ObligationCause::new(it.span, it.id, ObligationCauseCode::IntrinsicType); + let cause = ObligationCause::new(it.span, it.hir_id, ObligationCauseCode::IntrinsicType); require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(def_id)), fty); } +/// Returns whether the given intrinsic is unsafe to call. +pub fn intrisic_operation_unsafety(intrinsic: &str) -> hir::Unsafety { + match intrinsic { + "size_of" | "min_align_of" | "needs_drop" | + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | + "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "saturating_add" | "saturating_sub" | + "rotate_left" | "rotate_right" | + "ctpop" | "ctlz" | "cttz" | "bswap" | "bitreverse" + => hir::Unsafety::Normal, + _ => hir::Unsafety::Unsafe, + } +} + /// Remember to add all intrinsics here, in librustc_codegen_llvm/intrinsic.rs, /// and in libcore/intrinsics.rs pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &hir::ForeignItem) { let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)).as_interned_str()); - let name = it.name.as_str(); + let name = it.ident.as_str(); let mk_va_list_ty = || { tcx.lang_items().va_list().map(|did| { @@ -127,10 +127,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } else if &name[..] == "abort" || &name[..] == "unreachable" { (0, Vec::new(), tcx.types.never, hir::Unsafety::Unsafe) } else { - let unsafety = match &name[..] { - "size_of" | "min_align_of" | "needs_drop" => hir::Unsafety::Normal, - _ => hir::Unsafety::Unsafe, - }; + let unsafety = intrisic_operation_unsafety(&name[..]); let (n_tps, inputs, output) = match &name[..]
{ "breakpoint" => (0, Vec::new(), tcx.mk_unit()), "size_of" | @@ -143,6 +140,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ], tcx.types.usize) } "rustc_peek" => (1, vec![param(0)], param(0)), + "panic_if_uninhabited" => (1, Vec::new(), tcx.mk_unit()), "init" => (1, Vec::new(), param(0)), "uninit" => (1, Vec::new(), param(0)), "forget" => (1, vec![param(0)], tcx.mk_unit()), @@ -310,6 +308,8 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "overflowing_add" | "overflowing_sub" | "overflowing_mul" => (1, vec![param(0), param(0)], param(0)), + "saturating_add" | "saturating_sub" => + (1, vec![param(0), param(0)], param(0)), "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => (1, vec![param(0), param(0)], param(0)), @@ -337,7 +337,7 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "va_start" | "va_end" => { match mk_va_list_ty() { Some(va_list_ty) => (0, vec![va_list_ty], tcx.mk_unit()), - None => bug!("va_list lang_item must be defined to use va_list intrinsics") + None => bug!("`va_list` language item needed for C-variadic intrinsics") } } @@ -364,14 +364,14 @@ pub fn check_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }; (0, vec![tcx.mk_imm_ref(tcx.mk_region(env_region), va_list_ty)], ret_ty) } - None => bug!("va_list lang_item must be defined to use va_list intrinsics") + None => bug!("`va_list` language item needed for C-variadic intrinsics") } } "va_arg" => { match mk_va_list_ty() { Some(va_list_ty) => (1, vec![va_list_ty], param(0)), - None => bug!("va_list lang_item must be defined to use va_list intrinsics") + None => bug!("`va_list` language item needed for C-variadic intrinsics") } } @@ -401,9 +401,7 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.mk_ty_param(n, name) }; - let def_id = tcx.hir().local_def_id(it.id); - let i_n_tps = tcx.generics_of(def_id).own_counts().types; - let name = it.name.as_str(); + let name = it.ident.as_str(); let (n_tps, inputs, output) = match &*name { "simd_eq" | "simd_ne" | "simd_lt" | "simd_le" | "simd_gt" | "simd_ge" => { @@ -412,7 +410,8 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "simd_add" | "simd_sub" | "simd_mul" | "simd_rem" | "simd_div" | "simd_shl" | "simd_shr" | "simd_and" | "simd_or" | "simd_xor" | - "simd_fmin" | "simd_fmax" | "simd_fpow" => { + "simd_fmin" | "simd_fmax" | "simd_fpow" | + "simd_saturating_add" | "simd_saturating_sub" => { (1, vec![param(0), param(0)], param(0)) } "simd_fsqrt" | "simd_fsin" | "simd_fcos" | "simd_fexp" | "simd_fexp2" | @@ -435,6 +434,7 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, "simd_insert" => (2, vec![param(0), tcx.types.u32, param(1)], param(0)), "simd_extract" => (2, vec![param(0), tcx.types.u32], param(1)), "simd_cast" => (2, vec![param(0)], param(1)), + "simd_bitmask" => (2, vec![param(0)], param(1)), "simd_select" | "simd_select_bitmask" => (2, vec![param(0), param(1), param(1)], param(1)), "simd_reduce_all" | "simd_reduce_any" => (1, vec![param(0)], tcx.types.bool), @@ -460,159 +460,12 @@ pub fn check_platform_intrinsic_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } _ => { - match intrinsics::Intrinsic::find(&name) { - Some(intr) => { - // this function is a platform specific intrinsic - if i_n_tps != 0 { - span_err!(tcx.sess, it.span, E0440, - "platform-specific intrinsic has wrong number of type \ - parameters: found {}, expected 0", - i_n_tps); - return - } - - let mut structural_to_nomimal = 
FxHashMap::default(); - - let sig = tcx.fn_sig(def_id); - let sig = sig.no_bound_vars().unwrap(); - if intr.inputs.len() != sig.inputs().len() { - span_err!(tcx.sess, it.span, E0444, - "platform-specific intrinsic has invalid number of \ - arguments: found {}, expected {}", - sig.inputs().len(), intr.inputs.len()); - return - } - let input_pairs = intr.inputs.iter().zip(sig.inputs()); - for (i, (expected_arg, arg)) in input_pairs.enumerate() { - match_intrinsic_type_to_type(tcx, &format!("argument {}", i + 1), it.span, - &mut structural_to_nomimal, expected_arg, arg); - } - match_intrinsic_type_to_type(tcx, "return value", it.span, - &mut structural_to_nomimal, - &intr.output, sig.output()); - return - } - None => { - span_err!(tcx.sess, it.span, E0441, - "unrecognized platform-specific intrinsic function: `{}`", name); - return; - } - } + let msg = format!("unrecognized platform-specific intrinsic function: `{}`", name); + tcx.sess.span_err(it.span, &msg); + return; } }; equate_intrinsic_type(tcx, it, n_tps, Abi::PlatformIntrinsic, hir::Unsafety::Unsafe, inputs, output) } - -// walk the expected type and the actual type in lock step, checking they're -// the same, in a kinda-structural way, i.e., `Vector`s have to be simd structs with -// exactly the right element type -fn match_intrinsic_type_to_type<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - position: &str, - span: Span, - structural_to_nominal: &mut FxHashMap<&'a intrinsics::Type, Ty<'tcx>>, - expected: &'a intrinsics::Type, t: Ty<'tcx>) -{ - use intrinsics::Type::*; - - let simple_error = |real: &str, expected: &str| { - span_err!(tcx.sess, span, E0442, - "intrinsic {} has wrong type: found {}, expected {}", - position, real, expected) - }; - - match *expected { - Void => match t.sty { - ty::Tuple(ref v) if v.is_empty() => {}, - _ => simple_error(&format!("`{}`", t), "()"), - }, - // (The width we pass to LLVM doesn't concern the type checker.) 
- Integer(signed, bits, _llvm_width) => match (signed, bits, &t.sty) { - (true, 8, &ty::Int(ast::IntTy::I8)) | - (false, 8, &ty::Uint(ast::UintTy::U8)) | - (true, 16, &ty::Int(ast::IntTy::I16)) | - (false, 16, &ty::Uint(ast::UintTy::U16)) | - (true, 32, &ty::Int(ast::IntTy::I32)) | - (false, 32, &ty::Uint(ast::UintTy::U32)) | - (true, 64, &ty::Int(ast::IntTy::I64)) | - (false, 64, &ty::Uint(ast::UintTy::U64)) | - (true, 128, &ty::Int(ast::IntTy::I128)) | - (false, 128, &ty::Uint(ast::UintTy::U128)) => {}, - _ => simple_error(&format!("`{}`", t), - &format!("`{}{n}`", - if signed {"i"} else {"u"}, - n = bits)), - }, - Float(bits) => match (bits, &t.sty) { - (32, &ty::Float(ast::FloatTy::F32)) | - (64, &ty::Float(ast::FloatTy::F64)) => {}, - _ => simple_error(&format!("`{}`", t), - &format!("`f{n}`", n = bits)), - }, - Pointer(ref inner_expected, ref _llvm_type, const_) => { - match t.sty { - ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => { - if (mutbl == hir::MutImmutable) != const_ { - simple_error(&format!("`{}`", t), - if const_ {"const pointer"} else {"mut pointer"}) - } - match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal, - inner_expected, ty) - } - _ => simple_error(&format!("`{}`", t), "raw pointer"), - } - } - Vector(ref inner_expected, ref _llvm_type, len) => { - if !t.is_simd() { - simple_error(&format!("non-simd type `{}`", t), "simd type"); - return; - } - let t_len = t.simd_size(tcx); - if len as usize != t_len { - simple_error(&format!("vector with length {}", t_len), - &format!("length {}", len)); - return; - } - let t_ty = t.simd_type(tcx); - { - // check that a given structural type always has the same an intrinsic definition - let previous = structural_to_nominal.entry(expected).or_insert(t); - if *previous != t { - // this gets its own error code because it is non-trivial - span_err!(tcx.sess, span, E0443, - "intrinsic {} has wrong type: found `{}`, expected `{}` which \ - was used for this vector type previously in this signature", - position, - t, - *previous); - return; - } - } - match_intrinsic_type_to_type(tcx, - position, - span, - structural_to_nominal, - inner_expected, - t_ty) - } - Aggregate(_flatten, ref expected_contents) => { - match t.sty { - ty::Tuple(contents) => { - if contents.len() != expected_contents.len() { - simple_error(&format!("tuple with length {}", contents.len()), - &format!("tuple with length {}", expected_contents.len())); - return - } - for (e, c) in expected_contents.iter().zip(contents) { - match_intrinsic_type_to_type(tcx, position, span, structural_to_nominal, - e, c) - } - } - _ => simple_error(&format!("`{}`", t), - "tuple"), - } - } - } -} diff --git a/src/librustc_typeck/check/method/confirm.rs b/src/librustc_typeck/check/method/confirm.rs index 11fb3889a748d..e0b96ae884f3a 100644 --- a/src/librustc_typeck/check/method/confirm.rs +++ b/src/librustc_typeck/check/method/confirm.rs @@ -1,23 +1,12 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use super::{probe, MethodCallee}; -use astconv::AstConv; -use check::{FnCtxt, PlaceOp, callee, Needs}; -use hir::GenericArg; -use hir::def_id::DefId; -use rustc::ty::subst::Substs; +use crate::astconv::AstConv; +use crate::check::{FnCtxt, PlaceOp, callee, Needs}; +use crate::hir::GenericArg; +use crate::hir::def_id::DefId; +use rustc::ty::subst::{Subst, SubstsRef}; use rustc::traits; use rustc::ty::{self, Ty, GenericParamDefKind}; -use rustc::ty::subst::Subst; use rustc::ty::adjustment::{Adjustment, Adjust, OverloadedDeref}; use rustc::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; use rustc::ty::fold::TypeFoldable; @@ -161,9 +150,9 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { let (_, n) = autoderef.nth(pick.autoderefs).unwrap(); assert_eq!(n, pick.autoderefs); - let mut adjustments = autoderef.adjust_steps(Needs::None); + let mut adjustments = autoderef.adjust_steps(self, Needs::None); - let mut target = autoderef.unambiguous_final_ty(); + let mut target = autoderef.unambiguous_final_ty(self); if let Some(mutbl) = pick.autoref { let region = self.next_region_var(infer::Autoref(self.span)); @@ -202,7 +191,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { assert!(pick.unsize.is_none()); } - autoderef.finalize(); + autoderef.finalize(self); // Write out the final adjustments. self.apply_adjustments(self.self_expr, adjustments); @@ -219,7 +208,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn fresh_receiver_substs(&mut self, self_ty: Ty<'tcx>, pick: &probe::Pick<'tcx>) - -> &'tcx Substs<'tcx> { + -> SubstsRef<'tcx> { match pick.kind { probe::InherentImplPick => { let impl_def_id = pick.item.container.id(); @@ -290,7 +279,11 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { .include_raw_pointers() .filter_map(|(ty, _)| match ty.sty { - ty::Dynamic(ref data, ..) => Some(closure(self, ty, data.principal())), + ty::Dynamic(ref data, ..) => { + Some(closure(self, ty, data.principal().unwrap_or_else(|| { + span_bug!(self.span, "calling trait method on empty object?") + }))) + }, _ => None, } ) @@ -306,8 +299,8 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { &mut self, pick: &probe::Pick<'tcx>, seg: &hir::PathSegment, - parent_substs: &Substs<'tcx>, - ) -> &'tcx Substs<'tcx> { + parent_substs: SubstsRef<'tcx>, + ) -> SubstsRef<'tcx> { // Determine the values for the generic parameters of the method. // If they were not explicitly supplied, just construct fresh // variables. @@ -348,6 +341,9 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { self.to_ty(ty).into() } + (GenericParamDefKind::Const, GenericArg::Const(ct)) => { + self.to_const(&ct.value, self.tcx.type_of(param.def_id)).into() + } _ => unreachable!(), } }, @@ -375,7 +371,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { // until we unify the `Self` type. 
fn instantiate_method_sig(&mut self, pick: &probe::Pick<'tcx>, - all_substs: &'tcx Substs<'tcx>) + all_substs: SubstsRef<'tcx>) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) { debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, @@ -410,7 +406,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { fn add_obligations(&mut self, fty: Ty<'tcx>, - all_substs: &Substs<'tcx>, + all_substs: SubstsRef<'tcx>, method_predicates: &ty::InstantiatedPredicates<'tcx>) { debug!("add_obligations: fty={:?} all_substs={:?} method_predicates={:?}", fty, @@ -604,7 +600,7 @@ impl<'a, 'gcx, 'tcx> ConfirmContext<'a, 'gcx, 'tcx> { }) } - fn enforce_illegal_method_limitations(&self, pick: &probe::Pick) { + fn enforce_illegal_method_limitations(&self, pick: &probe::Pick<'_>) { // Disallow calls to the method `drop` defined in the `Drop` trait. match pick.item.container { ty::TraitContainer(trait_def_id) => { diff --git a/src/librustc_typeck/check/method/mod.rs b/src/librustc_typeck/check/method/mod.rs index 858d8c742dfd9..34a24308e491a 100644 --- a/src/librustc_typeck/check/method/mod.rs +++ b/src/librustc_typeck/check/method/mod.rs @@ -1,14 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Method lookup: the secret sauce of Rust. See the [rustc guide] chapter. +//! Method lookup: the secret sauce of Rust. See the [rustc guide] for more information. //! //! [rustc guide]: https://rust-lang.github.io/rustc-guide/method-lookup.html @@ -18,16 +8,17 @@ mod suggest; pub use self::MethodError::*; pub use self::CandidateSource::*; -pub use self::suggest::TraitInfo; +pub use self::suggest::{SelfSource, TraitInfo}; -use check::FnCtxt; -use namespace::Namespace; +use crate::check::FnCtxt; +use crate::namespace::Namespace; +use errors::{Applicability, DiagnosticBuilder}; use rustc_data_structures::sync::Lrc; use rustc::hir; -use rustc::hir::def::Def; +use rustc::hir::def::{CtorOf, Def}; use rustc::hir::def_id::DefId; use rustc::traits; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{InternalSubsts, SubstsRef}; use rustc::ty::{self, Ty, ToPredicate, ToPolyTraitRef, TraitRef, TypeFoldable}; use rustc::ty::GenericParamDefKind; use rustc::ty::subst::Subst; @@ -35,17 +26,19 @@ use rustc::infer::{self, InferOk}; use syntax::ast; use syntax_pos::Span; +use crate::{check_type_alias_enum_variants_enabled}; use self::probe::{IsSuggestion, ProbeScope}; -pub fn provide(providers: &mut ty::query::Providers) { +pub fn provide(providers: &mut ty::query::Providers<'_>) { suggest::provide(providers); + probe::provide(providers); } #[derive(Clone, Copy, Debug)] pub struct MethodCallee<'tcx> { /// Impl method ID, for inherent methods, or trait method ID, otherwise. 
pub def_id: DefId, - pub substs: &'tcx Substs<'tcx>, + pub substs: SubstsRef<'tcx>, /// Instantiated method signature, i.e., it has been /// substituted, normalized, and has had late-bound @@ -112,7 +105,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn method_exists(&self, method_name: ast::Ident, self_ty: Ty<'tcx>, - call_expr_id: ast::NodeId, + call_expr_id: hir::HirId, allow_private: bool) -> bool { let mode = probe::Mode::MethodCall; @@ -131,6 +124,42 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } + /// Adds a suggestion to call the given method to the provided diagnostic. + crate fn suggest_method_call( + &self, + err: &mut DiagnosticBuilder<'a>, + msg: &str, + method_name: ast::Ident, + self_ty: Ty<'tcx>, + call_expr_id: hir::HirId, + ) { + let has_params = self + .probe_for_name( + method_name.span, + probe::Mode::MethodCall, + method_name, + IsSuggestion(false), + self_ty, + call_expr_id, + ProbeScope::TraitsInScope, + ) + .and_then(|pick| { + let sig = self.tcx.fn_sig(pick.item.def_id); + Ok(sig.inputs().skip_binder().len() > 1) + }); + + let (suggestion, applicability) = if has_params.unwrap_or_default() { + ( + format!("{}(...)", method_name), + Applicability::HasPlaceholders, + ) + } else { + (format!("{}()", method_name), Applicability::MaybeIncorrect) + }; + + err.span_suggestion(method_name.span, msg, suggestion, applicability); + } + /// Performs method lookup. If lookup is successful, it will return the callee /// and store an appropriate adjustment for the self-expr. In some cases it may /// report an error (e.g., invoking the `drop` method). @@ -167,13 +196,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { )?; if let Some(import_id) = pick.import_id { - let import_def_id = self.tcx.hir().local_def_id(import_id); + let import_def_id = self.tcx.hir().local_def_id_from_hir_id(import_id); debug!("used_trait_import: {:?}", import_def_id); Lrc::get_mut(&mut self.tables.borrow_mut().used_trait_imports) .unwrap().insert(import_def_id); } - self.tcx.check_stability(pick.item.def_id, Some(call_expr.id), span); + self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span); let result = self.confirm_method( span, @@ -226,18 +255,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mode = probe::Mode::MethodCall; let self_ty = self.resolve_type_vars_if_possible(&self_ty); self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, call_expr.id, scope) + self_ty, call_expr.hir_id, scope) } /// `lookup_method_in_trait` is used for overloaded operators. /// It does a very narrow slice of what the normal probe/confirm path does. /// In particular, it doesn't really do any probing: it simply constructs - /// an obligation for a particular trait with the given self-type and checks + /// an obligation for a particular trait with the given self type and checks /// whether that trait is implemented. - /// - /// FIXME(#18741): it seems likely that we can consolidate some of this - /// code with the other method-lookup code. In particular, the second half - /// of this method is basically the same as confirmation. + // + // FIXME(#18741): it seems likely that we can consolidate some of this + // code with the other method-lookup code. In particular, the second half + // of this method is basically the same as confirmation. 
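The new `suggest_method_call` helper above probes for the named method and proposes `name(...)` (with placeholder arguments, `Applicability::HasPlaceholders`) when the method takes arguments besides `self`, and `name()` (`Applicability::MaybeIncorrect`) otherwise. A hedged sketch of the kind of user code this serves, for example when a method is referenced as if it were a field (illustrative only, not part of the patch):

    fn main() {
        let s = "hello";
        // error[E0615]: attempted to take value of method `len` on type `&str`
        // The suggestion machinery can then propose the call form `s.len()`.
        let _n = s.len;
    }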
pub fn lookup_method_in_trait(&self, span: Span, m_name: ast::Ident, @@ -252,10 +281,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { trait_def_id); // Construct a trait-reference `self_ty : Trait` - let substs = Substs::for_item(self.tcx, trait_def_id, |param, _| { + let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| { match param.kind { - GenericParamDefKind::Lifetime => {} - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Lifetime | GenericParamDefKind::Const => {} + GenericParamDefKind::Type { .. } => { if param.index == 0 { return self_ty.into(); } else if let Some(ref input_types) = opt_input_types { @@ -365,30 +394,58 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }) } - pub fn resolve_ufcs(&self, - span: Span, - method_name: ast::Ident, - self_ty: Ty<'tcx>, - expr_id: ast::NodeId) - -> Result> { - let mode = probe::Mode::Path; - let pick = self.probe_for_name(span, mode, method_name, IsSuggestion(false), - self_ty, expr_id, ProbeScope::TraitsInScope)?; + pub fn resolve_ufcs( + &self, + span: Span, + method_name: ast::Ident, + self_ty: Ty<'tcx>, + expr_id: hir::HirId + ) -> Result> { + debug!( + "resolve_ufcs: method_name={:?} self_ty={:?} expr_id={:?}", + method_name, self_ty, expr_id, + ); + + let tcx = self.tcx; + // Check if we have an enum variant. + if let ty::Adt(adt_def, _) = self_ty.sty { + if adt_def.is_enum() { + let variant_def = adt_def.variants.iter().find(|vd| { + tcx.hygienic_eq(method_name, vd.ident, adt_def.did) + }); + if let Some(variant_def) = variant_def { + check_type_alias_enum_variants_enabled(tcx, span); + + // Braced variants generate unusable names in value namespace (reserved for + // possible future use), so variants resolved as associated items may refer to + // them as well. It's ok to use the variant's id as a ctor id since an + // error will be reported on any use of such resolution anyway. + let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id); + let def = Def::Ctor(ctor_def_id, CtorOf::Variant, variant_def.ctor_kind); + tcx.check_stability(def.def_id(), Some(expr_id), span); + return Ok(def); + } + } + } + + let pick = self.probe_for_name(span, probe::Mode::Path, method_name, IsSuggestion(false), + self_ty, expr_id, ProbeScope::TraitsInScope)?; + debug!("resolve_ufcs: pick={:?}", pick); if let Some(import_id) = pick.import_id { - let import_def_id = self.tcx.hir().local_def_id(import_id); - debug!("used_trait_import: {:?}", import_def_id); + let import_def_id = tcx.hir().local_def_id_from_hir_id(import_id); + debug!("resolve_ufcs: used_trait_import: {:?}", import_def_id); Lrc::get_mut(&mut self.tables.borrow_mut().used_trait_imports) - .unwrap().insert(import_def_id); + .unwrap().insert(import_def_id); } let def = pick.item.def(); - self.tcx.check_stability(def.def_id(), Some(expr_id), span); - + debug!("resolve_ufcs: def={:?}", def); + tcx.check_stability(def.def_id(), Some(expr_id), span); Ok(def) } - /// Find item with name `item_name` defined in impl/trait `def_id` + /// Finds item with name `item_name` defined in impl/trait `def_id` /// and return it, or `None`, if no such item was defined there. pub fn associated_item(&self, def_id: DefId, item_name: ast::Ident, ns: Namespace) -> Option { diff --git a/src/librustc_typeck/check/method/probe.rs b/src/librustc_typeck/check/method/probe.rs index dd3c022d53bb3..42156213f21df 100644 --- a/src/librustc_typeck/check/method/probe.rs +++ b/src/librustc_typeck/check/method/probe.rs @@ -1,40 +1,37 @@ -// Copyright 2014 The Rust Project Developers. 
See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use super::MethodError; use super::NoMatchData; use super::{CandidateSource, ImplSource, TraitSource}; use super::suggest; -use check::FnCtxt; -use hir::def_id::DefId; -use hir::def::Def; -use namespace::Namespace; +use crate::check::autoderef::{self, Autoderef}; +use crate::check::FnCtxt; +use crate::hir::def_id::DefId; +use crate::hir::def::Def; +use crate::namespace::Namespace; + +use rustc_data_structures::sync::Lrc; use rustc::hir; use rustc::lint; use rustc::session::config::nightly_options; -use rustc::ty::subst::{Subst, Substs}; +use rustc::ty::subst::{Subst, InternalSubsts, SubstsRef}; use rustc::traits::{self, ObligationCause}; -use rustc::ty::{self, Ty, ToPolyTraitRef, ToPredicate, TraitRef, TypeFoldable}; +use rustc::traits::query::{CanonicalTyGoal}; +use rustc::traits::query::method_autoderef::{CandidateStep, MethodAutoderefStepsResult}; +use rustc::traits::query::method_autoderef::{MethodAutoderefBadTy}; +use rustc::ty::{self, ParamEnvAnd, Ty, TyCtxt, ToPolyTraitRef, ToPredicate, TraitRef, TypeFoldable}; use rustc::ty::GenericParamDefKind; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::util::nodemap::FxHashSet; use rustc::infer::{self, InferOk}; +use rustc::infer::canonical::{Canonical, QueryResponse}; +use rustc::infer::canonical::{OriginalQueryValues}; use rustc::middle::stability; use syntax::ast; use syntax::util::lev_distance::{lev_distance, find_best_match_for_name}; -use syntax_pos::{Span, symbol::Symbol}; +use syntax_pos::{DUMMY_SP, Span, symbol::Symbol}; use std::iter; use std::mem; use std::ops::Deref; -use std::rc::Rc; use std::cmp::max; use self::CandidateKind::*; @@ -51,7 +48,12 @@ struct ProbeContext<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { mode: Mode, method_name: Option, return_type: Option>, - steps: Rc>>, + + /// This is the OriginalQueryValues for the steps queries + /// that are answered in steps. + orig_steps_var_values: OriginalQueryValues<'tcx>, + steps: Lrc>>, + inherent_candidates: Vec>, extension_candidates: Vec>, impl_dups: FxHashSet, @@ -81,31 +83,49 @@ impl<'a, 'gcx, 'tcx> Deref for ProbeContext<'a, 'gcx, 'tcx> { } } -#[derive(Debug)] -struct CandidateStep<'tcx> { - self_ty: Ty<'tcx>, - autoderefs: usize, - // true if the type results from a dereference of a raw pointer. - // when assembling candidates, we include these steps, but not when - // picking methods. This so that if we have `foo: *const Foo` and `Foo` has methods - // `fn by_raw_ptr(self: *const Self)` and `fn by_ref(&self)`, then - // `foo.by_raw_ptr()` will work and `foo.by_ref()` won't. - from_unsafe_deref: bool, - unsize: bool, -} - #[derive(Debug)] struct Candidate<'tcx> { + // Candidates are (I'm not quite sure, but they are mostly) basically + // some metadata on top of a `ty::AssociatedItem` (without substs). + // + // However, method probing wants to be able to evaluate the predicates + // for a function with the substs applied - for example, if a function + // has `where Self: Sized`, we don't want to consider it unless `Self` + // is actually `Sized`, and similarly, return-type suggestions want + // to consider the "actual" return type. + // + // The way this is handled is through `xform_self_ty`. 
It contains + // the receiver type of this candidate, but `xform_self_ty`, + // `xform_ret_ty` and `kind` (which contains the predicates) have the + // generic parameters of this candidate substituted with the *same set* + // of inference variables, which acts as some weird sort of "query". + // + // When we check out a candidate, we require `xform_self_ty` to be + // a subtype of the passed-in self-type, and this equates the type + // variables in the rest of the fields. + // + // For example, if we have this candidate: + // ``` + // trait Foo { + // fn foo(&self) where Self: Sized; + // } + // ``` + // + // Then `xform_self_ty` will be `&'erased ?X` and `kind` will contain + // the predicate `?X: Sized`, so if we are evaluating `Foo` for a + // the receiver `&T`, we'll do the subtyping which will make `?X` + // get the right value, then when we evaluate the predicate we'll check + // if `T: Sized`. xform_self_ty: Ty<'tcx>, xform_ret_ty: Option>, item: ty::AssociatedItem, kind: CandidateKind<'tcx>, - import_id: Option, + import_id: Option, } #[derive(Debug)] enum CandidateKind<'tcx> { - InherentImplCandidate(&'tcx Substs<'tcx>, + InherentImplCandidate(SubstsRef<'tcx>, // Normalize obligations Vec>), ObjectCandidate, @@ -125,7 +145,7 @@ enum ProbeResult { pub struct Pick<'tcx> { pub item: ty::AssociatedItem, pub kind: PickKind<'tcx>, - pub import_id: Option, + pub import_id: Option, // Indicates that the source expression should be autoderef'd N times // @@ -189,7 +209,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { mode: Mode, return_type: Ty<'tcx>, self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId) + scope_expr_id: hir::HirId) -> Vec { debug!("probe(self_ty={:?}, return_type={}, scope_expr_id={})", self_ty, @@ -218,7 +238,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_name: ast::Ident, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId, + scope_expr_id: hir::HirId, scope: ProbeScope) -> PickResult<'tcx> { debug!("probe(self_ty={:?}, item_name={}, scope_expr_id={})", @@ -243,48 +263,117 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { return_type: Option>, is_suggestion: IsSuggestion, self_ty: Ty<'tcx>, - scope_expr_id: ast::NodeId, + scope_expr_id: hir::HirId, scope: ProbeScope, op: OP) -> Result> where OP: FnOnce(ProbeContext<'a, 'gcx, 'tcx>) -> Result> { - // FIXME(#18741) -- right now, creating the steps involves evaluating the - // `*` operator, which registers obligations that then escape into - // the global fulfillment context and thus has global - // side-effects. This is a bit of a pain to refactor. So just let - // it ride, although it's really not great, and in fact could I - // think cause spurious errors. Really though this part should - // take place in the `self.probe` below. + let mut orig_values = OriginalQueryValues::default(); + let param_env_and_self_ty = + self.infcx.canonicalize_query( + &ParamEnvAnd { + param_env: self.param_env, + value: self_ty + }, &mut orig_values); + let steps = if mode == Mode::MethodCall { - match self.create_steps(span, scope_expr_id, self_ty, is_suggestion) { - Some(steps) => steps, - None => { - return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), - Vec::new(), - Vec::new(), - None, - mode))) - } - } + self.tcx.method_autoderef_steps(param_env_and_self_ty) } else { - vec![CandidateStep { - self_ty, - autoderefs: 0, - from_unsafe_deref: false, - unsize: false, - }] + self.infcx.probe(|_| { + // Mode::Path - the deref steps is "trivial". 
This turns + // our CanonicalQuery into a "trivial" QueryResponse. This + // is a bit inefficient, but I don't think that writing + // special handling for this "trivial case" is a good idea. + + let infcx = &self.infcx; + let (ParamEnvAnd { + param_env: _, + value: self_ty + }, canonical_inference_vars) = + infcx.instantiate_canonical_with_fresh_inference_vars( + span, ¶m_env_and_self_ty); + debug!("probe_op: Mode::Path, param_env_and_self_ty={:?} self_ty={:?}", + param_env_and_self_ty, self_ty); + MethodAutoderefStepsResult { + steps: Lrc::new(vec![CandidateStep { + self_ty: self.make_query_response_ignoring_pending_obligations( + canonical_inference_vars, self_ty), + autoderefs: 0, + from_unsafe_deref: false, + unsize: false, + }]), + opt_bad_ty: None, + reached_recursion_limit: false + } + }) }; + // If our autoderef loop had reached the recursion limit, + // report an overflow error, but continue going on with + // the truncated autoderef list. + if steps.reached_recursion_limit { + self.probe(|_| { + let ty = &steps.steps.last().unwrap_or_else(|| { + span_bug!(span, "reached the recursion limit in 0 steps?") + }).self_ty; + let ty = self.probe_instantiate_query_response(span, &orig_values, ty) + .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); + autoderef::report_autoderef_recursion_limit_error(self.tcx, span, + ty.value); + }); + } + + + // If we encountered an `_` type or an error type during autoderef, this is + // ambiguous. + if let Some(bad_ty) = &steps.opt_bad_ty { + if is_suggestion.0 { + // Ambiguity was encountered during a suggestion. Just keep going. + debug!("ProbeContext: encountered ambiguity in suggestion"); + } else if bad_ty.reached_raw_pointer && !self.tcx.features().arbitrary_self_types { + // this case used to be allowed by the compiler, + // so we do a future-compat lint here for the 2015 edition + // (see https://github.com/rust-lang/rust/issues/46906) + if self.tcx.sess.rust_2018() { + span_err!(self.tcx.sess, span, E0699, + "the type of this value must be known \ + to call a method on a raw pointer on it"); + } else { + self.tcx.lint_hir( + lint::builtin::TYVAR_BEHIND_RAW_POINTER, + scope_expr_id, + span, + "type annotations needed"); + } + } else { + // Encountered a real ambiguity, so abort the lookup. If `ty` is not + // an `Err`, report the right "type annotations needed" error pointing + // to it. 
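The `reached_raw_pointer` branch above concerns method calls on a raw pointer whose pointee type is still an unresolved inference variable: on the 2018 edition this is a hard error (E0699), while on 2015 it only triggers the `TYVAR_BEHIND_RAW_POINTER` future-compatibility lint. A small illustrative example (not part of the patch):

    fn main() {
        let ptr = std::ptr::null(); // `*const _` -- the pointee is an unresolved inference variable
        // 2018 edition: error[E0699]: the type of this value must be known
        //               to call a method on a raw pointer on it
        // 2015 edition: `tyvar_behind_raw_pointer` future-compatibility lint instead
        let _ = ptr.is_null();
    }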
+ let ty = &bad_ty.ty; + let ty = self.probe_instantiate_query_response(span, &orig_values, ty) + .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty)); + let ty = self.structurally_resolved_type(span, ty.value); + assert_eq!(ty, self.tcx.types.err); + return Err(MethodError::NoMatch(NoMatchData::new(Vec::new(), + Vec::new(), + Vec::new(), + None, + mode))); + } + } + debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps); + // this creates one big transaction so that all type variables etc // that we create during the probe process are removed later self.probe(|_| { let mut probe_cx = ProbeContext::new( - self, span, mode, method_name, return_type, Rc::new(steps), is_suggestion, + self, span, mode, method_name, return_type, orig_values, + steps.steps, is_suggestion, ); probe_cx.assemble_inherent_candidates(); @@ -297,21 +386,30 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { op(probe_cx) }) } +} + +pub fn provide(providers: &mut ty::query::Providers<'_>) { + providers.method_autoderef_steps = method_autoderef_steps; +} + +fn method_autoderef_steps<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, + goal: CanonicalTyGoal<'tcx>) + -> MethodAutoderefStepsResult<'gcx> +{ + debug!("method_autoderef_steps({:?})", goal); - fn create_steps(&self, - span: Span, - scope_expr_id: ast::NodeId, - self_ty: Ty<'tcx>, - is_suggestion: IsSuggestion) - -> Option>> { - // FIXME: we don't need to create the entire steps in one pass + tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &goal, |ref infcx, goal, inference_vars| { + let ParamEnvAnd { param_env, value: self_ty } = goal; - let mut autoderef = self.autoderef(span, self_ty).include_raw_pointers(); + let mut autoderef = Autoderef::new(infcx, param_env, hir::DUMMY_HIR_ID, DUMMY_SP, self_ty) + .include_raw_pointers() + .silence_errors(); let mut reached_raw_pointer = false; let mut steps: Vec<_> = autoderef.by_ref() .map(|(ty, d)| { let step = CandidateStep { - self_ty: ty, + self_ty: infcx.make_query_response_ignoring_pending_obligations( + inference_vars.clone(), ty), autoderefs: d, from_unsafe_deref: reached_raw_pointer, unsize: false, @@ -325,68 +423,52 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .collect(); let final_ty = autoderef.maybe_ambiguous_final_ty(); - match final_ty.sty { - ty::Infer(ty::TyVar(_)) => { - // Ended in an inference variable. If we are doing - // a real method lookup, this is a hard error because it's - // possible that there will be multiple applicable methods. - if !is_suggestion.0 { - if reached_raw_pointer - && !self.tcx.features().arbitrary_self_types { - // this case used to be allowed by the compiler, - // so we do a future-compat lint here for the 2015 edition - // (see https://github.com/rust-lang/rust/issues/46906) - if self.tcx.sess.rust_2018() { - span_err!(self.tcx.sess, span, E0699, - "the type of this value must be known \ - to call a method on a raw pointer on it"); - } else { - self.tcx.lint_node( - lint::builtin::TYVAR_BEHIND_RAW_POINTER, - scope_expr_id, - span, - "type annotations needed"); - } - } else { - let t = self.structurally_resolved_type(span, final_ty); - assert_eq!(t, self.tcx.types.err); - return None - } - } else { - // If we're just looking for suggestions, - // though, ambiguity is no big thing, we can - // just ignore it. 
- } + let opt_bad_ty = match final_ty.sty { + ty::Infer(ty::TyVar(_)) | + ty::Error => { + Some(MethodAutoderefBadTy { + reached_raw_pointer, + ty: infcx.make_query_response_ignoring_pending_obligations( + inference_vars, final_ty) + }) } ty::Array(elem_ty, _) => { let dereferences = steps.len() - 1; steps.push(CandidateStep { - self_ty: self.tcx.mk_slice(elem_ty), + self_ty: infcx.make_query_response_ignoring_pending_obligations( + inference_vars, infcx.tcx.mk_slice(elem_ty)), autoderefs: dereferences, // this could be from an unsafe deref if we had // a *mut/const [T; N] from_unsafe_deref: reached_raw_pointer, unsize: true, }); + + None } - ty::Error => return None, - _ => (), - } + _ => None + }; - debug!("create_steps: steps={:?}", steps); + debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty); - Some(steps) - } + MethodAutoderefStepsResult { + steps: Lrc::new(steps), + opt_bad_ty: opt_bad_ty.map(Lrc::new), + reached_recursion_limit: autoderef.reached_recursion_limit() + } + }) } + impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn new(fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, span: Span, mode: Mode, method_name: Option, return_type: Option>, - steps: Rc>>, + orig_steps_var_values: OriginalQueryValues<'tcx>, + steps: Lrc>>, is_suggestion: IsSuggestion) -> ProbeContext<'a, 'gcx, 'tcx> { ProbeContext { @@ -398,7 +480,8 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { inherent_candidates: Vec::new(), extension_candidates: Vec::new(), impl_dups: FxHashSet::default(), - steps: steps, + orig_steps_var_values, + steps, static_candidates: Vec::new(), allow_similar_names: false, private_candidate: None, @@ -443,19 +526,41 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn assemble_inherent_candidates(&mut self) { let steps = self.steps.clone(); for step in steps.iter() { - self.assemble_probe(step.self_ty); + self.assemble_probe(&step.self_ty); } } - fn assemble_probe(&mut self, self_ty: Ty<'tcx>) { + fn assemble_probe(&mut self, self_ty: &Canonical<'gcx, QueryResponse<'gcx, Ty<'gcx>>>) { debug!("assemble_probe: self_ty={:?}", self_ty); let lang_items = self.tcx.lang_items(); - match self_ty.sty { + match self_ty.value.value.sty { ty::Dynamic(ref data, ..) => { - let p = data.principal(); - self.assemble_inherent_candidates_from_object(self_ty, p); - self.assemble_inherent_impl_candidates_for_type(p.def_id()); + if let Some(p) = data.principal() { + // Subtle: we can't use `instantiate_query_response` here: using it will + // commit to all of the type equalities assumed by inference going through + // autoderef (see the `method-probe-no-guessing` test). + // + // However, in this code, it is OK if we end up with an object type that is + // "more general" than the object type that we are evaluating. For *every* + // object type `MY_OBJECT`, a function call that goes through a trait-ref + // of the form `::func` is a valid + // `ObjectCandidate`, and it should be discoverable "exactly" through one + // of the iterations in the autoderef loop, so there is no problem with it + // being discoverable in another one of these iterations. + // + // Using `instantiate_canonical_with_fresh_inference_vars` on our + // `Canonical>>` and then *throwing away* the + // `CanonicalVarValues` will exactly give us such a generalization - it + // will still match the original object type, but it won't pollute our + // type variables in any form, so just do that! + let (QueryResponse { value: generalized_self_ty, .. 
}, _ignored_var_values) = + self.fcx.instantiate_canonical_with_fresh_inference_vars( + self.span, &self_ty); + + self.assemble_inherent_candidates_from_object(generalized_self_ty); + self.assemble_inherent_impl_candidates_for_type(p.def_id()); + } } ty::Adt(def, _) => { self.assemble_inherent_impl_candidates_for_type(def.did); @@ -464,7 +569,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.assemble_inherent_impl_candidates_for_type(did); } ty::Param(p) => { - self.assemble_inherent_candidates_from_param(self_ty, p); + self.assemble_inherent_candidates_from_param(p); } ty::Char => { let lang_def_id = lang_items.char_impl(); @@ -615,11 +720,18 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_inherent_candidates_from_object(&mut self, - self_ty: Ty<'tcx>, - principal: ty::PolyExistentialTraitRef<'tcx>) { + self_ty: Ty<'tcx>) { debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty); + let principal = match self_ty.sty { + ty::Dynamic(ref data, ..) => Some(data), + _ => None + }.and_then(|data| data.principal()).unwrap_or_else(|| { + span_bug!(self.span, "non-object {:?} in assemble_inherent_candidates_from_object", + self_ty) + }); + // It is illegal to invoke a method on a trait instance that // refers to the `Self` type. An error will be reported by // `enforce_object_limitations()` if the method refers to the @@ -642,7 +754,6 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_inherent_candidates_from_param(&mut self, - _rcvr_ty: Ty<'tcx>, param_ty: ty::ParamTy) { // FIXME -- Do we want to commit to this behavior for param bounds? @@ -714,19 +825,19 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_extension_candidates_for_traits_in_scope(&mut self, - expr_id: ast::NodeId) + expr_hir_id: hir::HirId) -> Result<(), MethodError<'tcx>> { - if expr_id == ast::DUMMY_NODE_ID { + if expr_hir_id == hir::DUMMY_HIR_ID { return Ok(()) } let mut duplicates = FxHashSet::default(); - let expr_hir_id = self.tcx.hir().node_to_hir_id(expr_id); let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id); if let Some(applicable_traits) = opt_applicable_traits { for trait_candidate in applicable_traits.iter() { let trait_did = trait_candidate.def_id; if duplicates.insert(trait_did) { - let import_id = trait_candidate.import_id; + let import_id = trait_candidate.import_id.map(|node_id| + self.fcx.tcx.hir().node_to_hir_id(node_id)); let result = self.assemble_extension_candidates_for_trait(import_id, trait_did); result?; } @@ -777,7 +888,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } fn assemble_extension_candidates_for_trait(&mut self, - import_id: Option, + import_id: Option, trait_def_id: DefId) -> Result<(), MethodError<'tcx>> { debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", @@ -785,20 +896,36 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let trait_substs = self.fresh_item_substs(trait_def_id); let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs); - for item in self.impl_or_trait_item(trait_def_id) { - // Check whether `trait_def_id` defines a method with suitable name: - if !self.has_applicable_self(&item) { - debug!("method has inapplicable self"); - self.record_static_candidate(TraitSource(trait_def_id)); - continue; - } + if self.tcx.is_trait_alias(trait_def_id) { + // For trait aliases, assume all super-traits are relevant. 
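The trait-alias branch above elaborates the alias into its super-traits so their methods become candidates when only the alias is in scope. A rough sketch of the usage this appears aimed at (requires the unstable `trait_alias` feature; trait and method names are invented; illustrative only and not part of the patch):

    #![feature(trait_alias)]

    mod traits {
        pub trait Greet {
            fn hello(&self) -> String;
        }
        impl Greet for i32 {
            fn hello(&self) -> String { format!("hello from {}", self) }
        }
        // The alias defines no items of its own; probing elaborates it to `Greet`.
        pub trait Greeter = Greet;
    }

    // Only the alias is imported, not `Greet` itself.
    use traits::Greeter;

    fn main() {
        assert_eq!(1i32.hello(), "hello from 1");
    }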
+ let bounds = iter::once(trait_ref.to_poly_trait_ref()); + self.elaborate_bounds(bounds, |this, new_trait_ref, item| { + let new_trait_ref = this.erase_late_bound_regions(&new_trait_ref); + + let (xform_self_ty, xform_ret_ty) = + this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs); + this.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, import_id, + kind: TraitCandidate(new_trait_ref), + }, true); + }); + } else { + debug_assert!(self.tcx.is_trait(trait_def_id)); + for item in self.impl_or_trait_item(trait_def_id) { + // Check whether `trait_def_id` defines a method with suitable name. + if !self.has_applicable_self(&item) { + debug!("method has inapplicable self"); + self.record_static_candidate(TraitSource(trait_def_id)); + continue; + } - let (xform_self_ty, xform_ret_ty) = - self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs); - self.push_candidate(Candidate { - xform_self_ty, xform_ret_ty, item, import_id, - kind: TraitCandidate(trait_ref), - }, false); + let (xform_self_ty, xform_ret_ty) = + self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs); + self.push_candidate(Candidate { + xform_self_ty, xform_ret_ty, item, import_id, + kind: TraitCandidate(trait_ref), + }, false); + } } Ok(()) } @@ -819,7 +946,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { .filter(|&name| set.insert(name)) .collect(); - // sort them by the name so we have a stable result + // Sort them by the name so we have a stable result. names.sort_by_cached_key(|n| n.as_str()); names } @@ -834,6 +961,8 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { return r; } + debug!("pick: actual search failed, assemble diagnotics"); + let static_candidates = mem::replace(&mut self.static_candidates, vec![]); let private_candidate = self.private_candidate.take(); let unsatisfied_predicates = mem::replace(&mut self.unsatisfied_predicates, vec![]); @@ -898,14 +1027,22 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // a raw pointer !step.self_ty.references_error() && !step.from_unsafe_deref }).flat_map(|step| { - self.pick_by_value_method(step).or_else(|| { - self.pick_autorefd_method(step, hir::MutImmutable).or_else(|| { - self.pick_autorefd_method(step, hir::MutMutable) + let InferOk { value: self_ty, obligations: _ } = + self.fcx.probe_instantiate_query_response( + self.span, &self.orig_steps_var_values, &step.self_ty + ).unwrap_or_else(|_| { + span_bug!(self.span, "{:?} was applicable but now isn't?", step.self_ty) + }); + self.pick_by_value_method(step, self_ty).or_else(|| { + self.pick_autorefd_method(step, self_ty, hir::MutImmutable).or_else(|| { + self.pick_autorefd_method(step, self_ty, hir::MutMutable) })})}) .next() } - fn pick_by_value_method(&mut self, step: &CandidateStep<'tcx>) -> Option> { + fn pick_by_value_method(&mut self, step: &CandidateStep<'gcx>, self_ty: Ty<'tcx>) + -> Option> + { //! For each type `T` in the step list, this attempts to find a //! method where the (transformed) self type is exactly `T`. We //! 
do however do one transformation on the adjustment: if we @@ -918,12 +1055,12 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { return None; } - self.pick_method(step.self_ty).map(|r| { + self.pick_method(self_ty).map(|r| { r.map(|mut pick| { pick.autoderefs = step.autoderefs; // Insert a `&*` or `&mut *` if this is a reference type: - if let ty::Ref(_, _, mutbl) = step.self_ty.sty { + if let ty::Ref(_, _, mutbl) = step.self_ty.value.value.sty { pick.autoderefs += 1; pick.autoref = Some(mutbl); } @@ -933,7 +1070,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { }) } - fn pick_autorefd_method(&mut self, step: &CandidateStep<'tcx>, mutbl: hir::Mutability) + fn pick_autorefd_method(&mut self, + step: &CandidateStep<'gcx>, + self_ty: Ty<'tcx>, + mutbl: hir::Mutability) -> Option> { let tcx = self.tcx; @@ -943,14 +1083,14 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { - ty: step.self_ty, mutbl + ty: self_ty, mutbl }); self.pick_method(autoref_ty).map(|r| { r.map(|mut pick| { pick.autoderefs = step.autoderefs; pick.autoref = Some(mutbl); pick.unsize = if step.unsize { - Some(step.self_ty) + Some(self_ty) } else { None }; @@ -1058,23 +1198,23 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn emit_unstable_name_collision_hint( &self, - stable_pick: &Pick, + stable_pick: &Pick<'_>, unstable_candidates: &[(&Candidate<'tcx>, Symbol)], ) { - let mut diag = self.tcx.struct_span_lint_node( + let mut diag = self.tcx.struct_span_lint_hir( lint::builtin::UNSTABLE_NAME_COLLISIONS, self.fcx.body_id, self.span, "a method with this name may be added to the standard library in the future", ); - // FIXME: This should be a `span_suggestion_with_applicability` instead of `help` + // FIXME: This should be a `span_suggestion` instead of `help` // However `self.span` only // highlights the method name, so we can't use it. Also consider reusing the code from // `report_method_error()`. 
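The `pick_by_value_method`/`pick_autorefd_method` pair above implements the usual probe order at each autoderef step: try an exact (by-value) receiver first, then retry behind `&` and finally `&mut`. A small illustrative example with an invented type (not part of the patch):

    struct Counter {
        n: u32,
    }

    impl Counter {
        fn get(&self) -> u32 { self.n }      // found via the `&` autoref pick
        fn bump(&mut self) { self.n += 1; }  // found via the `&mut` autoref pick
    }

    fn main() {
        let mut c = Counter { n: 0 };
        c.bump(); // no by-value or `&self` candidate, so the `&mut self` pick applies
        assert_eq!(c.get(), 1);
    }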
diag.help(&format!( "call with fully qualified syntax `{}(...)` to keep using the current method", - self.tcx.item_path_str(stable_pick.item.def_id), + self.tcx.def_path_str(stable_pick.item.def_id), )); if nightly_options::is_nightly_build() { @@ -1082,7 +1222,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { diag.help(&format!( "add #![feature({})] to the crate attributes to enable `{}`", feature, - self.tcx.item_path_str(candidate.item.def_id), + self.tcx.def_path_str(candidate.item.def_id), )); } } @@ -1288,10 +1428,12 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { let steps = self.steps.clone(); self.probe(|_| { let mut pcx = ProbeContext::new(self.fcx, self.span, self.mode, self.method_name, - self.return_type, steps, IsSuggestion(true)); + self.return_type, + self.orig_steps_var_values.clone(), + steps, IsSuggestion(true)); pcx.allow_similar_names = true; pcx.assemble_inherent_candidates(); - pcx.assemble_extension_candidates_for_traits_in_scope(ast::DUMMY_NODE_ID)?; + pcx.assemble_extension_candidates_for_traits_in_scope(hir::DUMMY_HIR_ID)?; let method_names = pcx.candidate_method_names(); pcx.allow_similar_names = false; @@ -1301,7 +1443,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { pcx.reset(); pcx.method_name = Some(method_name); pcx.assemble_inherent_candidates(); - pcx.assemble_extension_candidates_for_traits_in_scope(ast::DUMMY_NODE_ID) + pcx.assemble_extension_candidates_for_traits_in_scope(hir::DUMMY_HIR_ID) .ok().map_or(None, |_| { pcx.pick_core() .and_then(|pick| pick.ok()) @@ -1357,7 +1499,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn xform_self_ty(&self, item: &ty::AssociatedItem, impl_ty: Ty<'tcx>, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> (Ty<'tcx>, Option>) { if item.kind == ty::AssociatedKind::Method && self.mode == Mode::MethodCall { let sig = self.xform_method_sig(item.def_id, substs); @@ -1369,7 +1511,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { fn xform_method_sig(&self, method: DefId, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> { let fn_sig = self.tcx.fn_sig(method); @@ -1394,7 +1536,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { if generics.params.is_empty() { xform_fn_sig.subst(self.tcx, substs) } else { - let substs = Substs::for_item(self.tcx, method, |param, _| { + let substs = InternalSubsts::for_item(self.tcx, method, |param, _| { let i = param.index as usize; if i < substs.len() { substs[i] @@ -1405,7 +1547,10 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { // `impl_self_ty()` for an explanation. self.tcx.types.re_erased.into() } - GenericParamDefKind::Type {..} => self.var_for_def(self.span, param), + GenericParamDefKind::Type { .. } + | GenericParamDefKind::Const => { + self.var_for_def(self.span, param) + } } } }); @@ -1413,24 +1558,27 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { } } - /// Get the type of an impl and generate substitutions with placeholders. - fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, &'tcx Substs<'tcx>) { + /// Gets the type of an impl and generate substitutions with placeholders. 
+ fn impl_ty_and_substs(&self, impl_def_id: DefId) -> (Ty<'tcx>, SubstsRef<'tcx>) { (self.tcx.type_of(impl_def_id), self.fresh_item_substs(impl_def_id)) } - fn fresh_item_substs(&self, def_id: DefId) -> &'tcx Substs<'tcx> { - Substs::for_item(self.tcx, def_id, |param, _| { + fn fresh_item_substs(&self, def_id: DefId) -> SubstsRef<'tcx> { + InternalSubsts::for_item(self.tcx, def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => self.tcx.types.re_erased.into(), - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } => { self.next_ty_var(TypeVariableOrigin::SubstitutionPlaceholder( self.tcx.def_span(def_id))).into() } + GenericParamDefKind::Const { .. } => { + unimplemented!() // FIXME(const_generics) + } } }) } - /// Replace late-bound-regions bound by `value` with `'static` using + /// Replaces late-bound-regions bound by `value` with `'static` using /// `ty::erase_late_bound_regions`. /// /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of @@ -1454,7 +1602,7 @@ impl<'a, 'gcx, 'tcx> ProbeContext<'a, 'gcx, 'tcx> { self.tcx.erase_late_bound_regions(value) } - /// Find the method with the appropriate name (or return type, as the case may be). If + /// Finds the method with the appropriate name (or return type, as the case may be). If /// `allow_similar_names` is set, find methods with close-matching names. fn impl_or_trait_item(&self, def_id: DefId) -> Vec { if let Some(name) = self.method_name { diff --git a/src/librustc_typeck/check/method/suggest.rs b/src/librustc_typeck/check/method/suggest.rs index 09063579c4214..31b7724d63816 100644 --- a/src/librustc_typeck/check/method/suggest.rs +++ b/src/librustc_typeck/check/method/suggest.rs @@ -1,34 +1,24 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Give useful errors and suggestions to users when an item can't be //! found or is otherwise invalid. 
-use check::FnCtxt; +use crate::check::FnCtxt; +use crate::middle::lang_items::FnOnceTraitLangItem; +use crate::namespace::Namespace; +use crate::util::nodemap::FxHashSet; use errors::{Applicability, DiagnosticBuilder}; -use middle::lang_items::FnOnceTraitLangItem; -use namespace::Namespace; use rustc_data_structures::sync::Lrc; -use rustc::hir::{self, Node}; +use rustc::hir::{self, ExprKind, Node, QPath}; use rustc::hir::def::Def; use rustc::hir::def_id::{CRATE_DEF_INDEX, LOCAL_CRATE, DefId}; use rustc::hir::map as hir_map; use rustc::hir::print; use rustc::infer::type_variable::TypeVariableOrigin; use rustc::traits::Obligation; -use rustc::ty::{self, Adt, Ty, TyCtxt, ToPolyTraitRef, ToPredicate, TypeFoldable}; -use rustc::ty::item_path::with_crate_prefix; -use util::nodemap::FxHashSet; +use rustc::ty::{self, Ty, TyCtxt, ToPolyTraitRef, ToPredicate, TypeFoldable}; +use rustc::ty::print::with_crate_prefix; use syntax_pos::{Span, FileName}; use syntax::ast; -use syntax::util::lev_distance::find_best_match_for_name; +use syntax::util::lev_distance; use std::cmp::Ordering; @@ -70,19 +60,27 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - pub fn report_method_error(&self, - span: Span, - rcvr_ty: Ty<'tcx>, - item_name: ast::Ident, - rcvr_expr: Option<&hir::Expr>, - error: MethodError<'tcx>, - args: Option<&'gcx [hir::Expr]>) { + pub fn report_method_error<'b>( + &self, + span: Span, + rcvr_ty: Ty<'tcx>, + item_name: ast::Ident, + source: SelfSource<'b>, + error: MethodError<'tcx>, + args: Option<&'gcx [hir::Expr]>, + ) { + let orig_span = span; + let mut span = span; // Avoid suggestions when we don't know what's going on. if rcvr_ty.references_error() { return; } - let report_candidates = |err: &mut DiagnosticBuilder, mut sources: Vec| { + let report_candidates = | + span: Span, + err: &mut DiagnosticBuilder<'_>, + mut sources: Vec, + | { sources.sort(); sources.dedup(); // Dynamic limit to avoid hiding just one candidate, which is silly. 
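`report_method_error` now takes a `SelfSource` (defined further down in this file) rather than an optional receiver expression, so the diagnostics can tell a method-call receiver apart from a qualified path. Roughly, the two variants correspond to these failure shapes (error wording approximate; illustrative only, not part of the patch):

    struct Foo;

    fn main() {
        let x = Foo;

        // SelfSource::MethodCall(<receiver expression>):
        // error[E0599]: no method named `bar` found for type `Foo` in the current scope
        x.bar();

        // SelfSource::QPath(<self type>):
        // error[E0599]: no function or associated item named `baz` found for type `Foo`
        Foo::baz();
    }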
@@ -111,7 +109,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { None => String::new(), Some(trait_ref) => { format!(" of the trait `{}`", - self.tcx.item_path_str(trait_ref.def_id)) + self.tcx.def_path_str(trait_ref.def_id)) } }; @@ -144,16 +142,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_span, "candidate #{} is defined in the trait `{}`", idx + 1, - self.tcx.item_path_str(trait_did)); + self.tcx.def_path_str(trait_did)); } else { span_note!(err, item_span, "the candidate is defined in the trait `{}`", - self.tcx.item_path_str(trait_did)); + self.tcx.def_path_str(trait_did)); } err.help(&format!("to disambiguate the method call, write `{}::{}({}{})` \ instead", - self.tcx.item_path_str(trait_did), + self.tcx.def_path_str(trait_did), item_name, if rcvr_ty.is_region_ptr() && args.is_some() { if rcvr_ty.is_mutable_pointer() { @@ -190,17 +188,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let actual = self.resolve_type_vars_if_possible(&rcvr_ty); let ty_str = self.ty_to_string(actual); let is_method = mode == Mode::MethodCall; - let mut suggestion = None; let item_kind = if is_method { "method" } else if actual.is_enum() { - if let Adt(ref adt_def, _) = actual.sty { - let names = adt_def.variants.iter().map(|s| &s.name); - suggestion = find_best_match_for_name(names, - &item_name.as_str(), - None); - } - "variant" + "variant or associated item" } else { match (item_name.as_str().chars().next(), actual.is_fresh_ty()) { (Some(name), false) if name.is_lowercase() => { @@ -222,10 +213,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .filter_map(|info| self.associated_item(info.def_id, item_name, Namespace::Value) ); - if let (true, false, Some(expr), Some(_)) = (actual.is_numeric(), - actual.has_concrete_skeleton(), - rcvr_expr, - candidates.next()) { + if let (true, false, SelfSource::MethodCall(expr), Some(_)) = + (actual.is_numeric(), + actual.has_concrete_skeleton(), + source, + candidates.next()) { let mut err = struct_span_err!( tcx.sess, span, @@ -241,25 +233,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { "f32" }; match expr.node { - hir::ExprKind::Lit(ref lit) => { + ExprKind::Lit(ref lit) => { // numeric literal let snippet = tcx.sess.source_map().span_to_snippet(lit.span) .unwrap_or_else(|_| "".to_owned()); - err.span_suggestion_with_applicability( - lit.span, - &format!("you must specify a concrete type for \ - this numeric value, like `{}`", - concrete_type), - format!("{}_{}", - snippet, - concrete_type), - Applicability::MaybeIncorrect, + err.span_suggestion( + lit.span, + &format!("you must specify a concrete type for \ + this numeric value, like `{}`", concrete_type), + format!("{}_{}", snippet, concrete_type), + Applicability::MaybeIncorrect, ); } - hir::ExprKind::Path(ref qpath) => { + ExprKind::Path(ref qpath) => { // local binding - if let &hir::QPath::Resolved(_, ref path) = &qpath { + if let &QPath::Resolved(_, ref path) = &qpath { if let hir::def::Def::Local(node_id) = path.def { let span = tcx.hir().span(node_id); let snippet = tcx.sess.source_map().span_to_snippet(span) @@ -280,7 +269,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty, .. 
})) => { - err.span_suggestion_with_applicability( + err.span_suggestion( // account for `let x: _ = 42;` // ^^^^ span.to(ty.as_ref().map(|ty| ty.span) @@ -302,7 +291,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { err.emit(); return; } else { - let mut err = struct_span_err!( + span = item_name.span; + struct_span_err!( tcx.sess, span, E0599, @@ -310,11 +300,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { item_kind, item_name, ty_str - ); - if let Some(suggestion) = suggestion { - err.note(&format!("did you mean `{}::{}`?", ty_str, suggestion)); - } - err + ) } } else { tcx.sess.diagnostic().struct_dummy() @@ -336,47 +322,66 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // If the method name is the name of a field with a function or closure type, // give a helping note that it has to be called as `(x.f)(...)`. - if let Some(expr) = rcvr_expr { - for (ty, _) in self.autoderef(span, rcvr_ty) { - if let ty::Adt(def, substs) = ty.sty { - if !def.is_enum() { + if let SelfSource::MethodCall(expr) = source { + let field_receiver = self + .autoderef(span, rcvr_ty) + .find_map(|(ty, _)| match ty.sty { + ty::Adt(def, substs) if !def.is_enum() => { let variant = &def.non_enum_variant(); - if let Some(index) = self.tcx.find_field_index(item_name, variant) { + self.tcx.find_field_index(item_name, variant).map(|index| { let field = &variant.fields[index]; - let snippet = tcx.sess.source_map().span_to_snippet(expr.span); - let expr_string = match snippet { - Ok(expr_string) => expr_string, - _ => "s".into(), // Default to a generic placeholder for the - // expression when we can't generate a - // string snippet. - }; - let field_ty = field.ty(tcx, substs); - let scope = self.tcx.hir().get_module_parent(self.body_id); - if field.vis.is_accessible_from(scope, self.tcx) { - if self.is_fn_ty(&field_ty, span) { - err.help(&format!("use `({0}.{1})(...)` if you \ - meant to call the function \ - stored in the `{1}` field", - expr_string, - item_name)); - } else { - err.help(&format!("did you mean to write `{0}.{1}` \ - instead of `{0}.{1}(...)`?", - expr_string, - item_name)); - } - err.span_label(span, "field, not a method"); - } else { - err.span_label(span, "private field, not a method"); - } - break; - } + (field, field_ty) + }) + } + _ => None, + }); + + if let Some((field, field_ty)) = field_receiver { + let scope = self.tcx.hir().get_module_parent_by_hir_id(self.body_id); + let is_accessible = field.vis.is_accessible_from(scope, self.tcx); + + if is_accessible { + if self.is_fn_ty(&field_ty, span) { + let expr_span = expr.span.to(item_name.span); + err.multipart_suggestion( + &format!( + "to call the function stored in `{}`, \ + surround the field access with parentheses", + item_name, + ), + vec![ + (expr_span.shrink_to_lo(), '('.to_string()), + (expr_span.shrink_to_hi(), ')'.to_string()), + ], + Applicability::MachineApplicable, + ); + } else { + let call_expr = self.tcx.hir().expect_expr_by_hir_id( + self.tcx.hir().get_parent_node_by_hir_id(expr.hir_id), + ); + + let span = call_expr.span.trim_start(item_name.span).unwrap(); + + err.span_suggestion( + span, + "remove the arguments", + String::new(), + Applicability::MaybeIncorrect, + ); } } + + let field_kind = if is_accessible { + "field" + } else { + "private field" + }; + err.span_label(item_name.span, format!("{}, not a method", field_kind)); } } else { err.span_label(span, format!("{} not found in `{}`", item_kind, ty_str)); + self.tcx.sess.trait_methods_not_found.borrow_mut().insert(orig_span); } if self.is_fn_ty(&rcvr_ty, span) { @@ 
-387,10 +392,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - if let Some(expr) = rcvr_expr { + if let SelfSource::MethodCall(expr) = source { if let Ok(expr_string) = tcx.sess.source_map().span_to_snippet(expr.span) { report_function!(expr.span, expr_string); - } else if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = + } else if let ExprKind::Path(QPath::Resolved(_, ref path)) = expr.node { if let Some(segment) = path.segments.last() { @@ -406,8 +411,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { err.span_label(span, "this is an associated function, not a method"); } if static_sources.len() == 1 { - if let Some(expr) = rcvr_expr { - err.span_suggestion_with_applicability(expr.span.to(span), + if let SelfSource::MethodCall(expr) = source { + err.span_suggestion(expr.span.to(span), "use associated function syntax instead", format!("{}::{}", self.ty_to_string(actual), @@ -418,9 +423,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.ty_to_string(actual), item_name)); } - report_candidates(&mut err, static_sources); + report_candidates(span, &mut err, static_sources); } else if static_sources.len() > 1 { - report_candidates(&mut err, static_sources); + report_candidates(span, &mut err, static_sources); } if !unsatisfied_predicates.is_empty() { @@ -443,13 +448,40 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { span, rcvr_ty, item_name, - rcvr_expr, + source, out_of_scope_traits); } + if actual.is_enum() { + let adt_def = actual.ty_adt_def().expect("enum is not an ADT"); + if let Some(suggestion) = lev_distance::find_best_match_for_name( + adt_def.variants.iter().map(|s| &s.ident.name), + &item_name.as_str(), + None, + ) { + err.span_suggestion( + span, + "there is a variant with a similar name", + suggestion.to_string(), + Applicability::MaybeIncorrect, + ); + } + } + if let Some(lev_candidate) = lev_candidate { - err.help(&format!("did you mean `{}`?", lev_candidate.ident)); + let def = lev_candidate.def(); + err.span_suggestion( + span, + &format!( + "there is {} {} with a similar name", + def.article(), + def.kind_name(), + ), + lev_candidate.ident.to_string(), + Applicability::MaybeIncorrect, + ); } + err.emit(); } @@ -460,7 +492,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { "multiple applicable items in scope"); err.span_label(span, format!("multiple `{}` found", item_name)); - report_candidates(&mut err, sources); + report_candidates(span, &mut err, sources); err.emit(); } @@ -497,11 +529,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } fn suggest_use_candidates(&self, - err: &mut DiagnosticBuilder, + err: &mut DiagnosticBuilder<'_>, mut msg: String, candidates: Vec) { - let module_did = self.tcx.hir().get_module_parent(self.body_id); - let module_id = self.tcx.hir().as_local_node_id(module_did).unwrap(); + let module_did = self.tcx.hir().get_module_parent_by_hir_id(self.body_id); + let module_id = self.tcx.hir().as_local_hir_id(module_did).unwrap(); let krate = self.tcx.hir().krate(); let (span, found_use) = UsePlacementFinder::check(self.tcx, krate, module_id); if let Some(span) = span { @@ -515,17 +547,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; format!( "use {};\n{}", - with_crate_prefix(|| self.tcx.item_path_str(*did)), + with_crate_prefix(|| self.tcx.def_path_str(*did)), additional_newline ) }); - err.span_suggestions_with_applicability( - span, - &msg, - path_strings, - Applicability::MaybeIncorrect, - ); + err.span_suggestions(span, &msg, path_strings, Applicability::MaybeIncorrect); } else { let limit = if candidates.len() == 
5 { 5 } else { 4 }; for (i, trait_did) in candidates.iter().take(limit).enumerate() { @@ -534,14 +561,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { &format!( "\ncandidate #{}: `use {};`", i + 1, - with_crate_prefix(|| self.tcx.item_path_str(*trait_did)) + with_crate_prefix(|| self.tcx.def_path_str(*trait_did)) ) ); } else { msg.push_str( &format!( "\n`use {};`", - with_crate_prefix(|| self.tcx.item_path_str(*trait_did)) + with_crate_prefix(|| self.tcx.def_path_str(*trait_did)) ) ); } @@ -554,7 +581,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } fn suggest_valid_traits(&self, - err: &mut DiagnosticBuilder, + err: &mut DiagnosticBuilder<'_>, valid_out_of_scope_traits: Vec) -> bool { if !valid_out_of_scope_traits.is_empty() { let mut candidates = valid_out_of_scope_traits; @@ -581,18 +608,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - fn suggest_traits_to_import(&self, - err: &mut DiagnosticBuilder, - span: Span, - rcvr_ty: Ty<'tcx>, - item_name: ast::Ident, - rcvr_expr: Option<&hir::Expr>, - valid_out_of_scope_traits: Vec) { + fn suggest_traits_to_import<'b>(&self, + err: &mut DiagnosticBuilder<'_>, + span: Span, + rcvr_ty: Ty<'tcx>, + item_name: ast::Ident, + source: SelfSource<'b>, + valid_out_of_scope_traits: Vec) { if self.suggest_valid_traits(err, valid_out_of_scope_traits) { return; } - let type_is_local = self.type_derefs_to_local(span, rcvr_ty, rcvr_expr); + let type_is_local = self.type_derefs_to_local(span, rcvr_ty, source); // There are no traits implemented, so lets suggest some traits to // implement, by finding ones that have the item name, and are @@ -642,7 +669,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { for (i, trait_info) in candidates.iter().enumerate() { msg.push_str(&format!("\ncandidate #{}: `{}`", i + 1, - self.tcx.item_path_str(trait_info.def_id))); + self.tcx.def_path_str(trait_info.def_id))); } err.note(&msg[..]); } @@ -653,13 +680,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn type_derefs_to_local(&self, span: Span, rcvr_ty: Ty<'tcx>, - rcvr_expr: Option<&hir::Expr>) -> bool { - fn is_local(ty: Ty) -> bool { + source: SelfSource<'_>) -> bool { + fn is_local(ty: Ty<'_>) -> bool { match ty.sty { ty::Adt(def, _) => def.did.is_local(), ty::Foreign(did) => did.is_local(), - ty::Dynamic(ref tr, ..) => tr.principal().def_id().is_local(), + ty::Dynamic(ref tr, ..) => + tr.principal().map(|d| d.def_id().is_local()).unwrap_or(false), ty::Param(_) => true, @@ -673,7 +701,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // This occurs for UFCS desugaring of `T::method`, where there is no // receiver expression for the method call, and thus no autoderef. - if rcvr_expr.is_none() { + if let SelfSource::QPath(_) = source { return is_local(self.resolve_type_vars_with_obligations(rcvr_ty)); } @@ -681,6 +709,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } +#[derive(Copy, Clone)] +pub enum SelfSource<'a> { + QPath(&'a hir::Ty), + MethodCall(&'a hir::Expr /* rcvr */), +} + #[derive(Copy, Clone)] pub struct TraitInfo { pub def_id: DefId, @@ -708,12 +742,12 @@ impl Ord for TraitInfo { } } -/// Retrieve all traits in this crate and any dependent crates. +/// Retrieves all traits in this crate and any dependent crates. pub fn all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Vec { tcx.all_traits(LOCAL_CRATE).iter().map(|&def_id| TraitInfo { def_id }).collect() } -/// Compute all traits in this crate and any dependent crates. +/// Computes all traits in this crate and any dependent crates. 
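The `suggest_valid_traits`/`suggest_use_candidates` path above produces the familiar "trait is implemented but not in scope" help with a ready-made `use` suggestion. An illustrative example with an invented trait (help wording approximate; not part of the patch):

    mod m {
        pub trait Double {
            fn double(&self) -> i32;
        }
        impl Double for i32 {
            fn double(&self) -> i32 { self * 2 }
        }
    }

    fn main() {
        // error[E0599]: no method named `double` found for type `i32` in the current scope
        // help: the following trait is implemented but not in scope,
        //       perhaps add a `use` for it: `use crate::m::Double;`
        let _ = 1i32.double();
    }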
fn compute_all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Vec { use hir::itemlikevisit; @@ -728,9 +762,13 @@ fn compute_all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Vec impl<'v, 'a, 'tcx> itemlikevisit::ItemLikeVisitor<'v> for Visitor<'a, 'tcx> { fn visit_item(&mut self, i: &'v hir::Item) { - if let hir::ItemKind::Trait(..) = i.node { - let def_id = self.map.local_def_id(i.id); - self.traits.push(def_id); + match i.node { + hir::ItemKind::Trait(..) | + hir::ItemKind::TraitAlias(..) => { + let def_id = self.map.local_def_id_from_hir_id(i.hir_id); + self.traits.push(def_id); + } + _ => () } } @@ -747,16 +785,16 @@ fn compute_all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Vec // Cross-crate: let mut external_mods = FxHashSet::default(); - fn handle_external_def(tcx: TyCtxt, + fn handle_external_def(tcx: TyCtxt<'_, '_, '_>, traits: &mut Vec, external_mods: &mut FxHashSet, def: Def) { - let def_id = def.def_id(); match def { - Def::Trait(..) => { + Def::Trait(def_id) | + Def::TraitAlias(def_id) => { traits.push(def_id); } - Def::Mod(..) => { + Def::Mod(def_id) => { if !external_mods.insert(def_id) { return; } @@ -778,7 +816,7 @@ fn compute_all_traits<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Vec traits } -pub fn provide(providers: &mut ty::query::Providers) { +pub fn provide(providers: &mut ty::query::Providers<'_>) { providers.all_traits = |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); Lrc::new(compute_all_traits(tcx)) @@ -786,7 +824,7 @@ pub fn provide(providers: &mut ty::query::Providers) { } struct UsePlacementFinder<'a, 'tcx: 'a, 'gcx: 'tcx> { - target_module: ast::NodeId, + target_module: hir::HirId, span: Option, found_use: bool, tcx: TyCtxt<'a, 'gcx, 'tcx> @@ -796,7 +834,7 @@ impl<'a, 'tcx, 'gcx> UsePlacementFinder<'a, 'tcx, 'gcx> { fn check( tcx: TyCtxt<'a, 'gcx, 'tcx>, krate: &'tcx hir::Crate, - target_module: ast::NodeId, + target_module: hir::HirId, ) -> (Option, bool) { let mut finder = UsePlacementFinder { target_module, @@ -814,18 +852,18 @@ impl<'a, 'tcx, 'gcx> hir::intravisit::Visitor<'tcx> for UsePlacementFinder<'a, ' &mut self, module: &'tcx hir::Mod, _: Span, - node_id: ast::NodeId, + hir_id: hir::HirId, ) { if self.span.is_some() { return; } - if node_id != self.target_module { - hir::intravisit::walk_mod(self, module, node_id); + if hir_id != self.target_module { + hir::intravisit::walk_mod(self, module, hir_id); return; } // Find a `use` statement. for item_id in &module.item_ids { - let item = self.tcx.hir().expect_item(item_id.id); + let item = self.tcx.hir().expect_item_by_hir_id(item_id.id); match item.node { hir::ItemKind::Use(..) => { // Don't suggest placing a `use` before the prelude diff --git a/src/librustc_typeck/check/mod.rs b/src/librustc_typeck/check/mod.rs index 957c8d9f19f0e..313ed19b945d1 100644 --- a/src/librustc_typeck/check/mod.rs +++ b/src/librustc_typeck/check/mod.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! 
-# check.rs +# typeck: check phase Within the check phase of type check, we check each item one at a time (bodies of function expressions are checked as part of the containing @@ -90,34 +80,36 @@ mod closure; mod callee; mod compare_method; mod generator_interior; -mod intrinsic; +pub mod intrinsic; mod op; -use astconv::AstConv; +use crate::astconv::{AstConv, PathSeg}; use errors::{Applicability, DiagnosticBuilder, DiagnosticId}; -use rustc::hir::{self, GenericArg, ItemKind, Node, PatKind}; -use rustc::hir::def::Def; +use rustc::hir::{self, ExprKind, GenericArg, ItemKind, Node, PatKind, QPath}; +use rustc::hir::def::{CtorOf, CtorKind, Def}; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; use rustc::hir::itemlikevisit::ItemLikeVisitor; -use middle::lang_items; -use namespace::Namespace; +use crate::middle::lang_items; +use crate::namespace::Namespace; +use rustc::infer::{self, InferCtxt, InferOk, InferResult}; +use rustc::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse}; use rustc_data_structures::indexed_vec::Idx; use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi::Abi; -use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin}; use rustc::infer::opaque_types::OpaqueTypeDecl; use rustc::infer::type_variable::{TypeVariableOrigin}; use rustc::middle::region; use rustc::mir::interpret::{ConstValue, GlobalId}; -use rustc::ty::subst::{CanonicalUserSubsts, UnpackedKind, Subst, Substs, - UserSelfTy, UserSubsts}; use rustc::traits::{self, ObligationCause, ObligationCauseCode, TraitEngine}; -use rustc::ty::{self, AdtKind, Ty, TyCtxt, GenericParamDefKind, Visibility, ToPredicate, - RegionKind}; +use rustc::ty::{ + self, AdtKind, CanonicalUserType, Ty, TyCtxt, GenericParamDefKind, Visibility, + ToPolyTraitRef, ToPredicate, RegionKind, UserType +}; use rustc::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability}; use rustc::ty::fold::TypeFoldable; use rustc::ty::query::Providers; +use rustc::ty::subst::{UnpackedKind, Subst, InternalSubsts, SubstsRef, UserSelfTy, UserSubsts}; use rustc::ty::util::{Representability, IntTypeExt, Discr}; use rustc::ty::layout::VariantIdx; use syntax_pos::{self, BytePos, Span, MultiSpan}; @@ -138,19 +130,21 @@ use std::mem::replace; use std::ops::{self, Deref}; use std::slice; -use require_c_abi_if_variadic; -use session::{CompileIncomplete, config, Session}; -use TypeAndSubsts; -use lint; -use util::common::{ErrorReported, indenter}; -use util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, NodeMap}; +use crate::require_c_abi_if_c_variadic; +use crate::session::Session; +use crate::session::config::EntryFnType; +use crate::TypeAndSubsts; +use crate::lint; +use crate::util::captures::Captures; +use crate::util::common::{ErrorReported, indenter}; +use crate::util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, HirIdMap}; pub use self::Expectation::*; use self::autoderef::Autoderef; use self::callee::DeferredCallResolution; use self::coercion::{CoerceMany, DynamicCoerceMany}; pub use self::compare_method::{compare_impl_method, compare_const_impl}; -use self::method::MethodCallee; +use self::method::{MethodCallee, SelfSource}; use self::TupleArgumentsFlag::*; /// The type of a local binding, including the revealed type for anon types. @@ -160,7 +154,7 @@ pub struct LocalTy<'tcx> { revealed_ty: Ty<'tcx> } -/// A wrapper for InferCtxt's `in_progress_tables` field. +/// A wrapper for `InferCtxt`'s `in_progress_tables` field. 
#[derive(Copy, Clone)] struct MaybeInProgressTables<'a, 'tcx: 'a> { maybe_tables: Option<&'a RefCell>>, @@ -186,7 +180,7 @@ impl<'a, 'tcx> MaybeInProgressTables<'a, 'tcx> { } } -/// closures defined within the function. For example: +/// Closures defined within the function. For example: /// /// fn foo() { /// bar(move|| { ... }) @@ -200,7 +194,7 @@ pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { tables: MaybeInProgressTables<'a, 'tcx>, - locals: RefCell>>, + locals: RefCell>>, fulfillment_cx: RefCell>>, @@ -252,13 +246,10 @@ pub enum Expectation<'tcx> { /// We know nothing about what type this expression should have. NoExpectation, - /// This expression is an `if` condition, it must resolve to `bool`. - ExpectIfCondition, - - /// This expression should have the type given (or some subtype) + /// This expression should have the type given (or some subtype). ExpectHasType(Ty<'tcx>), - /// This expression will be cast to the `Ty` + /// This expression will be cast to the `Ty`. ExpectCastableToType(Ty<'tcx>), /// This rvalue expression will be wrapped in `&` or `Box` and coerced @@ -300,7 +291,7 @@ impl<'a, 'gcx, 'tcx> Expectation<'tcx> { } } - /// Provide an expectation for an rvalue expression given an *optional* + /// Provides an expectation for an rvalue expression given an *optional* /// hint, which is not required for type safety (the resulting type might /// be checked higher up, as is the case with `&expr` and `box expr`), but /// is useful in determining the concrete type. @@ -334,7 +325,6 @@ impl<'a, 'gcx, 'tcx> Expectation<'tcx> { fn resolve(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> { match self { NoExpectation => NoExpectation, - ExpectIfCondition => ExpectIfCondition, ExpectCastableToType(t) => { ExpectCastableToType(fcx.resolve_type_vars_if_possible(&t)) } @@ -350,7 +340,6 @@ impl<'a, 'gcx, 'tcx> Expectation<'tcx> { fn to_option(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option> { match self.resolve(fcx) { NoExpectation => None, - ExpectIfCondition => Some(fcx.tcx.types.bool), ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty), @@ -364,7 +353,6 @@ impl<'a, 'gcx, 'tcx> Expectation<'tcx> { fn only_has_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option> { match self.resolve(fcx) { ExpectHasType(ty) => Some(ty), - ExpectIfCondition => Some(fcx.tcx.types.bool), NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) => None, } } @@ -394,14 +382,14 @@ impl Needs { #[derive(Copy, Clone)] pub struct UnsafetyState { - pub def: ast::NodeId, + pub def: hir::HirId, pub unsafety: hir::Unsafety, pub unsafe_push_count: u32, from_fn: bool } impl UnsafetyState { - pub fn function(unsafety: hir::Unsafety, def: ast::NodeId) -> UnsafetyState { + pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState { UnsafetyState { def: def, unsafety: unsafety, unsafe_push_count: 0, from_fn: true } } @@ -416,11 +404,11 @@ impl UnsafetyState { unsafety => { let (unsafety, def, count) = match blk.rules { hir::PushUnsafeBlock(..) => - (unsafety, blk.id, self.unsafe_push_count.checked_add(1).unwrap()), + (unsafety, blk.hir_id, self.unsafe_push_count.checked_add(1).unwrap()), hir::PopUnsafeBlock(..) => - (unsafety, blk.id, self.unsafe_push_count.checked_sub(1).unwrap()), + (unsafety, blk.hir_id, self.unsafe_push_count.checked_sub(1).unwrap()), hir::UnsafeBlock(..) 
=> - (hir::Unsafety::Unsafe, blk.id, self.unsafe_push_count), + (hir::Unsafety::Unsafe, blk.hir_id, self.unsafe_push_count), hir::DefaultBlock => (unsafety, self.def, self.unsafe_push_count), }; @@ -455,7 +443,7 @@ pub enum Diverges { Always, /// Same as `Always` but with a reachability - /// warning already emitted + /// warning already emitted. WarnedAlways } @@ -503,11 +491,11 @@ pub struct BreakableCtxt<'gcx: 'tcx, 'tcx> { pub struct EnclosingBreakables<'gcx: 'tcx, 'tcx> { stack: Vec>, - by_id: NodeMap, + by_id: HirIdMap, } impl<'gcx, 'tcx> EnclosingBreakables<'gcx, 'tcx> { - fn find_breakable(&mut self, target_id: ast::NodeId) -> &mut BreakableCtxt<'gcx, 'tcx> { + fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'gcx, 'tcx> { let ix = *self.by_id.get(&target_id).unwrap_or_else(|| { bug!("could not find enclosing breakable with id {}", target_id); }); @@ -515,11 +503,8 @@ impl<'gcx, 'tcx> EnclosingBreakables<'gcx, 'tcx> { } } -#[derive(Debug)] -struct PathSeg(DefId, usize); - pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { - body_id: ast::NodeId, + body_id: hir::HirId, /// The parameter environment used for proving trait obligations /// in this function. This can change when we descend into @@ -536,22 +521,23 @@ pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { err_count_on_creation: usize, ret_coercion: Option>>, + ret_coercion_span: RefCell>, yield_ty: Option>, ps: RefCell, /// Whether the last checked node generates a divergence (e.g., - /// `return` will set this to Always). In general, when entering + /// `return` will set this to `Always`). In general, when entering /// an expression or other node in the tree, the initial value /// indicates whether prior parts of the containing expression may /// have diverged. It is then typically set to `Maybe` (and the /// old value remembered) for processing the subparts of the /// current expression. As each subpart is processed, they may set - /// the flag to `Always` etc. Finally, at the end, we take the + /// the flag to `Always`, etc. Finally, at the end, we take the /// result and "union" it with the original value, so that when we /// return the flag indicates if any subpart of the parent - /// expression (up to and including this part) has diverged. So, + /// expression (up to and including this part) has diverged. So, /// if you read it after evaluating a subexpression `X`, the value /// you get indicates whether any subexpression that was /// evaluating up to and including `X` diverged. @@ -570,7 +556,7 @@ pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { /// foo();}` or `{return; 22}`, where we would warn on the /// `foo()` or `22`. /// - /// An expression represents dead-code if, after checking it, + /// An expression represents dead code if, after checking it, /// the diverges flag is set to something other than `Maybe`. diverges: Cell, @@ -589,9 +575,9 @@ impl<'a, 'gcx, 'tcx> Deref for FnCtxt<'a, 'gcx, 'tcx> { } } -/// Helper type of a temporary returned by Inherited::build(...). +/// Helper type of a temporary returned by `Inherited::build(...)`. /// Necessary because we can't write the following bound: -/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>). +/// `F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>)`. 
pub struct InheritedBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { infcx: infer::InferCtxtBuilder<'a, 'gcx, 'tcx>, def_id: DefId, @@ -601,8 +587,7 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { pub fn build(tcx: TyCtxt<'a, 'gcx, 'gcx>, def_id: DefId) -> InheritedBuilder<'a, 'gcx, 'tcx> { let hir_id_root = if def_id.is_local() { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let hir_id = tcx.hir().definitions().node_to_hir_id(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); DefId::local(hir_id.owner) } else { def_id @@ -627,8 +612,8 @@ impl<'a, 'gcx, 'tcx> InheritedBuilder<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { fn new(infcx: InferCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> Self { let tcx = infcx.tcx; - let item_id = tcx.hir().as_local_node_id(def_id); - let body_id = item_id.and_then(|id| tcx.hir().maybe_body_owned_by(id)); + let item_id = tcx.hir().as_local_hir_id(def_id); + let body_id = item_id.and_then(|id| tcx.hir().maybe_body_owned_by_by_hir_id(id)); let implicit_region_bound = body_id.map(|body_id| { let body = tcx.hir().body(body_id); tcx.mk_region(ty::ReScope(region::Scope { @@ -680,7 +665,7 @@ impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> { fn normalize_associated_types_in(&self, span: Span, - body_id: ast::NodeId, + body_id: hir::HirId, param_env: ty::ParamEnv<'tcx>, value: &T) -> T where T : TypeFoldable<'tcx> @@ -703,29 +688,19 @@ impl<'a, 'tcx> ItemLikeVisitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> { pub fn check_wf_new<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), ErrorReported> { tcx.sess.track_errors(|| { let mut visit = wfcheck::CheckTypeWellFormedVisitor::new(tcx); - tcx.hir().krate().visit_all_item_likes(&mut visit.as_deep_visitor()); - }) -} - -pub fn check_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), ErrorReported> { - tcx.sess.track_errors(|| { - tcx.hir().krate().visit_all_item_likes(&mut CheckItemTypesVisitor { tcx }); + tcx.hir().krate().par_visit_all_item_likes(&mut visit); }) } -pub fn check_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), CompileIncomplete> { - tcx.typeck_item_bodies(LOCAL_CRATE) +fn check_mod_item_types<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + tcx.hir().visit_item_likes_in_module(module_def_id, &mut CheckItemTypesVisitor { tcx }); } -fn typeck_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) - -> Result<(), CompileIncomplete> -{ +fn typeck_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) { debug_assert!(crate_num == LOCAL_CRATE); - Ok(tcx.sess.track_errors(|| { - tcx.par_body_owners(|body_owner_def_id| { - ty::query::queries::typeck_tables_of::ensure(tcx, body_owner_def_id); - }); - })?) 
+ tcx.par_body_owners(|body_owner_def_id| { + tcx.ensure().typeck_tables_of(body_owner_def_id); + }); } fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { @@ -740,7 +715,7 @@ fn check_impl_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: De wfcheck::check_impl_item(tcx, def_id); } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { method::provide(providers); *providers = Providers { typeck_item_bodies, @@ -751,6 +726,7 @@ pub fn provide(providers: &mut Providers) { check_item_well_formed, check_trait_item_well_formed, check_impl_item_well_formed, + check_mod_item_types, ..*providers }; } @@ -761,20 +737,20 @@ fn adt_destructor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.calculate_dtor(def_id, &mut dropck::check_drop_impl) } -/// If this def-id is a "primary tables entry", returns `Some((body_id, decl))` +/// If this `DefId` is a "primary tables entry", returns `Some((body_id, decl))` /// with information about it's body-id and fn-decl (if any). Otherwise, /// returns `None`. /// /// If this function returns "some", then `typeck_tables(def_id)` will /// succeed; if it returns `None`, then `typeck_tables(def_id)` may or -/// may not succeed. In some cases where this function returns `None` +/// may not succeed. In some cases where this function returns `None` /// (notably closures), `typeck_tables(def_id)` would wind up /// redirecting to the owning function. fn primary_body_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: ast::NodeId) + id: hir::HirId) -> Option<(hir::BodyId, Option<&'tcx hir::FnDecl>)> { - match tcx.hir().get(id) { + match tcx.hir().get_by_hir_id(id) { Node::Item(item) => { match item.node { hir::ItemKind::Const(_, body) | @@ -821,7 +797,7 @@ fn has_typeck_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return tcx.has_typeck_tables(outer_def_id); } - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); primary_body_of(tcx, id).is_some() } @@ -841,8 +817,8 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, return tcx.typeck_tables_of(outer_def_id); } - let id = tcx.hir().as_local_node_id(def_id).unwrap(); - let span = tcx.hir().span(id); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let span = tcx.hir().span_by_hir_id(id); // Figure out what primary body this item has. let (body_id, fn_decl) = primary_body_of(tcx, id).unwrap_or_else(|| { @@ -862,14 +838,14 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, tcx.liberate_late_bound_regions(def_id, &fn_sig); let fn_sig = inh.normalize_associated_types_in(body.value.span, - body_id.node_id, + body_id.hir_id, param_env, &fn_sig); let fcx = check_fn(&inh, param_env, fn_sig, decl, id, body, None).0; fcx } else { - let fcx = FnCtxt::new(&inh, param_env, body.value.id); + let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id); let expected_type = tcx.type_of(def_id); let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type); fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized); @@ -926,8 +902,8 @@ fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Consistency check our TypeckTables instance can hold all ItemLocalIds // it will need to hold. 
- assert_eq!(tables.local_id_root, - Some(DefId::local(tcx.hir().definitions().node_to_hir_id(id).owner))); + assert_eq!(tables.local_id_root, Some(DefId::local(id.owner))); + tables } @@ -940,11 +916,11 @@ fn check_abi<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span, abi: Abi) { struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, - parent_id: ast::NodeId, + parent_id: hir::HirId, } impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> { - fn assign(&mut self, span: Span, nid: ast::NodeId, ty_opt: Option>) -> Ty<'tcx> { + fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option>) -> Ty<'tcx> { match ty_opt { None => { // infer the variable's type @@ -984,38 +960,41 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> { o_ty }; - let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(&revealed_ty); + let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation( + &UserType::Ty(revealed_ty) + ); debug!("visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}", ty.hir_id, o_ty, revealed_ty, c_ty); - self.fcx.tables.borrow_mut().user_provided_tys_mut().insert(ty.hir_id, c_ty); + self.fcx.tables.borrow_mut().user_provided_types_mut().insert(ty.hir_id, c_ty); Some(LocalTy { decl_ty: o_ty, revealed_ty }) }, None => None, }; - self.assign(local.span, local.id, local_ty); + self.assign(local.span, local.hir_id, local_ty); debug!("Local variable {:?} is assigned type {}", local.pat, self.fcx.ty_to_string( - self.fcx.locals.borrow().get(&local.id).unwrap().clone().decl_ty)); + self.fcx.locals.borrow().get(&local.hir_id).unwrap().clone().decl_ty)); intravisit::walk_local(self, local); } // Add pattern bindings. fn visit_pat(&mut self, p: &'gcx hir::Pat) { if let PatKind::Binding(_, _, ident, _) = p.node { - let var_ty = self.assign(p.span, p.id, None); + let var_ty = self.assign(p.span, p.hir_id, None); + let node_id = self.fcx.tcx.hir().hir_to_node_id(p.hir_id); if !self.fcx.tcx.features().unsized_locals { self.fcx.require_type_is_sized(var_ty, p.span, - traits::VariableType(p.id)); + traits::VariableType(node_id)); } debug!("Pattern binding {} is assigned to {} with type {:?}", ident, self.fcx.ty_to_string( - self.fcx.locals.borrow().get(&p.id).unwrap().clone().decl_ty), + self.fcx.locals.borrow().get(&p.hir_id).unwrap().clone().decl_ty), var_ty); } intravisit::walk_pat(self, p); @@ -1023,7 +1002,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> { // Don't descend into the bodies of nested closures fn visit_fn(&mut self, _: intravisit::FnKind<'gcx>, _: &'gcx hir::FnDecl, - _: hir::BodyId, _: Span, _: ast::NodeId) { } + _: hir::BodyId, _: Span, _: hir::HirId) { } } /// When `check_fn` is invoked on a generator (i.e., a body that @@ -1036,7 +1015,7 @@ struct GeneratorTypes<'tcx> { /// Types that are captured (see `GeneratorInterior` for more). interior: ty::Ty<'tcx>, - /// Indicates if the generator is movable or static (immovable) + /// Indicates if the generator is movable or static (immovable). movability: hir::GeneratorMovability, } @@ -1050,7 +1029,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, fn_sig: ty::FnSig<'tcx>, decl: &'gcx hir::FnDecl, - fn_id: ast::NodeId, + fn_id: hir::HirId, body: &'gcx hir::Body, can_be_generator: Option) -> (FnCtxt<'a, 'gcx, 'tcx>, Option>) @@ -1061,7 +1040,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, // Create the function context. 
This is either derived from scratch or, // in the case of closures, based on the outer context. - let mut fcx = FnCtxt::new(inherited, param_env, body.value.id); + let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id); *fcx.ps.borrow_mut() = UnsafetyState::function(fn_sig.unsafety, fn_id); let declared_ret_ty = fn_sig.output(); @@ -1071,7 +1050,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fn_sig = fcx.tcx.mk_fn_sig( fn_sig.inputs().iter().cloned(), revealed_ret_ty, - fn_sig.variadic, + fn_sig.c_variadic, fn_sig.unsafety, fn_sig.abi ); @@ -1084,15 +1063,19 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fcx.yield_ty = Some(yield_ty); } - let outer_def_id = fcx.tcx.closure_base_def_id(fcx.tcx.hir().local_def_id(fn_id)); - let outer_node_id = fcx.tcx.hir().as_local_node_id(outer_def_id).unwrap(); - GatherLocalsVisitor { fcx: &fcx, parent_id: outer_node_id, }.visit_body(body); + let outer_def_id = fcx.tcx.closure_base_def_id(fcx.tcx.hir().local_def_id_from_hir_id(fn_id)); + let outer_hir_id = fcx.tcx.hir().as_local_hir_id(outer_def_id).unwrap(); + GatherLocalsVisitor { fcx: &fcx, parent_id: outer_hir_id, }.visit_body(body); // Add formal parameters. for (arg_ty, arg) in fn_sig.inputs().iter().zip(&body.arguments) { // Check the pattern. - fcx.check_pat_walk(&arg.pat, arg_ty, - ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), true); + fcx.check_pat_walk( + &arg.pat, + arg_ty, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), + None, + ); // Check that argument is Sized. // The check for a non-trivial pattern is a hack to avoid duplicate warnings @@ -1105,8 +1088,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, fcx.write_ty(arg.hir_id, arg_ty); } - let fn_hir_id = fcx.tcx.hir().node_to_hir_id(fn_id); - inherited.tables.borrow_mut().liberated_fn_sigs_mut().insert(fn_hir_id, fn_sig); + inherited.tables.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig); fcx.check_return_expr(&body.value); @@ -1158,26 +1140,25 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, // Check that the main return type implements the termination trait. 
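// A hedged illustration (not part of this patch) of what the termination check
// below enforces: with the `Termination` lang item, the declared return type of
// the entry function may be any type implementing that trait, for example
//
//     fn main() -> Result<(), std::num::ParseIntError> {
//         let _n: i32 = "42".parse()?;
//         Ok(())
//     }
//
// and the `trait_ref.to_predicate()` obligation registered below is exactly the
// `Result<(), ParseIntError>: Termination` requirement for such a program.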
if let Some(term_id) = fcx.tcx.lang_items().termination() { - if let Some((id, _, entry_type)) = *fcx.tcx.sess.entry_fn.borrow() { - if id == fn_id { - if let config::EntryFnType::Main = entry_type { - let substs = fcx.tcx.mk_substs_trait(declared_ret_ty, &[]); - let trait_ref = ty::TraitRef::new(term_id, substs); - let return_ty_span = decl.output.span(); - let cause = traits::ObligationCause::new( - return_ty_span, fn_id, ObligationCauseCode::MainFunctionType); - - inherited.register_predicate( - traits::Obligation::new( - cause, param_env, trait_ref.to_predicate())); - } + if let Some((def_id, EntryFnType::Main)) = fcx.tcx.entry_fn(LOCAL_CRATE) { + let main_id = fcx.tcx.hir().as_local_hir_id(def_id).unwrap(); + if main_id == fn_id { + let substs = fcx.tcx.mk_substs_trait(declared_ret_ty, &[]); + let trait_ref = ty::TraitRef::new(term_id, substs); + let return_ty_span = decl.output.span(); + let cause = traits::ObligationCause::new( + return_ty_span, fn_id, ObligationCauseCode::MainFunctionType); + + inherited.register_predicate( + traits::Obligation::new( + cause, param_env, trait_ref.to_predicate())); } } } // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !` if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() { - if panic_impl_did == fcx.tcx.hir().local_def_id(fn_id) { + if panic_impl_did == fcx.tcx.hir().local_def_id_from_hir_id(fn_id) { if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() { // at this point we don't care if there are duplicate handlers or if the handler has // the wrong signature as this value we'll be used when writing metadata and that @@ -1192,7 +1173,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, } let inputs = fn_sig.inputs(); - let span = fcx.tcx.hir().span(fn_id); + let span = fcx.tcx.hir().span_by_hir_id(fn_id); if inputs.len() == 1 { let arg_is_panic_info = match inputs[0].sty { ty::Ref(region, ty, mutbl) => match ty.sty { @@ -1213,7 +1194,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, ); } - if let Node::Item(item) = fcx.tcx.hir().get(fn_id) { + if let Node::Item(item) = fcx.tcx.hir().get_by_hir_id(fn_id) { if let ItemKind::Fn(_, _, ref generics, _) = item.node { if !generics.params.is_empty() { fcx.tcx.sess.span_err( @@ -1235,7 +1216,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !` if let Some(alloc_error_handler_did) = fcx.tcx.lang_items().oom() { - if alloc_error_handler_did == fcx.tcx.hir().local_def_id(fn_id) { + if alloc_error_handler_did == fcx.tcx.hir().local_def_id_from_hir_id(fn_id) { if let Some(alloc_layout_did) = fcx.tcx.lang_items().alloc_layout() { if declared_ret_ty.sty != ty::Never { fcx.tcx.sess.span_err( @@ -1245,7 +1226,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, } let inputs = fn_sig.inputs(); - let span = fcx.tcx.hir().span(fn_id); + let span = fcx.tcx.hir().span_by_hir_id(fn_id); if inputs.len() == 1 { let arg_is_alloc_layout = match inputs[0].sty { ty::Adt(ref adt, _) => { @@ -1261,7 +1242,7 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>, ); } - if let Node::Item(item) = fcx.tcx.hir().get(fn_id) { + if let Node::Item(item) = fcx.tcx.hir().get_by_hir_id(fn_id) { if let ItemKind::Fn(_, _, ref generics, _) = item.node { if !generics.params.is_empty() { fcx.tcx.sess.span_err( @@ -1286,9 +1267,9 @@ fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a 
Inherited<'a, 'gcx, 'tcx>, } fn check_struct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: ast::NodeId, + id: hir::HirId, span: Span) { - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); let def = tcx.adt_def(def_id); def.destructor(tcx); // force the destructor to be evaluated check_representable(tcx, span, def_id); @@ -1302,9 +1283,9 @@ fn check_struct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - id: ast::NodeId, + id: hir::HirId, span: Span) { - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); let def = tcx.adt_def(def_id); def.destructor(tcx); // force the destructor to be evaluated check_representable(tcx, span, def_id); @@ -1312,30 +1293,51 @@ fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, check_packed(tcx, span, def_id); } +fn check_opaque<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + substs: SubstsRef<'tcx>, + span: Span, +) { + if let Err(partially_expanded_type) = tcx.try_expand_impl_trait_type(def_id, substs) { + let mut err = struct_span_err!( + tcx.sess, span, E0720, + "opaque type expands to a recursive type", + ); + err.span_label(span, "expands to self-referential type"); + if let ty::Opaque(..) = partially_expanded_type.sty { + err.note("type resolves to itself"); + } else { + err.note(&format!("expanded type is `{}`", partially_expanded_type)); + } + err.emit(); + } +} + pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Item) { debug!( - "check_item_type(it.id={}, it.name={})", - it.id, - tcx.item_path_str(tcx.hir().local_def_id(it.id)) + "check_item_type(it.hir_id={}, it.name={})", + it.hir_id, + tcx.def_path_str(tcx.hir().local_def_id_from_hir_id(it.hir_id)) ); let _indenter = indenter(); match it.node { // Consts can play a role in type-checking, so they are included here. hir::ItemKind::Static(..) => { - let def_id = tcx.hir().local_def_id(it.id); + let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); tcx.typeck_tables_of(def_id); maybe_check_static_with_link_section(tcx, def_id, it.span); } hir::ItemKind::Const(..) => { - tcx.typeck_tables_of(tcx.hir().local_def_id(it.id)); + tcx.typeck_tables_of(tcx.hir().local_def_id_from_hir_id(it.hir_id)); } hir::ItemKind::Enum(ref enum_definition, _) => { - check_enum(tcx, it.span, &enum_definition.variants, it.id); + check_enum(tcx, it.span, &enum_definition.variants, it.hir_id); } hir::ItemKind::Fn(..) => {} // entirely within check_item_body hir::ItemKind::Impl(.., ref impl_item_refs) => { - debug!("ItemKind::Impl {} with id {}", it.name, it.id); - let impl_def_id = tcx.hir().local_def_id(it.id); + debug!("ItemKind::Impl {} with id {}", it.ident, it.hir_id); + let impl_def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); if let Some(impl_trait_ref) = tcx.impl_trait_ref(impl_def_id) { check_impl_items_against_trait( tcx, @@ -1349,17 +1351,23 @@ pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Ite } } hir::ItemKind::Trait(..) => { - let def_id = tcx.hir().local_def_id(it.id); + let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); check_on_unimplemented(tcx, def_id, it); } hir::ItemKind::Struct(..) => { - check_struct(tcx, it.id, it.span); + check_struct(tcx, it.hir_id, it.span); } hir::ItemKind::Union(..) => { - check_union(tcx, it.id, it.span); + check_union(tcx, it.hir_id, it.span); } - hir::ItemKind::Existential(..) | hir::ItemKind::Ty(..) 
=> { - let def_id = tcx.hir().local_def_id(it.id); + hir::ItemKind::Existential(..) => { + let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); + + let substs = InternalSubsts::identity_for_item(tcx, def_id); + check_opaque(tcx, def_id, substs, it.span); + } + hir::ItemKind::Ty(..) => { + let def_id = tcx.hir().local_def_id_from_hir_id(it.hir_id); let pty_ty = tcx.type_of(def_id); let generics = tcx.generics_of(def_id); check_bounds_are_used(tcx, &generics, pty_ty); @@ -1377,7 +1385,7 @@ pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Ite } } else { for item in &m.items { - let generics = tcx.generics_of(tcx.hir().local_def_id(item.id)); + let generics = tcx.generics_of(tcx.hir().local_def_id_from_hir_id(item.hir_id)); if generics.params.len() - generics.own_counts().lifetimes != 0 { let mut err = struct_span_err!( tcx.sess, @@ -1396,7 +1404,7 @@ pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Ite } if let hir::ForeignItemKind::Fn(ref fn_decl, _, _) = item.node { - require_c_abi_if_variadic(tcx, fn_decl, m.abi, item.span); + require_c_abi_if_c_variadic(tcx, fn_decl, m.abi, item.span); } } } @@ -1405,7 +1413,7 @@ pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Ite } } -fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) { +fn maybe_check_static_with_link_section(tcx: TyCtxt<'_, '_, '_>, id: DefId, span: Span) { // Only restricted on wasm32 target for now if !tcx.sess.opts.target_triple.triple().starts_with("wasm32") { return @@ -1429,7 +1437,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) { }; let param_env = ty::ParamEnv::reveal_all(); if let Ok(static_) = tcx.const_eval(param_env.and(cid)) { - let alloc = if let ConstValue::ByRef(_, allocation, _) = static_.val { + let alloc = if let ConstValue::ByRef(_, allocation) = static_.val { allocation } else { bug!("Matching on non-ByRef static") @@ -1446,7 +1454,7 @@ fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) { fn check_on_unimplemented<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_def_id: DefId, item: &hir::Item) { - let item_def_id = tcx.hir().local_def_id(item.id); + let item_def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id); // an error would be reported if this fails. 
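// A minimal sketch (illustrative only, not taken from this patch) of the kind of
// item the new `check_opaque` rejects: an opaque type whose concrete type can
// only be inferred to mention itself, reported as E0720.
//
//     fn quux() -> impl Sized {
//         quux() // error[E0720]: opaque type expands to a recursive type
//     }
//
// `try_expand_impl_trait_type` fails to fully expand the opaque type here, so
// the diagnostic labels the written return type as self-referential.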
let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item_def_id); } @@ -1524,7 +1532,8 @@ fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // Check existing impl methods to see if they are both present in trait // and compatible with trait signature for impl_item in impl_items() { - let ty_impl_item = tcx.associated_item(tcx.hir().local_def_id(impl_item.id)); + let ty_impl_item = tcx.associated_item( + tcx.hir().local_def_id_from_hir_id(impl_item.hir_id)); let ty_trait_item = tcx.associated_items(impl_trait_ref.def_id) .find(|ac| Namespace::from(&impl_item.node) == Namespace::from(ac.kind) && tcx.hygienic_eq(ty_impl_item.ident, ac.ident, impl_trait_ref.def_id)) @@ -1779,7 +1788,7 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De // For each field, figure out if it's known to be a ZST and align(1) let field_infos = adt.non_enum_variant().fields.iter().map(|field| { - let ty = field.ty(tcx, Substs::identity_for_item(tcx, field.did)); + let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did)); let param_env = tcx.param_env(field.did); let layout = tcx.layout_of(param_env.and(ty)); // We are currently checking the type this field came from, so it must be local @@ -1811,8 +1820,8 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, vs: &'tcx [hir::Variant], - id: ast::NodeId) { - let def_id = tcx.hir().local_def_id(id); + id: hir::HirId) { + let def_id = tcx.hir().local_def_id_from_hir_id(id); let def = tcx.adt_def(def_id); def.destructor(tcx); // force the destructor to be evaluated @@ -1840,7 +1849,7 @@ pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for v in vs { if let Some(ref e) = v.node.disr_expr { - tcx.typeck_tables_of(tcx.hir().local_def_id(e.id)); + tcx.typeck_tables_of(tcx.hir().local_def_id_from_hir_id(e.hir_id)); } } @@ -1848,15 +1857,15 @@ pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for ((_, discr), v) in def.discriminants(tcx).zip(vs) { // Check for duplicate discriminant values if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) { - let variant_did = def.variants[VariantIdx::new(i)].did; - let variant_i_node_id = tcx.hir().as_local_node_id(variant_did).unwrap(); - let variant_i = tcx.hir().expect_variant(variant_i_node_id); + let variant_did = def.variants[VariantIdx::new(i)].def_id; + let variant_i_hir_id = tcx.hir().as_local_hir_id(variant_did).unwrap(); + let variant_i = tcx.hir().expect_variant(variant_i_hir_id); let i_span = match variant_i.node.disr_expr { - Some(ref expr) => tcx.hir().span(expr.id), - None => tcx.hir().span(variant_i_node_id) + Some(ref expr) => tcx.hir().span_by_hir_id(expr.hir_id), + None => tcx.hir().span_by_hir_id(variant_i_hir_id) }; let span = match v.node.disr_expr { - Some(ref expr) => tcx.hir().span(expr.id), + Some(ref expr) => tcx.hir().span_by_hir_id(expr.hir_id), None => v.span }; struct_span_err!(tcx.sess, span, E0081, @@ -1871,6 +1880,16 @@ pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, check_representable(tcx, sp, def_id); } +fn report_unexpected_variant_def<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + def: &Def, + span: Span, + qpath: &QPath) { + span_err!(tcx.sess, span, E0533, + "expected unit struct/variant or constant, found {} `{}`", + def.kind_name(), + hir::print::to_string(tcx.hir(), |s| s.print_qpath(qpath, false))); +} + impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { fn 
tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx } @@ -1878,9 +1897,9 @@ impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> { -> Lrc> { let tcx = self.tcx; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let item_id = tcx.hir().ty_param_owner(node_id); - let item_def_id = tcx.hir().local_def_id(item_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let item_id = tcx.hir().ty_param_owner(hir_id); + let item_def_id = tcx.hir().local_def_id_from_hir_id(item_id); let generics = tcx.generics_of(item_def_id); let index = generics.param_def_id_to_index[&def_id]; Lrc::new(ty::GenericPredicates { @@ -1978,16 +1997,17 @@ enum TupleArgumentsFlag { impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>, param_env: ty::ParamEnv<'tcx>, - body_id: ast::NodeId) + body_id: hir::HirId) -> FnCtxt<'a, 'gcx, 'tcx> { FnCtxt { body_id, param_env, err_count_on_creation: inh.tcx.sess.err_count(), ret_coercion: None, + ret_coercion_span: RefCell::new(None), yield_ty: None, ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal, - ast::CRATE_NODE_ID)), + hir::CRATE_HIR_ID)), diverges: Cell::new(Diverges::Maybe), has_errors: Cell::new(false), enclosing_breakables: RefCell::new(EnclosingBreakables { @@ -2006,15 +2026,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.sess.err_count() - self.err_count_on_creation } - /// Produce warning on the given node, if the current point in the + /// Produces warning on the given node, if the current point in the /// function is unreachable, and there hasn't been another warning. - fn warn_if_unreachable(&self, id: ast::NodeId, span: Span, kind: &str) { + fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) { if self.diverges.get() == Diverges::Always { self.diverges.set(Diverges::WarnedAlways); debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind); - self.tcx().lint_node( + self.tcx().lint_hir( lint::builtin::UNREACHABLE_CODE, id, span, &format!("unreachable {}", kind)); @@ -2079,14 +2099,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } pub fn tag(&self) -> String { - let self_ptr: *const FnCtxt = self; + let self_ptr: *const FnCtxt<'_, '_, '_> = self; format!("{:?}", self_ptr) } - pub fn local_ty(&self, span: Span, nid: ast::NodeId) -> LocalTy<'tcx> { + pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> { self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| span_bug!(span, "no type for local variable {}", - self.tcx.hir().node_to_string(nid)) + self.tcx.hir().hir_to_string(nid)) ) } @@ -2102,13 +2122,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - pub fn write_field_index(&self, node_id: ast::NodeId, index: usize) { - let hir_id = self.tcx.hir().node_to_hir_id(node_id); + pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) { self.tables.borrow_mut().field_indices_mut().insert(hir_id, index); } - // The NodeId and the ItemLocalId must identify the same item. We just pass - // both of them for consistency checking. 
pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) { @@ -2137,28 +2154,32 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if !method.substs.is_noop() { let method_generics = self.tcx.generics_of(method.def_id); if !method_generics.params.is_empty() { - let user_substs = self.infcx.probe(|_| { - let just_method_substs = Substs::for_item(self.tcx, method.def_id, |param, _| { - let i = param.index as usize; - if i < method_generics.parent_count { - self.infcx.var_for_def(DUMMY_SP, param) - } else { - method.substs[i] - } - }); - self.infcx.canonicalize_user_type_annotation(&UserSubsts { - substs: just_method_substs, + let user_type_annotation = self.infcx.probe(|_| { + let user_substs = UserSubsts { + substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| { + let i = param.index as usize; + if i < method_generics.parent_count { + self.infcx.var_for_def(DUMMY_SP, param) + } else { + method.substs[i] + } + }), user_self_ty: None, // not relevant here - }) + }; + + self.infcx.canonicalize_user_type_annotation(&UserType::TypeOf( + method.def_id, + user_substs, + )) }); - debug!("write_method_call: user_substs = {:?}", user_substs); - self.write_user_substs(hir_id, user_substs); + debug!("write_method_call: user_type_annotation={:?}", user_type_annotation); + self.write_user_type_annotation(hir_id, user_type_annotation); } } } - pub fn write_substs(&self, node_id: hir::HirId, substs: &'tcx Substs<'tcx>) { + pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) { if !substs.is_noop() { debug!("write_substs({:?}, {:?}) in fcx {}", node_id, @@ -2176,41 +2197,47 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// This should be invoked **before any unifications have /// occurred**, so that annotations like `Vec<_>` are preserved /// properly. 
- pub fn write_user_substs_from_substs( + pub fn write_user_type_annotation_from_substs( &self, hir_id: hir::HirId, - substs: &'tcx Substs<'tcx>, + def_id: DefId, + substs: SubstsRef<'tcx>, user_self_ty: Option>, ) { debug!( - "write_user_substs_from_substs({:?}, {:?}) in fcx {}", - hir_id, - substs, - self.tag(), + "write_user_type_annotation_from_substs: hir_id={:?} def_id={:?} substs={:?} \ + user_self_ty={:?} in fcx {}", + hir_id, def_id, substs, user_self_ty, self.tag(), ); - if !substs.is_noop() { - let user_substs = self.infcx.canonicalize_user_type_annotation(&UserSubsts { - substs, - user_self_ty, - }); - debug!("instantiate_value_path: user_substs = {:?}", user_substs); - self.write_user_substs(hir_id, user_substs); + if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) { + let canonicalized = self.infcx.canonicalize_user_type_annotation( + &UserType::TypeOf(def_id, UserSubsts { + substs, + user_self_ty, + }) + ); + debug!("write_user_type_annotation_from_substs: canonicalized={:?}", canonicalized); + self.write_user_type_annotation(hir_id, canonicalized); } } - pub fn write_user_substs(&self, hir_id: hir::HirId, substs: CanonicalUserSubsts<'tcx>) { + pub fn write_user_type_annotation( + &self, + hir_id: hir::HirId, + canonical_user_type_annotation: CanonicalUserType<'tcx>, + ) { debug!( - "write_user_substs({:?}, {:?}) in fcx {}", - hir_id, - substs, - self.tag(), + "write_user_type_annotation: hir_id={:?} canonical_user_type_annotation={:?} tag={}", + hir_id, canonical_user_type_annotation, self.tag(), ); - if !substs.is_identity() { - self.tables.borrow_mut().user_substs_mut().insert(hir_id, substs); + if !canonical_user_type_annotation.is_identity() { + self.tables.borrow_mut().user_provided_types_mut().insert( + hir_id, canonical_user_type_annotation + ); } else { - debug!("write_user_substs: skipping identity substs"); + debug!("write_user_type_annotation: skipping identity substs"); } } @@ -2255,7 +2282,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// types as well. This function combines the two. fn instantiate_type_scheme(&self, span: Span, - substs: &Substs<'tcx>, + substs: SubstsRef<'tcx>, value: &T) -> T where T : TypeFoldable<'tcx> @@ -2271,7 +2298,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// As `instantiate_type_scheme`, but for the bounds found in a /// generic type scheme. - fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: &Substs<'tcx>) + fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: SubstsRef<'tcx>) -> ty::InstantiatedPredicates<'tcx> { let bounds = self.tcx.predicates_of(def_id); let result = bounds.instantiate(self.tcx, substs); @@ -2283,15 +2310,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { result } - /// Replace the opaque types from the given value with type variables, + /// Replaces the opaque types from the given value with type variables, /// and records the `OpaqueTypeMap` for later use during writeback. See /// `InferCtxt::instantiate_opaque_types` for more details. 
fn instantiate_opaque_types_from_value>( &self, - parent_id: ast::NodeId, + parent_id: hir::HirId, value: &T, ) -> T { - let parent_def_id = self.tcx.hir().local_def_id(parent_id); + let parent_def_id = self.tcx.hir().local_def_id_from_hir_id(parent_id); debug!("instantiate_opaque_types_from_value(parent_def_id={:?}, value={:?})", parent_def_id, value); @@ -2376,22 +2403,35 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> { let ty = self.to_ty(ast_ty); + debug!("to_ty_saving_user_provided_ty: ty={:?}", ty); - // If the type given by the user has free regions, save it for - // later, since NLL would like to enforce those. Also pass in - // types that involve projections, since those can resolve to - // `'static` bounds (modulo #54940, which hopefully will be - // fixed by the time you see this comment, dear reader, - // although I have my doubts). Other sorts of things are - // already sufficiently enforced with erased regions. =) - if ty.has_free_regions() || ty.has_projections() { - let c_ty = self.infcx.canonicalize_response(&ty); - self.tables.borrow_mut().user_provided_tys_mut().insert(ast_ty.hir_id, c_ty); + if Self::can_contain_user_lifetime_bounds(ty) { + let c_ty = self.infcx.canonicalize_response(&UserType::Ty(ty)); + debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty); + self.tables.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty); } ty } + pub fn to_const(&self, ast_c: &hir::AnonConst, ty: Ty<'tcx>) -> &'tcx ty::Const<'tcx> { + AstConv::ast_const_to_const(self, ast_c, ty) + } + + // If the type given by the user has free regions, save it for later, since + // NLL would like to enforce those. Also pass in types that involve + // projections, since those can resolve to `'static` bounds (modulo #54940, + // which hopefully will be fixed by the time you see this comment, dear + // reader, although I have my doubts). Also pass in types with inference + // types, because they may be repeated. Other sorts of things are already + // sufficiently enforced with erased regions. =) + fn can_contain_user_lifetime_bounds(t: T) -> bool + where + T: TypeFoldable<'tcx> + { + t.has_free_regions() || t.has_projections() || t.has_infer_types() + } + pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> { match self.tables.borrow().node_types().get(id) { Some(&t) => t, @@ -2420,7 +2460,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } /// Registers obligations that all types appearing in `substs` are well-formed. 
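// `can_contain_user_lifetime_bounds` above decides which user-written types get
// saved into `user_provided_types` so that NLL can enforce them later. A rough
// sketch (illustrative, not from this patch) of an annotation that has to be
// preserved rather than erased:
//
//     fn f() {
//         // The user-written `'static` is a lifetime bound that borrowck must
//         // honour, so the substs for this path are canonicalized and recorded
//         // as a user type annotation.
//         let _v = Vec::<&'static str>::new();
//     }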
- pub fn add_wf_bounds(&self, substs: &Substs<'tcx>, expr: &hir::Expr) { + pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr) { for ty in substs.types() { self.register_wf_obligation(ty, expr.span, traits::MiscObligation); } @@ -2464,7 +2504,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn field_ty(&self, span: Span, field: &'tcx ty::FieldDef, - substs: &Substs<'tcx>) + substs: SubstsRef<'tcx>) -> Ty<'tcx> { self.normalize_associated_types_in(span, &field.ty(self.tcx, substs)) @@ -2504,7 +2544,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Neither if self.type_var_diverges(ty) => self.tcx.mk_diverging_default(), Neither => return false, }; - debug!("default_type_parameters: defaulting `{:?}` to `{:?}`", ty, fallback); + debug!("fallback_if_possible: defaulting `{:?}` to `{:?}`", ty, fallback); self.demand_eqtype(syntax_pos::DUMMY_SP, ty, fallback); true } @@ -2555,7 +2595,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { while result.is_none() && autoderef.next().is_some() { result = self.try_index_step(expr, base_expr, &autoderef, needs, idx_ty); } - autoderef.finalize(); + autoderef.finalize(self); result } @@ -2572,7 +2612,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { index_ty: Ty<'tcx>) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> { - let adjusted_ty = autoderef.unambiguous_final_ty(); + let adjusted_ty = autoderef.unambiguous_final_ty(self); debug!("try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \ index_ty={:?})", expr, @@ -2602,7 +2642,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!("try_index_step: success, using overloaded indexing"); let method = self.register_infer_ok_obligations(ok); - let mut adjustments = autoderef.adjust_steps(needs); + let mut adjustments = autoderef.adjust_steps(self, needs); if let ty::Ref(region, _, r_mutbl) = method.sig.inputs()[0].sty { let mutbl = match r_mutbl { hir::MutImmutable => AutoBorrowMutability::Immutable, @@ -2726,11 +2766,77 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { &method.sig.inputs()[1..] ); self.check_argument_types(sp, expr_sp, &method.sig.inputs()[1..], &expected_arg_tys[..], - args_no_rcvr, method.sig.variadic, tuple_arguments, + args_no_rcvr, method.sig.c_variadic, tuple_arguments, self.tcx.hir().span_if_local(method.def_id)); method.sig.output() } + fn self_type_matches_expected_vid( + &self, + trait_ref: ty::PolyTraitRef<'tcx>, + expected_vid: ty::TyVid, + ) -> bool { + let self_ty = self.shallow_resolve(trait_ref.self_ty()); + debug!( + "self_type_matches_expected_vid(trait_ref={:?}, self_ty={:?}, expected_vid={:?})", + trait_ref, self_ty, expected_vid + ); + match self_ty.sty { + ty::Infer(ty::TyVar(found_vid)) => { + // FIXME: consider using `sub_root_var` here so we + // can see through subtyping. + let found_vid = self.root_var(found_vid); + debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid); + expected_vid == found_vid + } + _ => false + } + } + + fn obligations_for_self_ty<'b>(&'b self, self_ty: ty::TyVid) + -> impl Iterator, traits::PredicateObligation<'tcx>)> + + Captures<'gcx> + 'b + { + // FIXME: consider using `sub_root_var` here so we + // can see through subtyping. 
+ let ty_var_root = self.root_var(self_ty); + debug!("obligations_for_self_ty: self_ty={:?} ty_var_root={:?} pending_obligations={:?}", + self_ty, ty_var_root, + self.fulfillment_cx.borrow().pending_obligations()); + + self.fulfillment_cx + .borrow() + .pending_obligations() + .into_iter() + .filter_map(move |obligation| match obligation.predicate { + ty::Predicate::Projection(ref data) => + Some((data.to_poly_trait_ref(self.tcx), obligation)), + ty::Predicate::Trait(ref data) => + Some((data.to_poly_trait_ref(), obligation)), + ty::Predicate::Subtype(..) => None, + ty::Predicate::RegionOutlives(..) => None, + ty::Predicate::TypeOutlives(..) => None, + ty::Predicate::WellFormed(..) => None, + ty::Predicate::ObjectSafe(..) => None, + ty::Predicate::ConstEvaluatable(..) => None, + // N.B., this predicate is created by breaking down a + // `ClosureType: FnFoo()` predicate, where + // `ClosureType` represents some `Closure`. It can't + // possibly be referring to the current closure, + // because we haven't produced the `Closure` for + // this closure yet; this is exactly why the other + // code is looking for a self type of a unresolved + // inference variable. + ty::Predicate::ClosureKind(..) => None, + }).filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root)) + } + + fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool { + self.obligations_for_self_ty(self_ty).any(|(tr, _)| { + Some(tr.def_id()) == self.tcx.lang_items().sized_trait() + }) + } + /// Generic function that factors out common logic from function calls, /// method calls and overloaded operators. fn check_argument_types(&self, @@ -2739,7 +2845,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn_inputs: &[Ty<'tcx>], mut expected_arg_tys: &[Ty<'tcx>], args: &'gcx [hir::Expr], - variadic: bool, + c_variadic: bool, tuple_arguments: TupleArgumentsFlag, def_span: Option) { let tcx = self.tcx; @@ -2763,11 +2869,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let param_count_error = |expected_count: usize, arg_count: usize, error_code: &str, - variadic: bool, + c_variadic: bool, sugg_unit: bool| { let mut err = tcx.sess.struct_span_err_with_code(sp, &format!("this function takes {}{} but {} {} supplied", - if variadic {"at least "} else {""}, + if c_variadic { "at least " } else { "" }, potentially_plural_count(expected_count, "parameter"), potentially_plural_count(arg_count, "parameter"), if arg_count == 1 {"was"} else {"were"}), @@ -2780,14 +2886,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let sugg_span = tcx.sess.source_map().end_point(expr_sp); // remove closing `)` from the span let sugg_span = sugg_span.shrink_to_lo(); - err.span_suggestion_with_applicability( + err.span_suggestion( sugg_span, "expected the unit value `()`; create it with empty parentheses", String::from("()"), Applicability::MachineApplicable); } else { err.span_label(sp, format!("expected {}{}", - if variadic {"at least "} else {""}, + if c_variadic { "at least " } else { "" }, potentially_plural_count(expected_count, "parameter"))); } err.emit(); @@ -2821,7 +2927,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } else if expected_arg_count == supplied_arg_count { fn_inputs.to_vec() - } else if variadic { + } else if c_variadic { if supplied_arg_count >= expected_arg_count { fn_inputs.to_vec() } else { @@ -2868,10 +2974,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.select_obligations_where_possible(false); } - // For variadic functions, we don't have a declared type for all of + // For C-variadic functions, we 
don't have a declared type for all of // the arguments hence we only do our usual type checking with // the arguments who's types we do know. - let t = if variadic { + let t = if c_variadic { expected_arg_count } else if tuple_arguments == TupleArguments { args.len() @@ -2883,11 +2989,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Closure arguments themselves can't be diverging, but // a previous argument can, e.g., `foo(panic!(), || {})`. if !check_closures { - self.warn_if_unreachable(arg.id, arg.span, "expression"); + self.warn_if_unreachable(arg.hir_id, arg.span, "expression"); } let is_closure = match arg.node { - hir::ExprKind::Closure(..) => true, + ExprKind::Closure(..) => true, _ => false }; @@ -2920,9 +3026,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // We also need to make sure we at least write the ty of the other // arguments which we skipped above. - if variadic { + if c_variadic { fn variadic_error<'tcx>(s: &Session, span: Span, t: Ty<'tcx>, cast_ty: &str) { - use structured_errors::{VariadicError, StructuredDiagnostic}; + use crate::structured_errors::{VariadicError, StructuredDiagnostic}; VariadicError::new(s, span, t, cast_ty).diagnostic().emit(); } @@ -2985,8 +3091,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => None } }); - opt_ty.unwrap_or_else( - || tcx.mk_int_var(self.next_int_var_id())) + opt_ty.unwrap_or_else(|| self.next_int_var()) } ast::LitKind::Float(_, t) => tcx.mk_mach_float(t), ast::LitKind::FloatUnsuffixed(_) => { @@ -2996,10 +3101,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => None } }); - opt_ty.unwrap_or_else( - || tcx.mk_float_var(self.next_float_var_id())) + opt_ty.unwrap_or_else(|| self.next_float_var()) } - ast::LitKind::Bool(_) => tcx.types.bool + ast::LitKind::Bool(_) => tcx.types.bool, + ast::LitKind::Err(_) => tcx.types.err, } } @@ -3037,25 +3142,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) { - // Add help to type error if this is an `if` condition with an assignment - if let (ExpectIfCondition, &hir::ExprKind::Assign(ref lhs, ref rhs)) - = (expected, &expr.node) - { - let msg = "try comparing for equality"; - if let (Ok(left), Ok(right)) = ( - self.tcx.sess.source_map().span_to_snippet(lhs.span), - self.tcx.sess.source_map().span_to_snippet(rhs.span)) - { - err.span_suggestion_with_applicability( - expr.span, - msg, - format!("{} == {}", left, right), - Applicability::MaybeIncorrect); - } else { - err.help(msg); - } + if self.is_assign_to_bool(expr, expected_ty) { + // Error reported in `check_assign` so avoid emitting error again. + // FIXME(centril): Consider removing if/when `if` desugars to `match`. + err.delay_as_bug(); + } else { + err.emit(); } - err.emit(); } ty } @@ -3118,7 +3211,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { Some(ret) => ret, None => return Vec::new() }; - let expect_args = self.fudge_regions_if_ok(&RegionVariableOrigin::Coercion(call_span), || { + let expect_args = self.fudge_inference_if_ok(|| { // Attempt to apply a subtyping relationship between the formal // return type (likely containing type variables if the function // is polymorphic) and the expected return type. 
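// With `ExpectIfCondition` gone, an `if` condition is simply checked against
// `bool`; when the condition is an accidental assignment, the error is produced
// by `check_assign`, and `is_assign_to_bool` above only suppresses the duplicate
// mismatched-types diagnostic. A rough sketch (illustrative, not part of the
// patch) of the affected pattern:
//
//     fn main() {
//         let mut x = 0;
//         if x = 5 {}  // mismatched types: expected `bool`, found `()`,
//                      // with a suggestion to compare with `x == 5` instead
//     }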
@@ -3184,7 +3277,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.report_method_error(span, rcvr_t, segment.ident, - Some(rcvr), + SelfSource::MethodCall(rcvr), error, Some(args)); } @@ -3213,7 +3306,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ret_coercion.borrow_mut() .coerce(self, &self.cause(return_expr.span, - ObligationCauseCode::ReturnType(return_expr.id)), + ObligationCauseCode::ReturnType(return_expr.hir_id)), return_expr, return_expr_ty); } @@ -3226,7 +3319,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { opt_else_expr: Option<&'gcx hir::Expr>, sp: Span, expected: Expectation<'tcx>) -> Ty<'tcx> { - let cond_ty = self.check_expr_meets_expectation_or_error(cond_expr, ExpectIfCondition); + let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool); let cond_diverges = self.diverges.get(); self.diverges.set(Diverges::Maybe); @@ -3242,22 +3335,126 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // (`only_has_type`); otherwise, we just go with a // fresh type variable. let coerce_to_ty = expected.coercion_target_type(self, sp); - let mut coerce: DynamicCoerceMany = CoerceMany::new(coerce_to_ty); + let mut coerce: DynamicCoerceMany<'_, '_> = CoerceMany::new(coerce_to_ty); - let if_cause = self.cause(sp, ObligationCauseCode::IfExpression); - coerce.coerce(self, &if_cause, then_expr, then_ty); + coerce.coerce(self, &self.misc(sp), then_expr, then_ty); if let Some(else_expr) = opt_else_expr { let else_ty = self.check_expr_with_expectation(else_expr, expected); let else_diverges = self.diverges.get(); + let mut outer_sp = if self.tcx.sess.source_map().is_multiline(sp) { + // The `if`/`else` isn't in one line in the output, include some context to make it + // clear it is an if/else expression: + // ``` + // LL | let x = if true { + // | _____________- + // LL || 10i32 + // || ----- expected because of this + // LL || } else { + // LL || 10u32 + // || ^^^^^ expected i32, found u32 + // LL || }; + // ||_____- if and else have incompatible types + // ``` + Some(sp) + } else { + // The entire expression is in one line, only point at the arms + // ``` + // LL | let x = if true { 10i32 } else { 10u32 }; + // | ----- ^^^^^ expected i32, found u32 + // | | + // | expected because of this + // ``` + None + }; + let mut remove_semicolon = None; + let error_sp = if let ExprKind::Block(block, _) = &else_expr.node { + if let Some(expr) = &block.expr { + expr.span + } else if let Some(stmt) = block.stmts.last() { + // possibly incorrect trailing `;` in the else arm + remove_semicolon = self.could_remove_semicolon(block, then_ty); + stmt.span + } else { // empty block, point at its entirety + // Avoid overlapping spans that aren't as readable: + // ``` + // 2 | let x = if true { + // | _____________- + // 3 | | 3 + // | | - expected because of this + // 4 | | } else { + // | |____________^ + // 5 | || + // 6 | || }; + // | || ^ + // | ||_____| + // | |______if and else have incompatible types + // | expected integer, found () + // ``` + // by not pointing at the entire expression: + // ``` + // 2 | let x = if true { + // | ------- if and else have incompatible types + // 3 | 3 + // | - expected because of this + // 4 | } else { + // | ____________^ + // 5 | | + // 6 | | }; + // | |_____^ expected integer, found () + // ``` + if outer_sp.is_some() { + outer_sp = Some(self.tcx.sess.source_map().def_span(sp)); + } + else_expr.span + } + } else { // shouldn't happen unless the parser has done something weird + else_expr.span + }; + let then_sp = if let 
ExprKind::Block(block, _) = &then_expr.node { + if let Some(expr) = &block.expr { + expr.span + } else if let Some(stmt) = block.stmts.last() { + // possibly incorrect trailing `;` in the else arm + remove_semicolon = remove_semicolon.or( + self.could_remove_semicolon(block, else_ty)); + stmt.span + } else { // empty block, point at its entirety + outer_sp = None; // same as in `error_sp`, cleanup output + then_expr.span + } + } else { // shouldn't happen unless the parser has done something weird + then_expr.span + }; + + let if_cause = self.cause(error_sp, ObligationCauseCode::IfExpression { + then: then_sp, + outer: outer_sp, + semicolon: remove_semicolon, + }); + coerce.coerce(self, &if_cause, else_expr, else_ty); // We won't diverge unless both branches do (or the condition does). self.diverges.set(cond_diverges | then_diverges & else_diverges); } else { + // If this `if` expr is the parent's function return expr, the cause of the type + // coercion is the return type, point at it. (#25228) + let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, sp); + let else_cause = self.cause(sp, ObligationCauseCode::IfExpressionWithNoElse); - coerce.coerce_forced_unit(self, &else_cause, &mut |_| (), true); + coerce.coerce_forced_unit(self, &else_cause, &mut |err| { + if let Some((sp, msg)) = &ret_reason { + err.span_label(*sp, msg.as_str()); + } else if let ExprKind::Block(block, _) = &then_expr.node { + if let Some(expr) = &block.expr { + err.span_label(expr.span, "found here".to_string()); + } + } + err.note("`if` expressions without `else` evaluate to `()`"); + err.help("consider adding an `else` block that evaluates to the expected type"); + }, ret_reason.is_none()); // If the condition is false we can't diverge. self.diverges.set(cond_diverges); @@ -3271,6 +3468,37 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } + fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> { + let node = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_node_by_hir_id( + self.tcx.hir().get_parent_node_by_hir_id(hir_id), + )); + if let Node::Block(block) = node { + // check that the body's parent is an fn + let parent = self.tcx.hir().get_by_hir_id( + self.tcx.hir().get_parent_node_by_hir_id( + self.tcx.hir().get_parent_node_by_hir_id(block.hir_id), + ), + ); + if let (Some(expr), Node::Item(hir::Item { + node: hir::ItemKind::Fn(..), .. + })) = (&block.expr, parent) { + // check that the `if` expr without `else` is the fn body's expr + if expr.span == sp { + return self.get_fn_decl(hir_id).map(|(fn_decl, _)| ( + fn_decl.output.span(), + format!("expected `{}` because of this return type", fn_decl.output), + )); + } + } + } + if let Node::Local(hir::Local { + ty: Some(_), pat, .. + }) = node { + return Some((pat.span, "expected because of this assignment".to_string())); + } + None + } + // Check field access expressions fn check_field(&self, expr: &'gcx hir::Expr, @@ -3294,13 +3522,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let field_ty = self.field_ty(expr.span, field, substs); // Save the index of all fields regardless of their visibility in case // of error recovery. 
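// `maybe_get_coercion_reason` above lets the missing-`else` diagnostic point at
// the enclosing function's return type (or at a `let` with an explicit type),
// and the forced unit coercion now notes that `if` expressions without `else`
// evaluate to `()`. A small sketch (illustrative, not from this patch) of code
// that hits this path:
//
//     fn answer() -> i32 {
//         if true { 42 }
//         // error[E0317]: if may be missing an else clause
//         // expected `i32` because of this return type; an `if` without
//         // `else` evaluates to `()`
//     }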
- self.write_field_index(expr.id, index); + self.write_field_index(expr.hir_id, index); if field.vis.is_accessible_from(def_scope, self.tcx) { - let adjustments = autoderef.adjust_steps(needs); + let adjustments = autoderef.adjust_steps(self, needs); self.apply_adjustments(base, adjustments); - autoderef.finalize(); + autoderef.finalize(self); - self.tcx.check_stability(field.did, Some(expr.id), expr.span); + self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span); return field_ty; } private_candidate = Some((base_def.did, field_ty)); @@ -3311,11 +3539,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Ok(index) = fstr.parse::() { if fstr == index.to_string() { if let Some(field_ty) = tys.get(index) { - let adjustments = autoderef.adjust_steps(needs); + let adjustments = autoderef.adjust_steps(self, needs); self.apply_adjustments(base, adjustments); - autoderef.finalize(); + autoderef.finalize(self); - self.write_field_index(expr.id, index); + self.write_field_index(expr.hir_id, index); return field_ty; } } @@ -3324,27 +3552,47 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => {} } } - autoderef.unambiguous_final_ty(); + autoderef.unambiguous_final_ty(self); if let Some((did, field_ty)) = private_candidate { - let struct_path = self.tcx().item_path_str(did); + let struct_path = self.tcx().def_path_str(did); let mut err = struct_span_err!(self.tcx().sess, expr.span, E0616, "field `{}` of struct `{}` is private", field, struct_path); // Also check if an accessible method exists, which is often what is meant. - if self.method_exists(field, expr_t, expr.id, false) { - err.note(&format!("a method `{}` also exists, perhaps you wish to call it", field)); + if self.method_exists(field, expr_t, expr.hir_id, false) + && !self.expr_in_place(expr.hir_id) + { + self.suggest_method_call( + &mut err, + &format!("a method `{}` also exists, call it with parentheses", field), + field, + expr_t, + expr.hir_id, + ); } err.emit(); field_ty } else if field.name == keywords::Invalid.name() { self.tcx().types.err - } else if self.method_exists(field, expr_t, expr.id, true) { - type_error_struct!(self.tcx().sess, field.span, expr_t, E0615, + } else if self.method_exists(field, expr_t, expr.hir_id, true) { + let mut err = type_error_struct!(self.tcx().sess, field.span, expr_t, E0615, "attempted to take value of method `{}` on type `{}`", - field, expr_t) - .help("maybe a `()` to call it is missing?") - .emit(); + field, expr_t); + + if !self.expr_in_place(expr.hir_id) { + self.suggest_method_call( + &mut err, + "use parentheses to call the method", + field, + expr_t, + expr.hir_id + ); + } else { + err.help("methods are immutable and cannot be assigned to"); + } + + err.emit(); self.tcx().types.err } else { if !expr_t.is_primitive_ty() { @@ -3355,8 +3603,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(suggested_field_name) = Self::suggest_field_name(def.non_enum_variant(), &field.as_str(), vec![]) { - err.span_label(field.span, - format!("did you mean `{}`?", suggested_field_name)); + err.span_suggestion( + field.span, + "a field with a similar name exists", + suggested_field_name.to_string(), + Applicability::MaybeIncorrect, + ); } else { err.span_label(field.span, "unknown field"); let struct_variant_def = def.non_enum_variant(); @@ -3372,7 +3624,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { len.assert_usize(self.tcx), field.as_str().parse::() ) { - let base = self.tcx.hir().node_to_pretty_string(base.id); + let base = self.tcx.sess.source_map() + 
.span_to_snippet(base.span) + .unwrap_or_else(|_| + self.tcx.hir().hir_to_pretty_string(base.hir_id)); let help = "instead of using tuple indexing, use array indexing"; let suggestion = format!("{}[{}]", base, field); let applicability = if len < user_index { @@ -3380,17 +3635,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } else { Applicability::MaybeIncorrect }; - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, help, suggestion, applicability ); } } ty::RawPtr(..) => { - let base = self.tcx.hir().node_to_pretty_string(base.id); - let msg = format!("`{}` is a native pointer; try dereferencing it", base); + let base = self.tcx.sess.source_map() + .span_to_snippet(base.span) + .unwrap_or_else(|_| self.tcx.hir().hir_to_pretty_string(base.hir_id)); + let msg = format!("`{}` is a raw pointer; try dereferencing it", base); let suggestion = format!("(*{}).{}", base, field); - err.span_suggestion_with_applicability( - field.span, + err.span_suggestion( + expr.span, &msg, suggestion, Applicability::MaybeIncorrect, @@ -3416,7 +3673,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let names = variant.fields.iter().filter_map(|field| { // ignore already set fields and private fields from non-local crates if skip.iter().any(|x| *x == field.ident.as_str()) || - (variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) { + (!variant.def_id.is_local() && field.vis != Visibility::Public) + { None } else { Some(&field.ident.name) @@ -3428,7 +3686,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec { variant.fields.iter().filter(|field| { - let def_scope = self.tcx.adjust_ident(field.ident, variant.did, self.body_id).1; + let def_scope = self.tcx.adjust_ident(field.ident, variant.def_id, self.body_id).1; field.vis.is_accessible_from(def_scope, self.tcx) }) .map(|field| field.ident.name) @@ -3446,26 +3704,31 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { display } - fn no_such_field_err(&self, span: Span, field: T, expr_t: &ty::TyS) - -> DiagnosticBuilder { + fn no_such_field_err(&self, span: Span, field: T, expr_t: &ty::TyS<'_>) + -> DiagnosticBuilder<'_> { type_error_struct!(self.tcx().sess, span, expr_t, E0609, "no field `{}` on type `{}`", field, expr_t) } - fn report_unknown_field(&self, - ty: Ty<'tcx>, - variant: &'tcx ty::VariantDef, - field: &hir::Field, - skip_fields: &[hir::Field], - kind_name: &str) { + fn report_unknown_field( + &self, + ty: Ty<'tcx>, + variant: &'tcx ty::VariantDef, + field: &hir::Field, + skip_fields: &[hir::Field], + kind_name: &str, + ) { + if variant.recovered { + return; + } let mut err = self.type_error_struct_with_diag( field.ident.span, |actual| match ty.sty { ty::Adt(adt, ..) if adt.is_enum() => { struct_span_err!(self.tcx.sess, field.ident.span, E0559, "{} `{}::{}` has no field named `{}`", - kind_name, actual, variant.name, field.ident) + kind_name, actual, variant.ident, field.ident) } _ => { struct_span_err!(self.tcx.sess, field.ident.span, E0560, @@ -3479,15 +3742,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(field_name) = Self::suggest_field_name(variant, &field.ident.as_str(), skip_fields.collect()) { - err.span_label(field.ident.span, - format!("field does not exist - did you mean `{}`?", field_name)); + err.span_suggestion( + field.ident.span, + "a field with a similar name exists", + field_name.to_string(), + Applicability::MaybeIncorrect, + ); } else { match ty.sty { ty::Adt(adt, ..) 
=> { if adt.is_enum() { err.span_label(field.ident.span, format!("`{}::{}` does not have this field", - ty, variant.name)); + ty, variant.ident)); } else { err.span_label(field.ident.span, format!("`{}` does not have this field", ty)); @@ -3507,7 +3774,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn check_expr_struct_fields(&self, adt_ty: Ty<'tcx>, expected: Expectation<'tcx>, - expr_id: ast::NodeId, + expr_id: hir::HirId, span: Span, variant: &'tcx ty::VariantDef, ast_fields: &'gcx [hir::Field], @@ -3537,10 +3804,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Type-check each field. for field in ast_fields { - let ident = tcx.adjust_ident(field.ident, variant.did, self.body_id).0; + let ident = tcx.adjust_ident(field.ident, variant.def_id, self.body_id).0; let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) { seen_fields.insert(ident, field.span); - self.write_field_index(field.id, i); + self.write_field_index(field.hir_id, i); // We don't look at stability attributes on // struct-like enums (yet...), but it's definitely not @@ -3627,14 +3894,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } pub fn check_struct_path(&self, - qpath: &hir::QPath, - node_id: ast::NodeId) + qpath: &QPath, + hir_id: hir::HirId) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> { let path_span = match *qpath { - hir::QPath::Resolved(_, ref path) => path.span, - hir::QPath::TypeRelative(ref qself, _) => qself.span + QPath::Resolved(_, ref path) => path.span, + QPath::TypeRelative(ref qself, _) => qself.span }; - let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, node_id); + let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id); let variant = match def { Def::Err => { self.set_tainted_by_errors(); @@ -3645,7 +3912,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty::Adt(adt, substs) => { Some((adt.variant_of_def(def), adt.did, substs)) } - _ => bug!("unexpected type: {:?}", ty.sty) + _ => bug!("unexpected type: {:?}", ty) } } Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) | @@ -3662,8 +3929,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some((variant, did, substs)) = variant { debug!("check_struct_path: did={:?} substs={:?}", did, substs); - let hir_id = self.tcx.hir().node_to_hir_id(node_id); - self.write_user_substs_from_substs(hir_id, substs, None); + self.write_user_type_annotation_from_substs(hir_id, did, substs, None); // Check bounds on type arguments used in the path. let bounds = self.instantiate_bounds(path_span, did, substs); @@ -3685,13 +3951,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn check_expr_struct(&self, expr: &hir::Expr, expected: Expectation<'tcx>, - qpath: &hir::QPath, + qpath: &QPath, fields: &'gcx [hir::Field], base_expr: &'gcx Option>) -> Ty<'tcx> { // Find the relevant variant let (variant, adt_ty) = - if let Some(variant_ty) = self.check_struct_path(qpath, expr.id) { + if let Some(variant_ty) = self.check_struct_path(qpath, expr.hir_id) { variant_ty } else { self.check_struct_fields_on_error(fields, base_expr); @@ -3699,8 +3965,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; let path_span = match *qpath { - hir::QPath::Resolved(_, ref path) => path.span, - hir::QPath::TypeRelative(ref qself, _) => qself.span + QPath::Resolved(_, ref path) => path.span, + QPath::TypeRelative(ref qself, _) => qself.span }; // Prohibit struct expressions when non-exhaustive flag is set. 
@@ -3711,7 +3977,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { adt.variant_descr()); } - let error_happened = self.check_expr_struct_fields(adt_ty, expected, expr.id, path_span, + let error_happened = self.check_expr_struct_fields(adt_ty, expected, expr.hir_id, path_span, variant, fields, base_expr.is_none()); if let &Some(ref base_expr) = base_expr { // If check_expr_struct_fields hit an error, do not attempt to populate @@ -3760,7 +4026,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { expr, expected); // Warn for expressions after diverging siblings. - self.warn_if_unreachable(expr.id, expr.span, "expression"); + self.warn_if_unreachable(expr.hir_id, expr.span, "expression"); // Hide the outer diverging and has_errors flags. let old_diverges = self.diverges.get(); @@ -3772,11 +4038,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // Warn for non-block expressions with diverging children. match expr.node { - hir::ExprKind::Block(..) | - hir::ExprKind::Loop(..) | hir::ExprKind::While(..) | - hir::ExprKind::If(..) | hir::ExprKind::Match(..) => {} + ExprKind::Block(..) | + ExprKind::Loop(..) | ExprKind::While(..) | + ExprKind::If(..) | ExprKind::Match(..) => {} - _ => self.warn_if_unreachable(expr.id, expr.span, "expression") + _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression") } // Any expression that produces a value of type `!` must have diverged @@ -3793,7 +4059,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.diverges.set(self.diverges.get() | old_diverges); self.has_errors.set(self.has_errors.get() | old_has_errors); - debug!("type of {} is...", self.tcx.hir().node_to_string(expr.id)); + debug!("type of {} is...", self.tcx.hir().hir_to_string(expr.hir_id)); debug!("... {:?}, expected is {:?}", ty, expected); ty @@ -3813,9 +4079,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ); let tcx = self.tcx; - let id = expr.id; + let id = expr.hir_id; match expr.node { - hir::ExprKind::Box(ref subexpr) => { + ExprKind::Box(ref subexpr) => { let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| { match ty.sty { ty::Adt(def, _) if def.is_box() @@ -3827,16 +4093,16 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.mk_box(referent_ty) } - hir::ExprKind::Lit(ref lit) => { + ExprKind::Lit(ref lit) => { self.check_lit(&lit, expected) } - hir::ExprKind::Binary(op, ref lhs, ref rhs) => { + ExprKind::Binary(op, ref lhs, ref rhs) => { self.check_binop(expr, op, lhs, rhs) } - hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => { + ExprKind::AssignOp(op, ref lhs, ref rhs) => { self.check_binop_assign(expr, op, lhs, rhs) } - hir::ExprKind::Unary(unop, ref oprnd) => { + ExprKind::Unary(unop, ref oprnd) => { let expected_inner = match unop { hir::UnNot | hir::UnNeg => { expected @@ -3904,7 +4170,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } oprnd_t } - hir::ExprKind::AddrOf(mutbl, ref oprnd) => { + ExprKind::AddrOf(mutbl, ref oprnd) => { let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| { match ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. 
}) => { @@ -3944,13 +4210,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.mk_ref(region, tm) } } - hir::ExprKind::Path(ref qpath) => { - let (def, opt_ty, segs) = self.resolve_ty_and_def_ufcs(qpath, expr.id, expr.span); - let ty = if def != Def::Err { - self.instantiate_value_path(segs, opt_ty, def, expr.span, id).0 - } else { - self.set_tainted_by_errors(); - tcx.types.err + ExprKind::Path(ref qpath) => { + let (def, opt_ty, segs) = self.resolve_ty_and_def_ufcs(qpath, expr.hir_id, + expr.span); + let ty = match def { + Def::Err => { + self.set_tainted_by_errors(); + tcx.types.err + } + Def::Ctor(_, _, CtorKind::Fictive) => { + report_unexpected_variant_def(tcx, &def, expr.span, qpath); + tcx.types.err + } + _ => self.instantiate_value_path(segs, opt_ty, def, expr.span, id).0, }; if let ty::FnDef(..) = ty.sty { @@ -3997,13 +4269,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty } - hir::ExprKind::InlineAsm(_, ref outputs, ref inputs) => { + ExprKind::InlineAsm(_, ref outputs, ref inputs) => { for expr in outputs.iter().chain(inputs.iter()) { self.check_expr(expr); } tcx.mk_unit() } - hir::ExprKind::Break(destination, ref expr_opt) => { + ExprKind::Break(destination, ref expr_opt) => { if let Ok(target_id) = destination.target_id { let (e_ty, cause); if let Some(ref e) = *expr_opt { @@ -4076,7 +4348,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // ... except when we try to 'break rust;'. // ICE this expression in particular (see #43162). - if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = e.node { + if let ExprKind::Path(QPath::Resolved(_, ref path)) = e.node { if path.segments.len() == 1 && path.segments[0].ident.name == "rust" { fatally_break_rust(self.tcx.sess); } @@ -4087,7 +4359,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - hir::ExprKind::Continue(destination) => { + ExprKind::Continue(destination) => { if destination.target_id.is_ok() { tcx.types.never } else { @@ -4095,61 +4367,57 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tcx.types.err } } - hir::ExprKind::Ret(ref expr_opt) => { + ExprKind::Ret(ref expr_opt) => { if self.ret_coercion.is_none() { struct_span_err!(self.tcx.sess, expr.span, E0572, "return statement outside of function body").emit(); } else if let Some(ref e) = *expr_opt { + if self.ret_coercion_span.borrow().is_none() { + *self.ret_coercion_span.borrow_mut() = Some(e.span); + } self.check_return_expr(e); } else { let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut(); + if self.ret_coercion_span.borrow().is_none() { + *self.ret_coercion_span.borrow_mut() = Some(expr.span); + } let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression); - coercion.coerce_forced_unit(self, &cause, &mut |_| (), true); + if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) { + coercion.coerce_forced_unit( + self, + &cause, + &mut |db| { + db.span_label( + fn_decl.output.span(), + format!( + "expected `{}` because of this return type", + fn_decl.output, + ), + ); + }, + true, + ); + } else { + coercion.coerce_forced_unit(self, &cause, &mut |_| (), true); + } } tcx.types.never } - hir::ExprKind::Assign(ref lhs, ref rhs) => { - let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace); - - let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty); - - match expected { - ExpectIfCondition => { - self.tcx.sess.delay_span_bug(lhs.span, "invalid lhs expression in if;\ - expected error elsehwere"); - } - _ => { - // Only check this if not in an `if` condition, as the - // mistyped comparison help is more 
appropriate. - if !lhs.is_place_expr() { - struct_span_err!(self.tcx.sess, expr.span, E0070, - "invalid left-hand side expression") - .span_label(expr.span, "left-hand of expression not valid") - .emit(); - } - } - } - - self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized); - - if lhs_ty.references_error() || rhs_ty.references_error() { - tcx.types.err - } else { - tcx.mk_unit() - } + ExprKind::Assign(ref lhs, ref rhs) => { + self.check_assign(expr, expected, lhs, rhs) } - hir::ExprKind::If(ref cond, ref then_expr, ref opt_else_expr) => { + ExprKind::If(ref cond, ref then_expr, ref opt_else_expr) => { self.check_then_else(&cond, then_expr, opt_else_expr.as_ref().map(|e| &**e), expr.span, expected) } - hir::ExprKind::While(ref cond, ref body, _) => { + ExprKind::While(ref cond, ref body, _) => { let ctxt = BreakableCtxt { // cannot use break with a value from a while loop coerce: None, may_break: false, // Will get updated if/when we find a `break`. }; - let (ctxt, ()) = self.with_breakable_ctxt(expr.id, ctxt, || { + let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || { self.check_expr_has_type_or_error(&cond, tcx.types.bool); let cond_diverging = self.diverges.get(); self.check_block_no_value(&body); @@ -4166,7 +4434,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.tcx.mk_unit() } - hir::ExprKind::Loop(ref body, _, source) => { + ExprKind::Loop(ref body, _, source) => { let coerce = match source { // you can only use break with a value from a normal `loop { }` hir::LoopSource::Loop => { @@ -4185,7 +4453,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { may_break: false, // Will get updated if/when we find a `break`. }; - let (ctxt, ()) = self.with_breakable_ctxt(expr.id, ctxt, || { + let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || { self.check_block_no_value(&body); }); @@ -4206,22 +4474,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit()) } - hir::ExprKind::Match(ref discrim, ref arms, match_src) => { + ExprKind::Match(ref discrim, ref arms, match_src) => { self.check_match(expr, &discrim, arms, expected, match_src) } - hir::ExprKind::Closure(capture, ref decl, body_id, _, gen) => { + ExprKind::Closure(capture, ref decl, body_id, _, gen) => { self.check_expr_closure(expr, capture, &decl, body_id, gen, expected) } - hir::ExprKind::Block(ref body, _) => { + ExprKind::Block(ref body, _) => { self.check_block_with_expected(&body, expected) } - hir::ExprKind::Call(ref callee, ref args) => { + ExprKind::Call(ref callee, ref args) => { self.check_call(expr, &callee, args, expected) } - hir::ExprKind::MethodCall(ref segment, span, ref args) => { + ExprKind::MethodCall(ref segment, span, ref args) => { self.check_method_call(expr, segment, span, args, expected, needs) } - hir::ExprKind::Cast(ref e, ref t) => { + ExprKind::Cast(ref e, ref t) => { // Find the type of `e`. Supply hints based on the type we are casting to, // if appropriate. 
let t_cast = self.to_ty_saving_user_provided_ty(t); @@ -4246,12 +4514,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } - hir::ExprKind::Type(ref e, ref t) => { + ExprKind::Type(ref e, ref t) => { let ty = self.to_ty_saving_user_provided_ty(&t); self.check_expr_eq_type(&e, ty); ty } - hir::ExprKind::Array(ref args) => { + ExprKind::Array(ref args) => { let uty = expected.to_option(self).and_then(|uty| { match uty.sty { ty::Array(ty, _) | ty::Slice(ty) => Some(ty), @@ -4275,10 +4543,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; tcx.mk_array(element_ty, args.len() as u64) } - hir::ExprKind::Repeat(ref element, ref count) => { - let count_def_id = tcx.hir().local_def_id(count.id); + ExprKind::Repeat(ref element, ref count) => { + let count_def_id = tcx.hir().local_def_id_from_hir_id(count.hir_id); let param_env = ty::ParamEnv::empty(); - let substs = Substs::identity_for_item(tcx.global_tcx(), count_def_id); + let substs = InternalSubsts::identity_for_item(tcx.global_tcx(), count_def_id); let instance = ty::Instance::resolve( tcx.global_tcx(), param_env, @@ -4326,12 +4594,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if element_ty.references_error() { tcx.types.err } else if let Ok(count) = count { - tcx.mk_ty(ty::Array(t, count)) + tcx.mk_ty(ty::Array(t, tcx.mk_const(count))) } else { tcx.types.err } } - hir::ExprKind::Tup(ref elts) => { + ExprKind::Tup(ref elts) => { let flds = expected.only_has_type(self).and_then(|ty| { let ty = self.resolve_type_vars_with_obligations(ty); match ty.sty { @@ -4361,13 +4629,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { tuple } } - hir::ExprKind::Struct(ref qpath, ref fields, ref base_expr) => { + ExprKind::Struct(ref qpath, ref fields, ref base_expr) => { self.check_expr_struct(expr, expected, qpath, fields, base_expr) } - hir::ExprKind::Field(ref base, field) => { + ExprKind::Field(ref base, field) => { self.check_field(expr, needs, &base, field) } - hir::ExprKind::Index(ref base, ref idx) => { + ExprKind::Index(ref base, ref idx) => { let base_t = self.check_expr_with_needs(&base, needs); let idx_t = self.check_expr(&idx); @@ -4393,16 +4661,17 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut needs_note = true; // If the index is an integer, we can show the actual // fixed expression: - if let hir::ExprKind::Lit(ref lit) = idx.node { + if let ExprKind::Lit(ref lit) = idx.node { if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node { let snip = tcx.sess.source_map().span_to_snippet(base.span); if let Ok(snip) = snip { - err.span_suggestion_with_applicability( + err.span_suggestion( expr.span, "to access tuple elements, use", format!("{}.{}", snip, i), - Applicability::MachineApplicable); + Applicability::MachineApplicable, + ); needs_note = false; } } @@ -4418,7 +4687,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } } - hir::ExprKind::Yield(ref value) => { + ExprKind::Yield(ref value) => { match self.yield_ty { Some(ty) => { self.check_expr_coercable_to_type(&value, ty); @@ -4430,36 +4699,83 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } tcx.mk_unit() } + hir::ExprKind::Err => { + tcx.types.err + } + } + } + + /// Type check assignment expression `expr` of form `lhs = rhs`. + /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
+ fn check_assign( + &self, + expr: &'gcx hir::Expr, + expected: Expectation<'tcx>, + lhs: &'gcx hir::Expr, + rhs: &'gcx hir::Expr, + ) -> Ty<'tcx> { + let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace); + let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty); + + let expected_ty = expected.coercion_target_type(self, expr.span); + if expected_ty == self.tcx.types.bool { + // The expected type is `bool` but this will result in `()` so we can reasonably + // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`. + // The likely cause of this is `if foo = bar { .. }`. + let actual_ty = self.tcx.mk_unit(); + let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap(); + let msg = "try comparing for equality"; + let left = self.tcx.sess.source_map().span_to_snippet(lhs.span); + let right = self.tcx.sess.source_map().span_to_snippet(rhs.span); + if let (Ok(left), Ok(right)) = (left, right) { + let help = format!("{} == {}", left, right); + err.span_suggestion(expr.span, msg, help, Applicability::MaybeIncorrect); + } else { + err.help(msg); + } + err.emit(); + } else if !lhs.is_place_expr() { + struct_span_err!(self.tcx.sess, expr.span, E0070, + "invalid left-hand side expression") + .span_label(expr.span, "left-hand of expression not valid") + .emit(); + } + + self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized); + + if lhs_ty.references_error() || rhs_ty.references_error() { + self.tcx.types.err + } else { + self.tcx.mk_unit() } } // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary. // The newly resolved definition is written into `type_dependent_defs`. fn finish_resolving_struct_path(&self, - qpath: &hir::QPath, + qpath: &QPath, path_span: Span, - node_id: ast::NodeId) + hir_id: hir::HirId) -> (Def, Ty<'tcx>) { match *qpath { - hir::QPath::Resolved(ref maybe_qself, ref path) => { + QPath::Resolved(ref maybe_qself, ref path) => { let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself)); let ty = AstConv::def_to_ty(self, self_ty, path, true); (path.def, ty) } - hir::QPath::TypeRelative(ref qself, ref segment) => { + QPath::TypeRelative(ref qself, ref segment) => { let ty = self.to_ty(qself); - let def = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.node { + let def = if let hir::TyKind::Path(QPath::Resolved(_, ref path)) = qself.node { path.def } else { Def::Err }; - let (ty, def) = AstConv::associated_path_def_to_ty(self, node_id, path_span, - ty, def, segment); + let (ty, def) = AstConv::associated_path_to_ty(self, hir_id, path_span, + ty, def, segment, true); // Write back the new resolution. - let hir_id = self.tcx.hir().node_to_hir_id(node_id); self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def); (def, ty) @@ -4467,32 +4783,32 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - // Resolve associated value path into a base type and associated constant or method definition. - // The newly resolved definition is written into `type_dependent_defs`. + /// Resolves associated value path into a base type and associated constant or method + /// definition. The newly resolved definition is written into `type_dependent_defs`. 
pub fn resolve_ty_and_def_ufcs<'b>(&self, - qpath: &'b hir::QPath, - node_id: ast::NodeId, + qpath: &'b QPath, + hir_id: hir::HirId, span: Span) -> (Def, Option>, &'b [hir::PathSegment]) { - let (ty, item_segment) = match *qpath { - hir::QPath::Resolved(ref opt_qself, ref path) => { + debug!("resolve_ty_and_def_ufcs: qpath={:?} hir_id={:?} span={:?}", qpath, hir_id, span); + let (ty, qself, item_segment) = match *qpath { + QPath::Resolved(ref opt_qself, ref path) => { return (path.def, opt_qself.as_ref().map(|qself| self.to_ty(qself)), &path.segments[..]); } - hir::QPath::TypeRelative(ref qself, ref segment) => { - (self.to_ty(qself), segment) + QPath::TypeRelative(ref qself, ref segment) => { + (self.to_ty(qself), qself, segment) } }; - let hir_id = self.tcx.hir().node_to_hir_id(node_id); - if let Some(cached_def) = self.tables.borrow().type_dependent_defs().get(hir_id) { + if let Some(cached_def) = self.tables.borrow().type_dependent_def(hir_id) { // Return directly on cache hit. This is useful to avoid doubly reporting // errors with default match binding modes. See #44614. - return (*cached_def, Some(ty), slice::from_ref(&**item_segment)) + return (cached_def, Some(ty), slice::from_ref(&**item_segment)) } let item_name = item_segment.ident; - let def = match self.resolve_ufcs(span, item_name, ty, node_id) { + let def = match self.resolve_ufcs(span, item_name, ty, hir_id) { Ok(def) => def, Err(error) => { let def = match error { @@ -4500,7 +4816,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { _ => Def::Err, }; if item_name.name != keywords::Invalid.name() { - self.report_method_error(span, ty, item_name, None, error, None); + self.report_method_error(span, + ty, + item_name, + SelfSource::QPath(qself), + error, + None); } def } @@ -4515,13 +4836,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { local: &'gcx hir::Local, init: &'gcx hir::Expr) -> Ty<'tcx> { - // FIXME(tschottdorf): contains_explicit_ref_binding() must be removed + // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed // for #42640 (default match binding modes). // // See #44848. let ref_bindings = local.pat.contains_explicit_ref_binding(); - let local_ty = self.local_ty(init.span, local.id).revealed_ty; + let local_ty = self.local_ty(init.span, local.hir_id).revealed_ty; if let Some(m) = ref_bindings { // Somewhat subtle: if we have a `ref` binding in the pattern, // we want to avoid introducing coercions for the RHS. This is @@ -4540,7 +4861,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } pub fn check_decl_local(&self, local: &'gcx hir::Local) { - let t = self.local_ty(local.span, local.id).decl_ty; + let t = self.local_ty(local.span, local.hir_id).decl_ty; self.write_ty(local.hir_id, t); if let Some(ref init) = local.init { @@ -4550,9 +4871,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - self.check_pat_walk(&local.pat, t, - ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), - true); + self.check_pat_walk( + &local.pat, + t, + ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), + None, + ); let pat_ty = self.node_ty(local.pat.hir_id); if pat_ty.references_error() { self.write_ty(local.hir_id, pat_ty); @@ -4560,43 +4884,36 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) { - // Don't do all the complex logic below for DeclItem. + // Don't do all the complex logic below for `DeclItem`. 
match stmt.node { - hir::StmtKind::Decl(ref decl, _) => { - if let hir::DeclKind::Item(_) = decl.node { - return - } - } - hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {} + hir::StmtKind::Item(..) => return, + hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {} } - self.warn_if_unreachable(stmt.node.id(), stmt.span, "statement"); + self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement"); - // Hide the outer diverging and has_errors flags. + // Hide the outer diverging and `has_errors` flags. let old_diverges = self.diverges.get(); let old_has_errors = self.has_errors.get(); self.diverges.set(Diverges::Maybe); self.has_errors.set(false); match stmt.node { - hir::StmtKind::Decl(ref decl, _) => { - match decl.node { - hir::DeclKind::Local(ref l) => { - self.check_decl_local(&l); - } - hir::DeclKind::Item(_) => {/* ignore for now */} - } + hir::StmtKind::Local(ref l) => { + self.check_decl_local(&l); } - hir::StmtKind::Expr(ref expr, _) => { - // Check with expected type of () + // Ignore for now. + hir::StmtKind::Item(_) => {} + hir::StmtKind::Expr(ref expr) => { + // Check with expected type of `()`. self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit()); } - hir::StmtKind::Semi(ref expr, _) => { + hir::StmtKind::Semi(ref expr) => { self.check_expr(&expr); } } - // Combine the diverging and has_error flags. + // Combine the diverging and `has_error` flags. self.diverges.set(self.diverges.get() | old_diverges); self.has_errors.set(self.has_errors.get() | old_has_errors); } @@ -4655,7 +4972,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { may_break: false, }; - let (ctxt, ()) = self.with_breakable_ctxt(blk.id, ctxt, || { + let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || { for s in &blk.stmts { self.check_stmt(s); } @@ -4665,12 +4982,12 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected)); let mut enclosing_breakables = self.enclosing_breakables.borrow_mut(); - let ctxt = enclosing_breakables.find_breakable(blk.id); + let ctxt = enclosing_breakables.find_breakable(blk.hir_id); let coerce = ctxt.coerce.as_mut().unwrap(); if let Some(tail_expr_ty) = tail_expr_ty { let tail_expr = tail_expr.unwrap(); let cause = self.cause(tail_expr.span, - ObligationCauseCode::BlockTailExpression(blk.id)); + ObligationCauseCode::BlockTailExpression(blk.hir_id)); coerce.coerce(self, &cause, tail_expr, @@ -4686,17 +5003,32 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // // #41425 -- label the implicit `()` as being the // "found type" here, rather than the "expected type". - // - // #44579 -- if the block was recovered during parsing, - // the type would be nonsensical and it is not worth it - // to perform the type check, so we avoid generating the - // diagnostic output. - if !self.diverges.get().always() && !blk.recovered { - coerce.coerce_forced_unit(self, &self.misc(blk.span), &mut |err| { + if !self.diverges.get().always() { + // #50009 -- Do not point at the entire fn block span, point at the return type + // span, as it is the cause of the requirement, and + // `consider_hint_about_removing_semicolon` will point at the last expression + // if it were a relevant part of the error. This improves usability in editors + // that highlight errors inline. 
+ let mut sp = blk.span; + let mut fn_span = None; + if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) { + let ret_sp = decl.output.span(); + if let Some(block_sp) = self.parent_item_span(blk.hir_id) { + // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the + // output would otherwise be incorrect and even misleading. Make sure + // the span we're aiming at corresponds to a `fn` body. + if block_sp == blk.span { + sp = ret_sp; + fn_span = Some(ident.span); + } + } + } + coerce.coerce_forced_unit(self, &self.misc(sp), &mut |err| { if let Some(expected_ty) = expected.only_has_type(self) { - self.consider_hint_about_removing_semicolon(blk, - expected_ty, - err); + self.consider_hint_about_removing_semicolon(blk, expected_ty, err); + } + if let Some(fn_span) = fn_span { + err.span_label(fn_span, "this function's body doesn't return"); } }, false); } @@ -4721,64 +5053,89 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty } - /// Given a `NodeId`, return the `FnDecl` of the method it is enclosed by and whether a - /// suggestion can be made, `None` otherwise. - pub fn get_fn_decl(&self, blk_id: ast::NodeId) -> Option<(hir::FnDecl, bool)> { - // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or - // `while` before reaching it, as block tail returns are not available in them. - if let Some(fn_id) = self.tcx.hir().get_return_block(blk_id) { - let parent = self.tcx.hir().get(fn_id); - - if let Node::Item(&hir::Item { - name, node: hir::ItemKind::Fn(ref decl, ..), .. - }) = parent { - decl.clone().and_then(|decl| { - // This is less than ideal, it will not suggest a return type span on any - // method called `main`, regardless of whether it is actually the entry point, - // but it will still present it as the reason for the expected type. - Some((decl, name != Symbol::intern("main"))) - }) - } else if let Node::TraitItem(&hir::TraitItem { - node: hir::TraitItemKind::Method(hir::MethodSig { + fn parent_item_span(&self, id: hir::HirId) -> Option<Span> { + let node = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_item(id)); + match node { + Node::Item(&hir::Item { + node: hir::ItemKind::Fn(_, _, _, body_id), .. + }) | + Node::ImplItem(&hir::ImplItem { + node: hir::ImplItemKind::Method(_, body_id), .. + }) => { + let body = self.tcx.hir().body(body_id); + if let ExprKind::Block(block, _) = &body.value.node { + return Some(block.span); + } + } + _ => {} + } + None + } + + /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise. + fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(hir::FnDecl, ast::Ident)> { + let parent = self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_item(blk_id)); + self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident)) + } + + /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise. + fn get_node_fn_decl(&self, node: Node<'_>) -> Option<(hir::FnDecl, ast::Ident, bool)> { + match node { + Node::Item(&hir::Item { + ident, node: hir::ItemKind::Fn(ref decl, ..), .. + }) => decl.clone().and_then(|decl| { + // This is less than ideal, it will not suggest a return type span on any + // method called `main`, regardless of whether it is actually the entry point, + // but it will still present it as the reason for the expected type. + Some((decl, ident, ident.name != Symbol::intern("main"))) + }), + Node::TraitItem(&hir::TraitItem { + ident, node: hir::TraitItemKind::Method(hir::MethodSig { ref decl, .. }, ..), ..
- }) = parent { - decl.clone().and_then(|decl| { - Some((decl, true)) - }) - } else if let Node::ImplItem(&hir::ImplItem { - node: hir::ImplItemKind::Method(hir::MethodSig { + }) => decl.clone().and_then(|decl| Some((decl, ident, true))), + Node::ImplItem(&hir::ImplItem { + ident, node: hir::ImplItemKind::Method(hir::MethodSig { ref decl, .. }, ..), .. - }) = parent { - decl.clone().and_then(|decl| { - Some((decl, false)) - }) - } else { - None - } - } else { - None + }) => decl.clone().and_then(|decl| Some((decl, ident, false))), + _ => None, } } - /// On implicit return expressions with mismatched types, provide the following suggestions: + /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a + /// suggestion can be made, `None` otherwise. + pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(hir::FnDecl, bool)> { + // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or + // `while` before reaching it, as block tail returns are not available in them. + self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| { + let parent = self.tcx.hir().get_by_hir_id(blk_id); + self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main)) + }) + } + + /// On implicit return expressions with mismatched types, provides the following suggestions: /// - /// - Point out the method's return type as the reason for the expected type - /// - Possible missing semicolon - /// - Possible missing return type if the return type is the default, and not `fn main()` - pub fn suggest_mismatched_types_on_tail(&self, - err: &mut DiagnosticBuilder<'tcx>, - expression: &'gcx hir::Expr, - expected: Ty<'tcx>, - found: Ty<'tcx>, - cause_span: Span, - blk_id: ast::NodeId) { + /// - Points out the method's return type as the reason for the expected type. + /// - Possible missing semicolon. + /// - Possible missing return type if the return type is the default, and not `fn main()`. 
+ pub fn suggest_mismatched_types_on_tail( + &self, + err: &mut DiagnosticBuilder<'tcx>, + expression: &'gcx hir::Expr, + expected: Ty<'tcx>, + found: Ty<'tcx>, + cause_span: Span, + blk_id: hir::HirId, + ) -> bool { self.suggest_missing_semicolon(err, expression, expected, cause_span); + let mut pointing_at_return_type = false; if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) { - self.suggest_missing_return_type(err, &fn_decl, expected, found, can_suggest); + pointing_at_return_type = self.suggest_missing_return_type( + err, &fn_decl, expected, found, can_suggest); } self.suggest_ref_or_into(err, expression, expected, found); + pointing_at_return_type } pub fn suggest_ref_or_into( @@ -4789,7 +5146,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { found: Ty<'tcx>, ) { if let Some((sp, msg, suggestion)) = self.check_ref(expr, found, expected) { - err.span_suggestion_with_applicability( + err.span_suggestion( sp, msg, suggestion, @@ -4816,7 +5173,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } }).peekable(); if suggestions.peek().is_some() { - err.span_suggestions_with_applicability( + err.span_suggestions( expr.span, "try using a conversion method", suggestions, @@ -4827,7 +5184,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - /// A common error is to forget to add a semicolon at the end of a block: + /// A common error is to forget to add a semicolon at the end of a block, e.g., /// /// ``` /// fn foo() { @@ -4847,15 +5204,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // `BlockTailExpression` only relevant if the tail expr would be // useful on its own. match expression.node { - hir::ExprKind::Call(..) | - hir::ExprKind::MethodCall(..) | - hir::ExprKind::If(..) | - hir::ExprKind::While(..) | - hir::ExprKind::Loop(..) | - hir::ExprKind::Match(..) | - hir::ExprKind::Block(..) => { + ExprKind::Call(..) | + ExprKind::MethodCall(..) | + ExprKind::If(..) | + ExprKind::While(..) | + ExprKind::Loop(..) | + ExprKind::Match(..) | + ExprKind::Block(..) => { let sp = self.tcx.sess.source_map().next_point(cause_span); - err.span_suggestion_with_applicability( + err.span_suggestion( sp, "try adding a semicolon", ";".to_string(), @@ -4877,48 +5234,54 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// This routine checks if the return type is left as default, the method is not part of an /// `impl` block and that it isn't the `main` method. If so, it suggests setting the return /// type. - fn suggest_missing_return_type(&self, - err: &mut DiagnosticBuilder<'tcx>, - fn_decl: &hir::FnDecl, - expected: Ty<'tcx>, - found: Ty<'tcx>, - can_suggest: bool) { + fn suggest_missing_return_type( + &self, + err: &mut DiagnosticBuilder<'tcx>, + fn_decl: &hir::FnDecl, + expected: Ty<'tcx>, + found: Ty<'tcx>, + can_suggest: bool, + ) -> bool { // Only suggest changing the return type for methods that // haven't set a return type at all (and aren't `fn main()` or an impl). 
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) { (&hir::FunctionRetTy::DefaultReturn(span), true, true, true) => { - err.span_suggestion_with_applicability( + err.span_suggestion( span, "try adding a return type", format!("-> {} ", self.resolve_type_vars_with_obligations(found)), Applicability::MachineApplicable); + true } (&hir::FunctionRetTy::DefaultReturn(span), false, true, true) => { err.span_label(span, "possibly return type missing here?"); + true } (&hir::FunctionRetTy::DefaultReturn(span), _, false, true) => { // `fn main()` must return `()`, do not suggest changing return type err.span_label(span, "expected `()` because of default return type"); + true } // expectation was caused by something else, not the default return - (&hir::FunctionRetTy::DefaultReturn(_), _, _, false) => {} + (&hir::FunctionRetTy::DefaultReturn(_), _, _, false) => false, (&hir::FunctionRetTy::Return(ref ty), _, _, _) => { // Only point to return type if the expected type is the return type, as if they // are not, the expectation must have been caused by something else. debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.node); let sp = ty.span; let ty = AstConv::ast_ty_to_ty(self, ty); - debug!("suggest_missing_return_type: return type sty {:?}", ty.sty); - debug!("suggest_missing_return_type: expected type sty {:?}", ty.sty); + debug!("suggest_missing_return_type: return type {:?}", ty); + debug!("suggest_missing_return_type: expected type {:?}", ty); if ty.sty == expected.sty { err.span_label(sp, format!("expected `{}` because of return type", expected)); + return true; } + false } } } - /// A common error is to add an extra semicolon: /// /// ``` @@ -4930,126 +5293,88 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// This routine checks if the final statement in a block is an /// expression with an explicit semicolon whose type is compatible /// with `expected_ty`. If so, it suggests removing the semicolon. - fn consider_hint_about_removing_semicolon(&self, - blk: &'gcx hir::Block, - expected_ty: Ty<'tcx>, - err: &mut DiagnosticBuilder) { + fn consider_hint_about_removing_semicolon( + &self, + blk: &'gcx hir::Block, + expected_ty: Ty<'tcx>, + err: &mut DiagnosticBuilder<'_>, + ) { + if let Some(span_semi) = self.could_remove_semicolon(blk, expected_ty) { + err.span_suggestion( + span_semi, + "consider removing this semicolon", + String::new(), + Applicability::MachineApplicable, + ); + } + } + + fn could_remove_semicolon( + &self, + blk: &'gcx hir::Block, + expected_ty: Ty<'tcx>, + ) -> Option { // Be helpful when the user wrote `{... expr;}` and // taking the `;` off is enough to fix the error. 
- let last_stmt = match blk.stmts.last() { - Some(s) => s, - None => return, - }; + let last_stmt = blk.stmts.last()?; let last_expr = match last_stmt.node { - hir::StmtKind::Semi(ref e, _) => e, - _ => return, + hir::StmtKind::Semi(ref e) => e, + _ => return None, }; let last_expr_ty = self.node_ty(last_expr.hir_id); if self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err() { - return; + return None; } let original_span = original_sp(last_stmt.span, blk.span); - let span_semi = original_span.with_lo(original_span.hi() - BytePos(1)); - err.span_suggestion_with_applicability( - span_semi, - "consider removing this semicolon", - String::new(), - Applicability::MachineApplicable); - } - - fn def_ids_for_path_segments(&self, - segments: &[hir::PathSegment], - def: Def) - -> Vec { - // We need to extract the type parameters supplied by the user in - // the path `path`. Due to the current setup, this is a bit of a - // tricky-process; the problem is that resolve only tells us the - // end-point of the path resolution, and not the intermediate steps. - // Luckily, we can (at least for now) deduce the intermediate steps - // just from the end-point. - // - // There are basically four cases to consider: - // - // 1. Reference to a constructor of enum variant or struct: - // - // struct Foo(...) - // enum E { Foo(...) } - // - // In these cases, the parameters are declared in the type - // space. - // - // 2. Reference to a fn item or a free constant: - // - // fn foo() { } - // - // In this case, the path will again always have the form - // `a::b::foo::` where only the final segment should have - // type parameters. However, in this case, those parameters are - // declared on a value, and hence are in the `FnSpace`. - // - // 3. Reference to a method or an associated constant: - // - // impl SomeStruct { - // fn foo(...) - // } - // - // Here we can have a path like - // `a::b::SomeStruct::::foo::`, in which case parameters - // may appear in two places. The penultimate segment, - // `SomeStruct::`, contains parameters in TypeSpace, and the - // final segment, `foo::` contains parameters in fn space. - // - // 4. Reference to a local variable - // - // Local variables can't have any type parameters. - // - // The first step then is to categorize the segments appropriately. - - assert!(!segments.is_empty()); - let last = segments.len() - 1; - - let mut path_segs = vec![]; - - match def { - // Case 1. Reference to a struct/variant constructor. - Def::StructCtor(def_id, ..) | - Def::VariantCtor(def_id, ..) | - Def::SelfCtor(.., def_id) => { - // Everything but the final segment should have no - // parameters at all. - let generics = self.tcx.generics_of(def_id); - // Variant and struct constructors use the - // generics of their parent type definition. - let generics_def_id = generics.parent.unwrap_or(def_id); - path_segs.push(PathSeg(generics_def_id, last)); - } + Some(original_span.with_lo(original_span.hi() - BytePos(1))) + } - // Case 2. Reference to a top-level value. 
- Def::Fn(def_id) | - Def::Const(def_id) | - Def::Static(def_id, _) => { - path_segs.push(PathSeg(def_id, last)); - } + // Rewrite `SelfCtor` to `Ctor` + pub fn rewrite_self_ctor(&self, def: Def, span: Span) -> (Def, DefId, Ty<'tcx>) { + let tcx = self.tcx; + if let Def::SelfCtor(impl_def_id) = def { + let ty = self.impl_self_ty(span, impl_def_id).ty; + let adt_def = ty.ty_adt_def(); + + match adt_def { + Some(adt_def) if adt_def.has_ctor() => { + let variant = adt_def.non_enum_variant(); + let ctor_def_id = variant.ctor_def_id.unwrap(); + let def = Def::Ctor(ctor_def_id, CtorOf::Struct, variant.ctor_kind); + (def, ctor_def_id, tcx.type_of(ctor_def_id)) + } + _ => { + let mut err = tcx.sess.struct_span_err(span, + "the `Self` constructor can only be used with tuple or unit structs"); + if let Some(adt_def) = adt_def { + match adt_def.adt_kind() { + AdtKind::Enum => { + err.help("did you mean to use one of the enum's variants?"); + }, + AdtKind::Struct | + AdtKind::Union => { + err.span_suggestion( + span, + "use curly brackets", + String::from("Self { /* fields */ }"), + Applicability::HasPlaceholders, + ); + } + } + } + err.emit(); - // Case 3. Reference to a method or associated const. - Def::Method(def_id) | - Def::AssociatedConst(def_id) => { - if segments.len() >= 2 { - let generics = self.tcx.generics_of(def_id); - path_segs.push(PathSeg(generics.parent.unwrap(), last - 1)); + (def, impl_def_id, tcx.types.err) } - path_segs.push(PathSeg(def_id, last)); } + } else { + let def_id = def.def_id(); - // Case 4. Local variable, no generics. - Def::Local(..) | Def::Upvar(..) => {} - - _ => bug!("unexpected definition: {:?}", def), + // The things we are substituting into the type should not contain + // escaping late-bound regions, and nor should the base type scheme. + let ty = tcx.type_of(def_id); + (def, def_id, ty) } - - debug!("path_segs = {:?}", path_segs); - - path_segs } // Instantiates the given path, which must refer to an item with the given @@ -5059,26 +5384,52 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self_ty: Option>, def: Def, span: Span, - node_id: ast::NodeId) + hir_id: hir::HirId) -> (Ty<'tcx>, Def) { debug!( - "instantiate_value_path(segments={:?}, self_ty={:?}, def={:?}, node_id={})", + "instantiate_value_path(segments={:?}, self_ty={:?}, def={:?}, hir_id={})", segments, self_ty, def, - node_id, + hir_id, ); - let path_segs = self.def_ids_for_path_segments(segments, def); + let tcx = self.tcx; + + match def { + Def::Local(nid) | Def::Upvar(nid, ..) 
=> { + let hid = self.tcx.hir().node_to_hir_id(nid); + let ty = self.local_ty(span, hid).decl_ty; + let ty = self.normalize_associated_types_in(span, &ty); + self.write_ty(hir_id, ty); + return (ty, def); + } + _ => {} + } + + let (def, def_id, ty) = self.rewrite_self_ctor(def, span); + let path_segs = AstConv::def_ids_for_path_segments(self, segments, self_ty, def); let mut user_self_ty = None; + let mut is_alias_variant_ctor = false; match def { + Def::Ctor(_, CtorOf::Variant, _) => { + if let Some(self_ty) = self_ty { + let adt_def = self_ty.ty_adt_def().unwrap(); + user_self_ty = Some(UserSelfTy { + impl_def_id: adt_def.did, + self_ty, + }); + is_alias_variant_ctor = true; + } + } Def::Method(def_id) | Def::AssociatedConst(def_id) => { - let container = self.tcx.associated_item(def_id).container; + let container = tcx.associated_item(def_id).container; + debug!("instantiate_value_path: def={:?} container={:?}", def, container); match container { ty::TraitContainer(trait_did) => { - callee::check_legal_trait_for_method_call(self.tcx, span, trait_did) + callee::check_legal_trait_for_method_call(tcx, span, trait_did) } ty::ImplContainer(impl_def_id) => { if segments.len() == 1 { @@ -5104,24 +5455,18 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // provided (if any) into their appropriate spaces. We'll also report // errors if type parameters are provided in an inappropriate place. - let generic_segs = path_segs.iter().map(|PathSeg(_, index)| index) - .collect::>(); - AstConv::prohibit_generics(self, segments.iter().enumerate().filter_map(|(index, seg)| { - if !generic_segs.contains(&index) { + let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect(); + let generics_has_err = AstConv::prohibit_generics( + self, segments.iter().enumerate().filter_map(|(index, seg)| { + if !generic_segs.contains(&index) || is_alias_variant_ctor { Some(seg) } else { None } })); - - match def { - Def::Local(nid) | Def::Upvar(nid, ..) => { - let ty = self.local_ty(span, nid).decl_ty; - let ty = self.normalize_associated_types_in(span, &ty); - self.write_ty(self.tcx.hir().node_to_hir_id(node_id), ty); - return (ty, def); - } - _ => {} + if generics_has_err { + // Don't try to infer type parameters when prohibited generic arguments were given. + user_self_ty = None; } // Now we have to compare the types that the user *actually* @@ -5134,13 +5479,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut infer_args_for_err = FxHashSet::default(); for &PathSeg(def_id, index) in &path_segs { let seg = &segments[index]; - let generics = self.tcx.generics_of(def_id); + let generics = tcx.generics_of(def_id); // Argument-position `impl Trait` is treated as a normal generic // parameter internally, but we don't allow users to specify the // parameter's value explicitly, so we have to do some error- // checking here. 
let suppress_errors = AstConv::check_generic_arg_count_for_call( - self.tcx, + tcx, span, &generics, &seg, @@ -5153,56 +5498,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } let has_self = path_segs.last().map(|PathSeg(def_id, _)| { - self.tcx.generics_of(*def_id).has_self + tcx.generics_of(*def_id).has_self }).unwrap_or(false); - let mut new_def = def; - let (def_id, ty) = match def { - Def::SelfCtor(impl_def_id) => { - let ty = self.impl_self_ty(span, impl_def_id).ty; - let adt_def = ty.ty_adt_def(); - - match adt_def { - Some(adt_def) if adt_def.has_ctor() => { - let variant = adt_def.non_enum_variant(); - new_def = Def::StructCtor(variant.did, variant.ctor_kind); - (variant.did, self.tcx.type_of(variant.did)) - } - _ => { - let mut err = self.tcx.sess.struct_span_err(span, - "the `Self` constructor can only be used with tuple or unit structs"); - if let Some(adt_def) = adt_def { - match adt_def.adt_kind() { - AdtKind::Enum => { - err.note("did you mean to use one of the enum's variants?"); - }, - AdtKind::Struct | - AdtKind::Union => { - err.span_label( - span, - format!("did you mean `Self {{ /* fields */ }}`?"), - ); - } - } - } - err.emit(); - - (impl_def_id, self.tcx.types.err) - } - } - } - _ => { - let def_id = def.def_id(); - - // The things we are substituting into the type should not contain - // escaping late-bound regions, and nor should the base type scheme. - let ty = self.tcx.type_of(def_id); - (def_id, ty) - } - }; - let substs = AstConv::create_substs_for_generic_args( - self.tcx, + tcx, def_id, &[][..], has_self, @@ -5234,6 +5534,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => { self.to_ty(ty).into() } + (GenericParamDefKind::Const, GenericArg::Const(ct)) => { + self.to_const(&ct.value, self.tcx.type_of(param.def_id)).into() + } _ => unreachable!(), } }, @@ -5248,10 +5551,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // If we have a default, then we it doesn't matter that we're not // inferring the type arguments: we provide the default where any // is missing. - let default = self.tcx.type_of(param.def_id); + let default = tcx.type_of(param.def_id); self.normalize_ty( span, - default.subst_spanned(self.tcx, substs.unwrap(), Some(span)) + default.subst_spanned(tcx, substs.unwrap(), Some(span)) ).into() } else { // If no type arguments were provided, we have to infer them. @@ -5261,6 +5564,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.var_for_def(span, param) } } + GenericParamDefKind::Const => { + // FIXME(const_generics:defaults) + // No const parameters were provided, we have to infer them. + self.var_for_def(span, param) + } } }, ); @@ -5268,8 +5576,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { assert!(!ty.has_escaping_bound_vars()); // First, store the "user substs" for later. - let hir_id = self.tcx.hir().node_to_hir_id(node_id); - self.write_user_substs_from_substs(hir_id, substs, user_self_ty); + self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty); // Add all the obligations that are required, substituting and // normalized appropriately. 
@@ -5284,10 +5591,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty { // In the case of `Foo::method` and `>::method`, if `method` - // is inherent, there is no `Self` parameter, instead, the impl needs + // is inherent, there is no `Self` parameter; instead, the impl needs // type parameters, which we can infer by unifying the provided `Self` // with the substituted impl type. - let ty = self.tcx.type_of(impl_def_id); + // This also occurs for an enum variant on a type alias. + let ty = tcx.type_of(impl_def_id); let impl_ty = self.instantiate_type_scheme(span, &substs, &ty); match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) { @@ -5301,19 +5609,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - self.check_rustc_args_require_const(def_id, node_id, span); + self.check_rustc_args_require_const(def_id, hir_id, span); debug!("instantiate_value_path: type of {:?} is {:?}", - node_id, + hir_id, ty_substituted); self.write_substs(hir_id, substs); - (ty_substituted, new_def) + (ty_substituted, def) } fn check_rustc_args_require_const(&self, def_id: DefId, - node_id: ast::NodeId, + hir_id: hir::HirId, span: Span) { // We're only interested in functions tagged with // #[rustc_args_required_const], so ignore anything that's not. @@ -5323,9 +5631,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // If our calling expression is indeed the function itself, we're good! // If not, generate an error that this can only be called directly. - if let Node::Expr(expr) = self.tcx.hir().get(self.tcx.hir().get_parent_node(node_id)) { - if let hir::ExprKind::Call(ref callee, ..) = expr.node { - if callee.id == node_id { + if let Node::Expr(expr) = self.tcx.hir().get_by_hir_id( + self.tcx.hir().get_parent_node_by_hir_id(hir_id)) + { + if let ExprKind::Call(ref callee, ..) = expr.node { + if callee.hir_id == hir_id { return } } @@ -5353,7 +5663,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } } - fn with_breakable_ctxt R, R>(&self, id: ast::NodeId, + fn with_breakable_ctxt R, R>(&self, id: hir::HirId, ctxt: BreakableCtxt<'gcx, 'tcx>, f: F) -> (BreakableCtxt<'gcx, 'tcx>, R) { let index; @@ -5372,17 +5682,63 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }; (ctxt, result) } + + /// Instantiate a QueryResponse in a probe context, without a + /// good ObligationCause. + fn probe_instantiate_query_response( + &self, + span: Span, + original_values: &OriginalQueryValues<'tcx>, + query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>, + ) -> InferResult<'tcx, Ty<'tcx>> + { + self.instantiate_query_response_and_region_obligations( + &traits::ObligationCause::misc(span, self.body_id), + self.param_env, + original_values, + query_result) + } + + /// Returns `true` if an expression is contained inside the LHS of an assignment expression. + fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool { + let mut contained_in_place = false; + + while let hir::Node::Expr(parent_expr) = + self.tcx.hir().get_by_hir_id(self.tcx.hir().get_parent_node_by_hir_id(expr_id)) + { + match &parent_expr.node { + hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) 
=> { + if lhs.hir_id == expr_id { + contained_in_place = true; + break; + } + } + _ => (), + } + expr_id = parent_expr.hir_id; + } + + contained_in_place + } } pub fn check_bounds_are_used<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, generics: &ty::Generics, ty: Ty<'tcx>) { let own_counts = generics.own_counts(); - debug!("check_bounds_are_used(n_tps={}, ty={:?})", own_counts.types, ty); + debug!( + "check_bounds_are_used(n_tys={}, n_cts={}, ty={:?})", + own_counts.types, + own_counts.consts, + ty + ); + + // FIXME(const_generics): we probably want to check the bounds for const parameters too. if own_counts.types == 0 { return; } + // Make a vector of booleans initially false, set to true when used. let mut types_used = vec![false; own_counts.types]; @@ -5404,8 +5760,8 @@ pub fn check_bounds_are_used<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }); for (&used, param) in types_used.iter().zip(types) { if !used { - let id = tcx.hir().as_local_node_id(param.def_id).unwrap(); - let span = tcx.hir().span(id); + let id = tcx.hir().as_local_hir_id(param.def_id).unwrap(); + let span = tcx.hir().span_by_hir_id(id); struct_span_err!(tcx.sess, span, E0091, "type parameter `{}` is unused", param.name) .span_label(span, "unused type parameter") .emit(); @@ -5426,7 +5782,7 @@ fn fatally_break_rust(sess: &Session) { ); handler.note_without_error(&format!("rustc {} running on {}", option_env!("CFG_VERSION").unwrap_or("unknown_version"), - ::session::config::host_triple(), + crate::session::config::host_triple(), )); } diff --git a/src/librustc_typeck/check/op.rs b/src/librustc_typeck/check/op.rs index c40789ce8bae0..d6932094dddb6 100644 --- a/src/librustc_typeck/check/op.rs +++ b/src/librustc_typeck/check/op.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Code related to processing overloaded binary and unary operators. use super::{FnCtxt, Needs}; @@ -22,7 +12,7 @@ use syntax::ast::Ident; use rustc::hir; impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { - /// Check a `a = b` + /// Checks a `a = b` pub fn check_binop_assign(&self, expr: &'gcx hir::Expr, op: hir::BinOp, @@ -52,7 +42,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { ty } - /// Check a potentially overloaded binary operator. + /// Checks a potentially overloaded binary operator. 
pub fn check_binop(&self, expr: &'gcx hir::Expr, op: hir::BinOp, @@ -61,8 +51,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { { let tcx = self.tcx; - debug!("check_binop(expr.id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})", - expr.id, + debug!("check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})", + expr.hir_id, expr, op, lhs_expr, @@ -160,8 +150,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_assign: IsAssign) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) { - debug!("check_overloaded_binop(expr.id={}, op={:?}, is_assign={:?})", - expr.id, + debug!("check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})", + expr.hir_id, op, is_assign); @@ -272,9 +262,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let mut suggested_deref = false; if let Ref(_, mut rty, _) = lhs_ty.sty { if { - !self.infcx.type_moves_by_default(self.param_env, - rty, - lhs_expr.span) && + self.infcx.type_is_copy_modulo_regions(self.param_env, + rty, + lhs_expr.span) && self.lookup_op_method(rty, &[rhs_ty], Op::Binary(op, is_assign)) @@ -290,7 +280,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { rty, lstring, ); - err.span_suggestion_with_applicability( + err.span_suggestion( lhs_expr.span, msg, format!("*{}", lstring), @@ -316,7 +306,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(missing_trait) = missing_trait { if op.node == hir::BinOpKind::Add && self.check_str_addition(expr, lhs_expr, rhs_expr, lhs_ty, - rhs_ty, &mut err, true) { + rhs_ty, &mut err, true, op) { // This has nothing here because it means we did string // concatenation (e.g., "Hello " += "World!"). This means // we don't want the note in the else clause to be emitted @@ -337,16 +327,22 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { err.emit(); } IsAssign::No => { - let mut err = struct_span_err!(self.tcx.sess, expr.span, E0369, + let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "binary operation `{}` cannot be applied to type `{}`", op.node.as_str(), lhs_ty); + + if !lhs_expr.span.eq(&rhs_expr.span) { + err.span_label(lhs_expr.span, lhs_ty.to_string()); + err.span_label(rhs_expr.span, rhs_ty.to_string()); + } + let mut suggested_deref = false; if let Ref(_, mut rty, _) = lhs_ty.sty { if { - !self.infcx.type_moves_by_default(self.param_env, - rty, - lhs_expr.span) && + self.infcx.type_is_copy_modulo_regions(self.param_env, + rty, + lhs_expr.span) && self.lookup_op_method(rty, &[rhs_ty], Op::Binary(op, is_assign)) @@ -390,7 +386,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { if let Some(missing_trait) = missing_trait { if op.node == hir::BinOpKind::Add && self.check_str_addition(expr, lhs_expr, rhs_expr, lhs_ty, - rhs_ty, &mut err, false) { + rhs_ty, &mut err, false, op) { // This has nothing here because it means we did string // concatenation (e.g., "Hello " + "World!"). 
This means // we don't want the note in the else clause to be emitted @@ -426,8 +422,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { rhs_expr: &'gcx hir::Expr, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>, - err: &mut errors::DiagnosticBuilder, + err: &mut errors::DiagnosticBuilder<'_>, is_assign: bool, + op: hir::BinOp, ) -> bool { let source_map = self.tcx.sess.source_map(); let msg = "`to_owned()` can be used to create an owned `String` \ @@ -441,10 +438,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) if l_ty.sty == Str && r_ty.sty == Str => { if !is_assign { - err.span_label(expr.span, + err.span_label(op.span, "`+` can't be used to concatenate two `&str` strings"); match source_map.span_to_snippet(lhs_expr.span) { - Ok(lstring) => err.span_suggestion_with_applicability( + Ok(lstring) => err.span_suggestion( lhs_expr.span, msg, format!("{}.to_owned()", lstring), @@ -465,7 +462,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { is_assign, ) { (Ok(l), Ok(r), false) => { - err.multipart_suggestion_with_applicability( + err.multipart_suggestion( msg, vec![ (lhs_expr.span, format!("{}.to_owned()", l)), @@ -682,7 +679,7 @@ enum Op { Unary(hir::UnOp, Span), } -/// Returns true if this is a built-in arithmetic operation (e.g., u32 +/// Returns `true` if this is a built-in arithmetic operation (e.g., u32 /// + u32, i16x4 == i16x4) and false if these types would have to be /// overloaded to be legal. There are two reasons that we distinguish /// builtin operations from overloaded ones (vs trying to drive @@ -691,14 +688,14 @@ enum Op { /// /// 1. Builtin operations can trivially be evaluated in constants. /// 2. For comparison operators applied to SIMD types the result is -/// not of type `bool`. For example, `i16x4==i16x4` yields a +/// not of type `bool`. For example, `i16x4 == i16x4` yields a /// type like `i16x4`. This means that the overloaded trait /// `PartialEq` is not applicable. /// /// Reason #2 is the killer. I tried for a while to always use /// overloaded logic and just check the types in constants/codegen after /// the fact, and it worked fine, except for SIMD types. -nmatsakis -fn is_builtin_binop(lhs: Ty, rhs: Ty, op: hir::BinOp) -> bool { +fn is_builtin_binop(lhs: Ty<'_>, rhs: Ty<'_>, op: hir::BinOp) -> bool { match BinOpCategory::from(op) { BinOpCategory::Shortcircuit => { true diff --git a/src/librustc_typeck/check/regionck.rs b/src/librustc_typeck/check/regionck.rs index 7960d743de5e4..a03d33a3ef5bc 100644 --- a/src/librustc_typeck/check/regionck.rs +++ b/src/librustc_typeck/check/regionck.rs @@ -1,16 +1,6 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The region check is a final pass that runs over the AST after we have //! inferred the type constraints but before we have actually finalized -//! the types. Its purpose is to embed a variety of region constraints. +//! the types. Its purpose is to embed a variety of region constraints. //! Inserting these constraints as a separate pass is good because (1) it //! localizes the code that has to do with region inference and (2) often //! we cannot know what constraints are needed until the basic types have @@ -44,17 +34,17 @@ //! #### Reborrows //! //! 
Generally speaking, `regionck` does NOT try to ensure that the data -//! `data` will outlive the pointer `x`. That is the job of borrowck. The +//! `data` will outlive the pointer `x`. That is the job of borrowck. The //! one exception is when "re-borrowing" the contents of another borrowed //! pointer. For example, imagine you have a borrowed pointer `b` with -//! lifetime L1 and you have an expression `&*b`. The result of this -//! expression will be another borrowed pointer with lifetime L2 (which is +//! lifetime `L1` and you have an expression `&*b`. The result of this +//! expression will be another borrowed pointer with lifetime `L2` (which is //! an inference variable). The borrow checker is going to enforce the -//! constraint that L2 < L1, because otherwise you are re-borrowing data -//! for a lifetime larger than the original loan. However, without the +//! constraint that `L2 < L1`, because otherwise you are re-borrowing data +//! for a lifetime larger than the original loan. However, without the //! routines in this module, the region inferencer would not know of this -//! dependency and thus it might infer the lifetime of L2 to be greater -//! than L1 (issue #3148). +//! dependency and thus it might infer the lifetime of `L2` to be greater +//! than `L1` (issue #3148). //! //! There are a number of troublesome scenarios in the tests //! `region-dependent-*.rs`, but here is one example: @@ -72,26 +62,26 @@ //! //! The key point here is that when you are borrowing a value that //! is "guaranteed" by a borrowed pointer, you must link the -//! lifetime of that borrowed pointer (L1, here) to the lifetime of -//! the borrow itself (L2). What do I mean by "guaranteed" by a +//! lifetime of that borrowed pointer (`L1`, here) to the lifetime of +//! the borrow itself (`L2`). What do I mean by "guaranteed" by a //! borrowed pointer? I mean any data that is reached by first //! dereferencing a borrowed pointer and then either traversing -//! interior offsets or boxes. We say that the guarantor +//! interior offsets or boxes. We say that the guarantor //! of such data is the region of the borrowed pointer that was -//! traversed. This is essentially the same as the ownership +//! traversed. This is essentially the same as the ownership //! relation, except that a borrowed pointer never owns its //! contents. -use check::dropck; -use check::FnCtxt; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; -use middle::region; +use crate::check::dropck; +use crate::check::FnCtxt; +use crate::middle::mem_categorization as mc; +use crate::middle::mem_categorization::Categorization; +use crate::middle::region; use rustc::hir::def_id::DefId; use rustc::infer::outlives::env::OutlivesEnvironment; use rustc::infer::{self, RegionObligation, SuppressRegionErrors}; use rustc::ty::adjustment; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{SubstsRef, UnpackedKind}; use rustc::ty::{self, Ty}; use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor}; @@ -100,7 +90,6 @@ use rustc_data_structures::sync::Lrc; use std::mem; use std::ops::Deref; use std::rc::Rc; -use syntax::ast; use syntax_pos::Span; // a variation on try that just returns unit @@ -122,7 +111,7 @@ macro_rules! 
ignore_err { impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn regionck_expr(&self, body: &'gcx hir::Body) { let subject = self.tcx.hir().body_owner_def_id(body.id()); - let id = body.value.id; + let id = body.value.hir_id; let mut rcx = RegionCtxt::new( self, RepeatingScope(id), @@ -148,9 +137,9 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// Region checking during the WF phase for items. `wf_tys` are the /// types from which we should derive implied bounds, if any. - pub fn regionck_item(&self, item_id: ast::NodeId, span: Span, wf_tys: &[Ty<'tcx>]) { + pub fn regionck_item(&self, item_id: hir::HirId, span: Span, wf_tys: &[Ty<'tcx>]) { debug!("regionck_item(item.id={:?}, wf_tys={:?})", item_id, wf_tys); - let subject = self.tcx.hir().local_def_id(item_id); + let subject = self.tcx.hir().local_def_id_from_hir_id(item_id); let mut rcx = RegionCtxt::new( self, RepeatingScope(item_id), @@ -173,21 +162,21 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { /// rest of type check and because sometimes we need type /// inference to have completed before we can determine which /// constraints to add. - pub fn regionck_fn(&self, fn_id: ast::NodeId, body: &'gcx hir::Body) { + pub fn regionck_fn(&self, fn_id: hir::HirId, body: &'gcx hir::Body) { debug!("regionck_fn(id={})", fn_id); let subject = self.tcx.hir().body_owner_def_id(body.id()); - let node_id = body.value.id; + let hir_id = body.value.hir_id; let mut rcx = RegionCtxt::new( self, - RepeatingScope(node_id), - node_id, + RepeatingScope(hir_id), + hir_id, Subject(subject), self.param_env, ); if self.err_count_since_creation() == 0 { // regionck assumes typeck succeeded - rcx.visit_fn_body(fn_id, body, self.tcx.hir().span(fn_id)); + rcx.visit_fn_body(fn_id, body, self.tcx.hir().span_by_hir_id(fn_id)); } rcx.resolve_regions_and_report_errors(SuppressRegionErrors::when_nll_is_enabled(self.tcx)); @@ -211,13 +200,13 @@ pub struct RegionCtxt<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> { outlives_environment: OutlivesEnvironment<'tcx>, // id of innermost fn body id - body_id: ast::NodeId, + body_id: hir::HirId, // call_site scope of innermost fn call_site_scope: Option, // id of innermost fn or loop - repeating_scope: ast::NodeId, + repeating_scope: hir::HirId, // id of AST node being analyzed (the subject of the analysis). subject_def_id: DefId, @@ -230,14 +219,14 @@ impl<'a, 'gcx, 'tcx> Deref for RegionCtxt<'a, 'gcx, 'tcx> { } } -pub struct RepeatingScope(ast::NodeId); +pub struct RepeatingScope(hir::HirId); pub struct Subject(DefId); impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { pub fn new( fcx: &'a FnCtxt<'a, 'gcx, 'tcx>, RepeatingScope(initial_repeating_scope): RepeatingScope, - initial_body_id: ast::NodeId, + initial_body_id: hir::HirId, Subject(subject): Subject, param_env: ty::ParamEnv<'tcx>, ) -> RegionCtxt<'a, 'gcx, 'tcx> { @@ -254,15 +243,15 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } } - fn set_repeating_scope(&mut self, scope: ast::NodeId) -> ast::NodeId { + fn set_repeating_scope(&mut self, scope: hir::HirId) -> hir::HirId { mem::replace(&mut self.repeating_scope, scope) } - /// Try to resolve the type for the given node, returning t_err if an error results. Note that + /// Try to resolve the type for the given node, returning `t_err` if an error results. Note that /// we never care about the details of the error, the same error will be detected and reported /// in the writeback phase. /// - /// Note one important point: we do not attempt to resolve *region variables* here. 
This is + /// Note one important point: we do not attempt to resolve *region variables* here. This is /// because regionck is essentially adding constraints to those region variables and so may yet /// influence how they are resolved. /// @@ -276,9 +265,9 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /// } /// ``` /// - /// Here, the region of `b` will be ``. `` is constrained to be some subregion of the - /// block B and some superregion of the call. If we forced it now, we'd choose the smaller - /// region (the call). But that would make the *b illegal. Since we don't resolve, the type + /// Here, the region of `b` will be ``. `` is constrained to be some subregion of the + /// block B and some superregion of the call. If we forced it now, we'd choose the smaller + /// region (the call). But that would make the *b illegal. Since we don't resolve, the type /// of b will be `&.i32` and then `*b` will require that `` be bigger than the let and /// the `*b` expression, so we will effectively resolve `` to be the block B. pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> { @@ -311,15 +300,15 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { /// `intravisit::Visitor` impl below.) fn visit_fn_body( &mut self, - id: ast::NodeId, // the id of the fn itself + id: hir::HirId, // the id of the fn itself body: &'gcx hir::Body, span: Span, ) { // When we enter a function, we can derive - debug!("visit_fn_body(id={})", id); + debug!("visit_fn_body(id={:?})", id); let body_id = body.id(); - self.body_id = body_id.node_id; + self.body_id = body_id.hir_id; let call_site = region::Scope { id: body.value.hir_id.local_id, @@ -328,11 +317,10 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { self.call_site_scope = Some(call_site); let fn_sig = { - let fn_hir_id = self.tcx.hir().node_to_hir_id(id); - match self.tables.borrow().liberated_fn_sigs().get(fn_hir_id) { + match self.tables.borrow().liberated_fn_sigs().get(id) { Some(f) => f.clone(), None => { - bug!("No fn-sig entry for id={}", id); + bug!("No fn-sig entry for id={:?}", id); } } }; @@ -352,11 +340,11 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { self.outlives_environment.add_implied_bounds( self.fcx, &fn_sig_tys[..], - body_id.node_id, + body_id.hir_id, span, ); self.outlives_environment - .save_implied_bounds(body_id.node_id); + .save_implied_bounds(body_id.hir_id); self.link_fn_args( region::Scope { id: body.value.hir_id.local_id, @@ -365,7 +353,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { &body.arguments, ); self.visit_body(body); - self.visit_region_obligations(body_id.node_id); + self.visit_region_obligations(body_id.hir_id); let call_site_scope = self.call_site_scope.unwrap(); debug!( @@ -375,8 +363,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { ); let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope)); - let body_hir_id = self.tcx.hir().node_to_hir_id(body_id.node_id); - self.type_of_node_must_outlive(infer::CallReturn(span), body_hir_id, call_site_region); + self.type_of_node_must_outlive(infer::CallReturn(span), body_id.hir_id, call_site_region); self.constrain_opaque_types( &self.fcx.opaque_types.borrow(), @@ -384,8 +371,8 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { ); } - fn visit_region_obligations(&mut self, node_id: ast::NodeId) { - debug!("visit_region_obligations: node_id={}", node_id); + fn visit_region_obligations(&mut self, hir_id: hir::HirId) { + debug!("visit_region_obligations: hir_id={:?}", hir_id); // region checking can introduce new pending 
obligations // which, when processed, might generate new region @@ -467,7 +454,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { _: &'gcx hir::FnDecl, body_id: hir::BodyId, span: Span, - id: ast::NodeId, + hir_id: hir::HirId, ) { assert!( match fk { @@ -484,7 +471,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { let env_snapshot = self.outlives_environment.push_snapshot_pre_closure(); let body = self.tcx.hir().body(body_id); - self.visit_fn_body(id, body, span); + self.visit_fn_body(hir_id, body, span); // Restore state from previous function. self.outlives_environment @@ -512,7 +499,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { fn visit_expr(&mut self, expr: &'gcx hir::Expr) { debug!( - "regionck::visit_expr(e={:?}, repeating_scope={})", + "regionck::visit_expr(e={:?}, repeating_scope={:?})", expr, self.repeating_scope ); @@ -565,7 +552,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { } debug!( - "regionck::visit_expr(e={:?}, repeating_scope={}) - visiting subexprs", + "regionck::visit_expr(e={:?}, repeating_scope={:?}) - visiting subexprs", expr, self.repeating_scope ); match expr.node { @@ -689,16 +676,16 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { } hir::ExprKind::Loop(ref body, _, _) => { - let repeating_scope = self.set_repeating_scope(body.id); + let repeating_scope = self.set_repeating_scope(body.hir_id); intravisit::walk_expr(self, expr); self.set_repeating_scope(repeating_scope); } hir::ExprKind::While(ref cond, ref body, _) => { - let repeating_scope = self.set_repeating_scope(cond.id); + let repeating_scope = self.set_repeating_scope(cond.hir_id); self.visit_expr(&cond); - self.set_repeating_scope(body.id); + self.set_repeating_scope(body.hir_id); self.visit_block(&body); self.set_repeating_scope(repeating_scope); @@ -707,8 +694,8 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> { hir::ExprKind::Ret(Some(ref ret_expr)) => { let call_site_scope = self.call_site_scope; debug!( - "visit_expr ExprKind::Ret ret_expr.id {} call_site_scope: {:?}", - ret_expr.id, call_site_scope + "visit_expr ExprKind::Ret ret_expr.hir_id {} call_site_scope: {:?}", + ret_expr.hir_id, call_site_scope ); let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope.unwrap())); self.type_of_node_must_outlive( @@ -768,7 +755,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } fn check_expr_fn_block(&mut self, expr: &'gcx hir::Expr, body_id: hir::BodyId) { - let repeating_scope = self.set_repeating_scope(body_id.node_id); + let repeating_scope = self.set_repeating_scope(body_id.hir_id); intravisit::walk_expr(self, expr); self.set_repeating_scope(repeating_scope); } @@ -836,7 +823,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { } } - /// Create a temporary `MemCategorizationContext` and pass it to the closure. + /// Creates a temporary `MemCategorizationContext` and pass it to the closure. 
fn with_mc(&self, f: F) -> R where F: for<'b> FnOnce(mc::MemCategorizationContext<'b, 'gcx, 'tcx>) -> R, @@ -1406,7 +1393,7 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { fn substs_wf_in_scope( &mut self, origin: infer::ParameterOrigin, - substs: &Substs<'tcx>, + substs: SubstsRef<'tcx>, expr_span: Span, expr_region: ty::Region<'tcx>, ) { @@ -1420,13 +1407,19 @@ impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> { let origin = infer::ParameterInScope(origin, expr_span); - for region in substs.regions() { - self.sub_regions(origin.clone(), expr_region, region); - } - - for ty in substs.types() { - let ty = self.resolve_type(ty); - self.type_must_outlive(origin.clone(), ty, expr_region); + for kind in substs { + match kind.unpack() { + UnpackedKind::Lifetime(lt) => { + self.sub_regions(origin.clone(), expr_region, lt); + } + UnpackedKind::Type(ty) => { + let ty = self.resolve_type(ty); + self.type_must_outlive(origin.clone(), ty, expr_region); + } + UnpackedKind::Const(_) => { + // Const parameters don't impose constraints. + } + } } } } diff --git a/src/librustc_typeck/check/upvar.rs b/src/librustc_typeck/check/upvar.rs index 562022cefa806..a76dfdd69ba97 100644 --- a/src/librustc_typeck/check/upvar.rs +++ b/src/librustc_typeck/check/upvar.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! ### Inferring borrow kinds for upvars //! //! Whenever there is a closure expression, we need to determine how each @@ -42,9 +32,9 @@ use super::FnCtxt; -use middle::expr_use_visitor as euv; -use middle::mem_categorization as mc; -use middle::mem_categorization::Categorization; +use crate::middle::expr_use_visitor as euv; +use crate::middle::mem_categorization as mc; +use crate::middle::mem_categorization::Categorization; use rustc::hir; use rustc::hir::def_id::DefId; use rustc::hir::def_id::LocalDefId; @@ -77,7 +67,7 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for InferBorrowKindVisitor<'a, 'gcx, 'tcx> { let body = self.fcx.tcx.hir().body(body_id); self.visit_body(body); self.fcx - .analyze_closure(expr.id, expr.hir_id, expr.span, body, cc); + .analyze_closure(expr.hir_id, expr.span, body, cc); } intravisit::walk_expr(self, expr); @@ -87,7 +77,6 @@ impl<'a, 'gcx, 'tcx> Visitor<'gcx> for InferBorrowKindVisitor<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn analyze_closure( &self, - closure_node_id: ast::NodeId, closure_hir_id: hir::HirId, span: Span, body: &hir::Body, @@ -99,24 +88,25 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { debug!( "analyze_closure(id={:?}, body.id={:?})", - closure_node_id, + closure_hir_id, body.id() ); // Extract the type of the closure. 
- let (closure_def_id, substs) = match self.node_ty(closure_hir_id).sty { + let ty = self.node_ty(closure_hir_id); + let (closure_def_id, substs) = match ty.sty { ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)), ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)), ty::Error => { // #51714: skip analysis when we have already encountered type errors return; } - ref t => { + _ => { span_bug!( span, "type of closure expr {:?} is not a closure {:?}", - closure_node_id, - t + closure_hir_id, + ty ); } }; @@ -131,15 +121,19 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { None }; - self.tcx.with_freevars(closure_node_id, |freevars| { + self.tcx.with_freevars(closure_hir_id, |freevars| { + let mut freevar_list: Vec = Vec::with_capacity(freevars.len()); for freevar in freevars { let upvar_id = ty::UpvarId { var_path: ty::UpvarPath { - hir_id : self.tcx.hir().node_to_hir_id(freevar.var_id()), + hir_id: self.tcx.hir().node_to_hir_id(freevar.var_id()), }, closure_expr_id: LocalDefId::from_def_id(closure_def_id), }; debug!("seed upvar_id {:?}", upvar_id); + // Adding the upvar Id to the list of Upvars, which will be added + // to the map for the closure at the end of the for loop. + freevar_list.push(upvar_id); let capture_kind = match capture_clause { hir::CaptureByValue => ty::UpvarCapture::ByValue, @@ -159,6 +153,15 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { .upvar_capture_map .insert(upvar_id, capture_kind); } + // Add the vector of freevars to the map keyed with the closure id. + // This gives us an easier access to them without having to call + // with_freevars again.. + if !freevar_list.is_empty() { + self.tables + .borrow_mut() + .upvar_list + .insert(closure_def_id, freevar_list); + } }); let body_owner_def_id = self.tcx.hir().body_owner_def_id(body.id()); @@ -176,7 +179,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { self.param_env, region_scope_tree, &self.tables.borrow(), - ).consume_body(body); + ) + .consume_body(body); if let Some(closure_substs) = infer_kind { // Unify the (as yet unbound) type variable in the closure @@ -212,10 +216,10 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { // inference algorithm will reject it). // Equate the type variables for the upvars with the actual types. - let final_upvar_tys = self.final_upvar_tys(closure_node_id); + let final_upvar_tys = self.final_upvar_tys(closure_hir_id); debug!( "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}", - closure_node_id, substs, final_upvar_tys + closure_hir_id, substs, final_upvar_tys ); for (upvar_ty, final_upvar_ty) in substs .upvar_tys(closure_def_id, self.tcx) @@ -233,14 +237,14 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { } // Returns a list of `ClosureUpvar`s for each upvar. - fn final_upvar_tys(&self, closure_id: ast::NodeId) -> Vec> { + fn final_upvar_tys(&self, closure_id: hir::HirId) -> Vec> { // Presently an unboxed closure type cannot "escape" out of a // function, so we will only encounter ones that originated in the // local crate or were inlined into it along with some function. // This may change if abstract return types of some sort are // implemented. 
let tcx = self.tcx; - let closure_def_index = tcx.hir().local_def_id(closure_id); + let closure_def_index = tcx.hir().local_def_id_from_hir_id(closure_id); tcx.with_freevars(closure_id, |freevars| { freevars @@ -250,9 +254,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { let var_hir_id = tcx.hir().node_to_hir_id(var_node_id); let freevar_ty = self.node_ty(var_hir_id); let upvar_id = ty::UpvarId { - var_path: ty::UpvarPath { - hir_id: var_hir_id, - }, + var_path: ty::UpvarPath { hir_id: var_hir_id }, closure_expr_id: LocalDefId::from_def_id(closure_def_index), }; let capture = self.tables.borrow().upvar_capture(upvar_id); @@ -272,7 +274,8 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { }, ), } - }).collect() + }) + .collect() }) } } @@ -579,7 +582,7 @@ impl<'a, 'gcx, 'tcx> InferBorrowKind<'a, 'gcx, 'tcx> { impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { fn consume( &mut self, - _consume_id: ast::NodeId, + _consume_id: hir::HirId, _consume_span: Span, cmt: &mc::cmt_<'tcx>, mode: euv::ConsumeMode, @@ -608,7 +611,7 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { fn borrow( &mut self, - borrow_id: ast::NodeId, + borrow_id: hir::HirId, _borrow_span: Span, cmt: &mc::cmt_<'tcx>, _loan_region: ty::Region<'tcx>, @@ -631,11 +634,11 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { } } - fn decl_without_init(&mut self, _id: ast::NodeId, _span: Span) {} + fn decl_without_init(&mut self, _id: hir::HirId, _span: Span) {} fn mutate( &mut self, - _assignment_id: ast::NodeId, + _assignment_id: hir::HirId, _assignment_span: Span, assignee_cmt: &mc::cmt_<'tcx>, _mode: euv::MutateMode, @@ -646,7 +649,6 @@ impl<'a, 'gcx, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'gcx, 'tcx> { } } -fn var_name(tcx: TyCtxt, var_hir_id: hir::HirId) -> ast::Name { - let var_node_id = tcx.hir().hir_to_node_id(var_hir_id); - tcx.hir().name(var_node_id) +fn var_name(tcx: TyCtxt<'_, '_, '_>, var_hir_id: hir::HirId) -> ast::Name { + tcx.hir().name_by_hir_id(var_hir_id) } diff --git a/src/librustc_typeck/check/wfcheck.rs b/src/librustc_typeck/check/wfcheck.rs index 6471e745aa6fd..d108e7c3107af 100644 --- a/src/librustc_typeck/check/wfcheck.rs +++ b/src/librustc_typeck/check/wfcheck.rs @@ -1,22 +1,12 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use check::{Inherited, FnCtxt}; -use constrained_type_params::{identify_constrained_type_params, Parameter}; - -use hir::def_id::DefId; +use crate::check::{Inherited, FnCtxt}; +use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter}; + +use crate::hir::def_id::DefId; use rustc::traits::{self, ObligationCauseCode}; -use rustc::ty::{self, Lift, Ty, TyCtxt, TyKind, GenericParamDefKind, TypeFoldable}; -use rustc::ty::subst::{Subst, Substs}; -use rustc::ty::util::ExplicitSelf; +use rustc::ty::{self, Lift, Ty, TyCtxt, GenericParamDefKind, TypeFoldable, ToPredicate}; +use rustc::ty::subst::{Subst, InternalSubsts}; use rustc::util::nodemap::{FxHashSet, FxHashMap}; +use rustc::mir::interpret::ConstValue; use rustc::middle::lang_items; use rustc::infer::opaque_types::may_define_existential_type; @@ -25,7 +15,7 @@ use syntax::feature_gate::{self, GateIssue}; use syntax_pos::Span; use errors::{DiagnosticBuilder, DiagnosticId}; -use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap}; +use rustc::hir::itemlikevisit::ParItemLikeVisitor; use rustc::hir; /// Helper type of a temporary returned by `.for_item(...)`. @@ -33,7 +23,7 @@ use rustc::hir; /// `F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(FnCtxt<'b, 'gcx, 'tcx>)`. struct CheckWfFcxBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> { inherited: super::InheritedBuilder<'a, 'gcx, 'tcx>, - id: ast::NodeId, + id: hir::HirId, span: Span, param_env: ty::ParamEnv<'tcx>, } @@ -73,12 +63,12 @@ impl<'a, 'gcx, 'tcx> CheckWfFcxBuilder<'a, 'gcx, 'tcx> { /// not included it frequently leads to confusing errors in fn bodies. So it's better to check /// the types first. pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let item = tcx.hir().expect_item(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let item = tcx.hir().expect_item_by_hir_id(hir_id); - debug!("check_item_well_formed(it.id={}, it.name={})", - item.id, - tcx.item_path_str(def_id)); + debug!("check_item_well_formed(it.hir_id={:?}, it.name={})", + item.hir_id, + tcx.def_path_str(def_id)); match item.node { // Right now we check that every default trait implementation @@ -99,7 +89,7 @@ pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: Def // won't be allowed unless there's an *explicit* implementation of `Send` // for `T` hir::ItemKind::Impl(_, polarity, defaultness, _, ref trait_ref, ref self_ty, _) => { - let is_auto = tcx.impl_trait_ref(tcx.hir().local_def_id(item.id)) + let is_auto = tcx.impl_trait_ref(tcx.hir().local_def_id_from_hir_id(item.hir_id)) .map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id)); if let (hir::Defaultness::Default { .. }, true) = (defaultness, is_auto) { tcx.sess.span_err(item.span, "impls of auto traits cannot be default"); @@ -119,14 +109,14 @@ pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: Def check_item_fn(tcx, item); } hir::ItemKind::Static(ref ty, ..) => { - check_item_type(tcx, item.id, ty.span, false); + check_item_type(tcx, item.hir_id, ty.span, false); } hir::ItemKind::Const(ref ty, ..) => { - check_item_type(tcx, item.id, ty.span, false); + check_item_type(tcx, item.hir_id, ty.span, false); } hir::ItemKind::ForeignMod(ref module) => for it in module.items.iter() { if let hir::ForeignItemKind::Static(ref ty, ..) 
= it.node { - check_item_type(tcx, it.id, ty.span, true); + check_item_type(tcx, it.hir_id, ty.span, true); } }, hir::ItemKind::Struct(ref struct_def, ref ast_generics) => { @@ -161,36 +151,36 @@ pub fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: Def } pub fn check_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let trait_item = tcx.hir().expect_trait_item(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let trait_item = tcx.hir().expect_trait_item(hir_id); let method_sig = match trait_item.node { hir::TraitItemKind::Method(ref sig, _) => Some(sig), _ => None }; - check_associated_item(tcx, trait_item.id, trait_item.span, method_sig); + check_associated_item(tcx, trait_item.hir_id, trait_item.span, method_sig); } pub fn check_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let impl_item = tcx.hir().expect_impl_item(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let impl_item = tcx.hir().expect_impl_item(hir_id); let method_sig = match impl_item.node { hir::ImplItemKind::Method(ref sig, _) => Some(sig), _ => None }; - check_associated_item(tcx, impl_item.id, impl_item.span, method_sig); + check_associated_item(tcx, impl_item.hir_id, impl_item.span, method_sig); } fn check_associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - item_id: ast::NodeId, + item_id: hir::HirId, span: Span, sig_if_method: Option<&hir::MethodSig>) { debug!("check_associated_item: {:?}", item_id); let code = ObligationCauseCode::MiscObligation; for_id(tcx, item_id, span).with_fcx(|fcx, tcx| { - let item = fcx.tcx.associated_item(fcx.tcx.hir().local_def_id(item_id)); + let item = fcx.tcx.associated_item(fcx.tcx.hir().local_def_id_from_hir_id(item_id)); let (mut implied_bounds, self_ty) = match item.container { ty::TraitContainer(_) => (vec![], fcx.tcx.mk_self_type()), @@ -231,12 +221,12 @@ fn check_associated_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, fn for_item<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, item: &hir::Item) -> CheckWfFcxBuilder<'a, 'gcx, 'tcx> { - for_id(tcx, item.id, item.span) + for_id(tcx, item.hir_id, item.span) } -fn for_id<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, id: ast::NodeId, span: Span) +fn for_id<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, id: hir::HirId, span: Span) -> CheckWfFcxBuilder<'a, 'gcx, 'tcx> { - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); CheckWfFcxBuilder { inherited: Inherited::build(tcx, def_id), id, @@ -252,7 +242,7 @@ fn check_type_defn<'a, 'tcx, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, { for_item(tcx, item).with_fcx(|fcx, fcx_tcx| { let variants = lookup_fields(fcx); - let def_id = fcx.tcx.hir().local_def_id(item.id); + let def_id = fcx.tcx.hir().local_def_id_from_hir_id(item.hir_id); let packed = fcx.tcx.adt_def(def_id).repr.packed(); for variant in &variants { @@ -261,11 +251,14 @@ fn check_type_defn<'a, 'tcx, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let needs_drop_copy = || { packed && { let ty = variant.fields.last().unwrap().ty; - let ty = fcx.tcx.erase_regions(&ty).lift_to_tcx(fcx_tcx) + fcx.tcx.erase_regions(&ty).lift_to_tcx(fcx_tcx) + .map(|ty| ty.needs_drop(fcx_tcx, fcx_tcx.param_env(def_id))) .unwrap_or_else(|| { - span_bug!(item.span, "inference variables in {:?}", ty) - }); - ty.needs_drop(fcx_tcx, fcx_tcx.param_env(def_id)) + fcx_tcx.sess.delay_span_bug( + item.span, &format!("inference 
variables in {:?}", ty)); + // Just treat unresolved type expression as if it needs drop. + true + }) } }; let all_sized = @@ -313,9 +306,9 @@ fn check_type_defn<'a, 'tcx, F>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn check_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { - debug!("check_trait: {:?}", item.id); + debug!("check_trait: {:?}", item.hir_id); - let trait_def_id = tcx.hir().local_def_id(item.id); + let trait_def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id); let trait_def = tcx.trait_def(trait_def_id); if trait_def.is_marker { @@ -337,7 +330,7 @@ fn check_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { fn check_item_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { for_item(tcx, item).with_fcx(|fcx, tcx| { - let def_id = fcx.tcx.hir().local_def_id(item.id); + let def_id = fcx.tcx.hir().local_def_id_from_hir_id(item.hir_id); let sig = fcx.tcx.fn_sig(def_id); let sig = fcx.normalize_associated_types_in(item.span, &sig); let mut implied_bounds = vec![]; @@ -349,19 +342,19 @@ fn check_item_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item) { fn check_item_type<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, - item_id: ast::NodeId, + item_id: hir::HirId, ty_span: Span, allow_foreign_ty: bool, ) { debug!("check_item_type: {:?}", item_id); for_id(tcx, item_id, ty_span).with_fcx(|fcx, gcx| { - let ty = gcx.type_of(gcx.hir().local_def_id(item_id)); + let ty = gcx.type_of(gcx.hir().local_def_id_from_hir_id(item_id)); let item_ty = fcx.normalize_associated_types_in(ty_span, &ty); let mut forbid_unsized = true; if allow_foreign_ty { - if let TyKind::Foreign(_) = fcx.tcx.struct_tail(item_ty).sty { + if let ty::Foreign(_) = fcx.tcx.struct_tail(item_ty).sty { forbid_unsized = false; } } @@ -387,7 +380,7 @@ fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, debug!("check_impl: {:?}", item); for_item(tcx, item).with_fcx(|fcx, tcx| { - let item_def_id = fcx.tcx.hir().local_def_id(item.id); + let item_def_id = fcx.tcx.hir().local_def_id_from_hir_id(item.hir_id); match *ast_trait_ref { Some(ref ast_trait_ref) => { @@ -419,7 +412,7 @@ fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }); } -/// Checks where clauses and inline bounds that are declared on def_id. +/// Checks where-clauses and inline bounds that are declared on `def_id`. fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( tcx: TyCtxt<'a, 'gcx, 'gcx>, fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, @@ -427,9 +420,6 @@ fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( def_id: DefId, return_ty: Option>, ) { - use ty::subst::Subst; - use rustc::ty::TypeFoldable; - let predicates = fcx.tcx.predicates_of(def_id); let generics = tcx.generics_of(def_id); @@ -447,7 +437,7 @@ fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( // struct Foo> { .. } // Here the default `Vec<[u32]>` is not WF because `[u32]: Sized` does not hold. for param in &generics.params { - if let GenericParamDefKind::Type {..} = param.kind { + if let GenericParamDefKind::Type { .. } = param.kind { if is_our_default(¶m) { let ty = fcx.tcx.type_of(param.def_id); // ignore dependent defaults -- that is, where the default of one type @@ -469,13 +459,13 @@ fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( // For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`. // // First we build the defaulted substitution. - let substs = Substs::for_item(fcx.tcx, def_id, |param, _| { + let substs = InternalSubsts::for_item(fcx.tcx, def_id, |param, _| { match param.kind { GenericParamDefKind::Lifetime => { // All regions are identity. 
fcx.tcx.mk_param_from_def(param) } - GenericParamDefKind::Type {..} => { + GenericParamDefKind::Type { .. } => { // If the param has a default, if is_our_default(param) { let default_ty = fcx.tcx.type_of(param.def_id); @@ -488,6 +478,10 @@ fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( // Mark unwanted params as err. fcx.tcx.types.err.into() } + GenericParamDefKind::Const => { + // FIXME(const_generics:defaults) + fcx.tcx.types.err.into() + } } }); // Now we build the substituted predicates. @@ -508,6 +502,13 @@ fn check_where_clauses<'a, 'gcx, 'fcx, 'tcx>( fn visit_region(&mut self, _: ty::Region<'tcx>) -> bool { true } + + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + if let ConstValue::Param(param) = c.val { + self.params.insert(param.index); + } + c.super_visit_with(self) + } } let mut param_count = CountParams::default(); let has_region = pred.visit_with(&mut param_count); @@ -611,7 +612,7 @@ fn check_existential_types<'a, 'fcx, 'gcx, 'tcx>( span: Span, ty: Ty<'tcx>, ) -> Vec> { - trace!("check_existential_types: {:?}, {:?}", ty, ty.sty); + trace!("check_existential_types: {:?}", ty); let mut substituted_predicates = Vec::new(); ty.fold_with(&mut ty::fold::BottomUpFolder { tcx: fcx.tcx, @@ -619,20 +620,19 @@ fn check_existential_types<'a, 'fcx, 'gcx, 'tcx>( if let ty::Opaque(def_id, substs) = ty.sty { trace!("check_existential_types: opaque_ty, {:?}, {:?}", def_id, substs); let generics = tcx.generics_of(def_id); - // only check named existential types - if generics.parent.is_none() { - let opaque_node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - if may_define_existential_type(tcx, fn_def_id, opaque_node_id) { + // only check named existential types defined in this crate + if generics.parent.is_none() && def_id.is_local() { + let opaque_hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + if may_define_existential_type(tcx, fn_def_id, opaque_hir_id) { trace!("check_existential_types may define. Generics: {:#?}", generics); let mut seen: FxHashMap<_, Vec<_>> = FxHashMap::default(); for (subst, param) in substs.iter().zip(&generics.params) { match subst.unpack() { ty::subst::UnpackedKind::Type(ty) => match ty.sty { - ty::Param(..) => {}, + ty::Param(..) 
=> {} // prevent `fn foo() -> Foo` from being defining _ => { - tcx - .sess + tcx.sess .struct_span_err( span, "non-defining existential type use \ @@ -647,8 +647,9 @@ fn check_existential_types<'a, 'fcx, 'gcx, 'tcx>( ), ) .emit(); - }, - }, // match ty + } + } + ty::subst::UnpackedKind::Lifetime(region) => { let param_span = tcx.def_span(param.def_id); if let ty::ReStatic = region { @@ -669,7 +670,28 @@ fn check_existential_types<'a, 'fcx, 'gcx, 'tcx>( } else { seen.entry(region).or_default().push(param_span); } - }, + } + + ty::subst::UnpackedKind::Const(ct) => match ct.val { + ConstValue::Param(_) => {} + _ => { + tcx.sess + .struct_span_err( + span, + "non-defining existential type use \ + in defining scope", + ) + .span_note( + tcx.def_span(param.def_id), + &format!( + "used non-generic const {} for \ + generic parameter", + ty, + ), + ) + .emit(); + } + } } // match subst } // for (subst, param) for (_, spans) in seen { @@ -749,79 +771,158 @@ fn check_method_receiver<'fcx, 'gcx, 'tcx>(fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, &ty::Binder::bind(self_ty) ); - let self_arg_ty = sig.inputs()[0]; + let receiver_ty = sig.inputs()[0]; - let cause = fcx.cause(span, ObligationCauseCode::MethodReceiver); - let self_arg_ty = fcx.normalize_associated_types_in(span, &self_arg_ty); - let self_arg_ty = fcx.tcx.liberate_late_bound_regions( + let receiver_ty = fcx.normalize_associated_types_in(span, &receiver_ty); + let receiver_ty = fcx.tcx.liberate_late_bound_regions( method.def_id, - &ty::Binder::bind(self_arg_ty) + &ty::Binder::bind(receiver_ty) ); - let mut autoderef = fcx.autoderef(span, self_arg_ty).include_raw_pointers(); + if fcx.tcx.features().arbitrary_self_types { + if !receiver_is_valid(fcx, span, receiver_ty, self_ty, true) { + // report error, arbitrary_self_types was enabled + fcx.tcx.sess.diagnostic().mut_span_err( + span, &format!("invalid method receiver type: {:?}", receiver_ty) + ).note("type of `self` must be `Self` or a type that dereferences to it") + .help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") + .code(DiagnosticId::Error("E0307".into())) + .emit(); + } + } else { + if !receiver_is_valid(fcx, span, receiver_ty, self_ty, false) { + if receiver_is_valid(fcx, span, receiver_ty, self_ty, true) { + // report error, would have worked with arbitrary_self_types + feature_gate::feature_err( + &fcx.tcx.sess.parse_sess, + "arbitrary_self_types", + span, + GateIssue::Language, + &format!( + "`{}` cannot be used as the type of `self` without \ + the `arbitrary_self_types` feature", + receiver_ty, + ), + ).help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") + .emit(); + } else { + // report error, would not have worked with arbitrary_self_types + fcx.tcx.sess.diagnostic().mut_span_err( + span, &format!("invalid method receiver type: {:?}", receiver_ty) + ).note("type must be `Self` or a type that dereferences to it") + .help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") + .code(DiagnosticId::Error("E0307".into())) + .emit(); + } + } + } +} +/// returns true if `receiver_ty` would be considered a valid receiver type for `self_ty`. If +/// `arbitrary_self_types` is enabled, `receiver_ty` must transitively deref to `self_ty`, possibly +/// through a `*const/mut T` raw pointer. If the feature is not enabled, the requirements are more +/// strict: `receiver_ty` must implement `Receiver` and directly implement `Deref`. 
+/// +/// N.B., there are cases this function returns `true` but causes an error to be emitted, +/// particularly when `receiver_ty` derefs to a type that is the same as `self_ty` but has the +/// wrong lifetime. Be careful of this if you are calling this function speculatively. +fn receiver_is_valid<'fcx, 'tcx, 'gcx>( + fcx: &FnCtxt<'fcx, 'gcx, 'tcx>, + span: Span, + receiver_ty: Ty<'tcx>, + self_ty: Ty<'tcx>, + arbitrary_self_types_enabled: bool, +) -> bool { + let cause = fcx.cause(span, traits::ObligationCauseCode::MethodReceiver); + + let can_eq_self = |ty| fcx.infcx.can_eq(fcx.param_env, self_ty, ty).is_ok(); + + // `self: Self` is always valid + if can_eq_self(receiver_ty) { + if let Some(mut err) = fcx.demand_eqtype_with_origin(&cause, self_ty, receiver_ty) { + err.emit(); + } + return true + } + + let mut autoderef = fcx.autoderef(span, receiver_ty); + + // the `arbitrary_self_types` feature allows raw pointer receivers like `self: *const Self` + if arbitrary_self_types_enabled { + autoderef = autoderef.include_raw_pointers(); + } + + // the first type is `receiver_ty`, which we know is not equal to `self_ty`. skip it. + autoderef.next(); + + // keep dereferencing `receiver_ty` until we get to `self_ty` loop { if let Some((potential_self_ty, _)) = autoderef.next() { - debug!("check_method_receiver: potential self type `{:?}` to match `{:?}`", + debug!("receiver_is_valid: potential self type `{:?}` to match `{:?}`", potential_self_ty, self_ty); - if fcx.infcx.can_eq(fcx.param_env, self_ty, potential_self_ty).is_ok() { - autoderef.finalize(); + if can_eq_self(potential_self_ty) { + autoderef.finalize(fcx); + if let Some(mut err) = fcx.demand_eqtype_with_origin( - &cause, self_ty, potential_self_ty) { + &cause, self_ty, potential_self_ty + ) { err.emit(); } + break } } else { - fcx.tcx.sess.diagnostic().mut_span_err( - span, &format!("invalid `self` type: {:?}", self_arg_ty)) - .note(&format!("type must be `{:?}` or a type that dereferences to it", self_ty)) - .help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") - .code(DiagnosticId::Error("E0307".into())) - .emit(); - return + debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", + receiver_ty, self_ty); + // If the receiver already has errors reported due to it, consider it valid to avoid + // unnecessary errors (#58712). + return receiver_ty.references_error(); + } + + // without the `arbitrary_self_types` feature, `receiver_ty` must directly deref to + // `self_ty`.
Enforce this by only doing one iteration of the loop + if !arbitrary_self_types_enabled { + return false } } - let is_self_ty = |ty| fcx.infcx.can_eq(fcx.param_env, self_ty, ty).is_ok(); - let self_kind = ExplicitSelf::determine(self_arg_ty, is_self_ty); + // without `feature(arbitrary_self_types)`, we require that `receiver_ty` implements `Receiver` + if !arbitrary_self_types_enabled { + let trait_def_id = match fcx.tcx.lang_items().receiver_trait() { + Some(did) => did, + None => { + debug!("receiver_is_valid: missing Receiver trait"); + return false + } + }; - if !fcx.tcx.features().arbitrary_self_types { - match self_kind { - ExplicitSelf::ByValue | - ExplicitSelf::ByReference(_, _) | - ExplicitSelf::ByBox => (), + let trait_ref = ty::TraitRef{ + def_id: trait_def_id, + substs: fcx.tcx.mk_substs_trait(receiver_ty, &[]), + }; - ExplicitSelf::ByRawPointer(_) => { - feature_gate::feature_err( - &fcx.tcx.sess.parse_sess, - "arbitrary_self_types", - span, - GateIssue::Language, - "raw pointer `self` is unstable") - .help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") - .emit(); - } + let obligation = traits::Obligation::new( + cause.clone(), + fcx.param_env, + trait_ref.to_predicate() + ); - ExplicitSelf::Other => { - feature_gate::feature_err( - &fcx.tcx.sess.parse_sess, - "arbitrary_self_types", - span, - GateIssue::Language,"arbitrary `self` types are unstable") - .help("consider changing to `self`, `&self`, `&mut self`, or `self: Box`") - .emit(); - } + if !fcx.predicate_must_hold_modulo_regions(&obligation) { + debug!("receiver_is_valid: type `{:?}` does not implement `Receiver` trait", + receiver_ty); + return false } } + + true } fn check_variances_for_type_defn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item: &hir::Item, hir_generics: &hir::Generics) { - let item_def_id = tcx.hir().local_def_id(item.id); + let item_def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id); let ty = tcx.type_of(item_def_id); if tcx.has_error_field(ty) { return; @@ -837,7 +938,7 @@ fn check_variances_for_type_defn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, .map(|(index, _)| Parameter(index as u32)) .collect(); - identify_constrained_type_params(tcx, + identify_constrained_generic_params(tcx, &ty_predicates, None, &mut constrained_parameters); @@ -866,17 +967,19 @@ fn report_bivariance<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, if let Some(def_id) = suggested_marker_id { err.help(&format!("consider removing `{}` or using a marker such as `{}`", param_name, - tcx.item_path_str(def_id))); + tcx.def_path_str(def_id))); } err.emit(); } -fn reject_shadowing_parameters(tcx: TyCtxt, def_id: DefId) { +fn reject_shadowing_parameters(tcx: TyCtxt<'_, '_, '_>, def_id: DefId) { let generics = tcx.generics_of(def_id); let parent = tcx.generics_of(generics.parent.unwrap()); let impl_params: FxHashMap<_, _> = parent.params.iter().flat_map(|param| match param.kind { GenericParamDefKind::Lifetime => None, - GenericParamDefKind::Type {..} => Some((param.name, param.def_id)), + GenericParamDefKind::Type { .. } | GenericParamDefKind::Const => { + Some((param.name, param.def_id)) + } }).collect(); for method_param in &generics.params { @@ -897,18 +1000,16 @@ fn reject_shadowing_parameters(tcx: TyCtxt, def_id: DefId) { } } -/// Feature gates RFC 2056 - trivial bounds, checking for global bounds that +/// Feature gates RFC 2056 -- trivial bounds, checking for global bounds that /// aren't true. 
fn check_false_global_bounds<'a, 'gcx, 'tcx>( fcx: &FnCtxt<'a, 'gcx, 'tcx>, span: Span, - id: ast::NodeId) + id: hir::HirId) { - use rustc::ty::TypeFoldable; - let empty_env = ty::ParamEnv::empty(); - let def_id = fcx.tcx.hir().local_def_id(id); + let def_id = fcx.tcx.hir().local_def_id_from_hir_id(id); let predicates = fcx.tcx.predicates_of(def_id).predicates .iter() .map(|(p, _)| *p) @@ -949,30 +1050,23 @@ impl<'a, 'gcx> CheckTypeWellFormedVisitor<'a, 'gcx> { } } -impl<'a, 'tcx, 'v> Visitor<'v> for CheckTypeWellFormedVisitor<'a, 'tcx> { - fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> { - NestedVisitorMap::None - } - - fn visit_item(&mut self, i: &hir::Item) { +impl<'a, 'tcx> ParItemLikeVisitor<'tcx> for CheckTypeWellFormedVisitor<'a, 'tcx> { + fn visit_item(&self, i: &'tcx hir::Item) { debug!("visit_item: {:?}", i); - let def_id = self.tcx.hir().local_def_id(i.id); - ty::query::queries::check_item_well_formed::ensure(self.tcx, def_id); - intravisit::walk_item(self, i); + let def_id = self.tcx.hir().local_def_id_from_hir_id(i.hir_id); + self.tcx.ensure().check_item_well_formed(def_id); } - fn visit_trait_item(&mut self, trait_item: &'v hir::TraitItem) { + fn visit_trait_item(&self, trait_item: &'tcx hir::TraitItem) { debug!("visit_trait_item: {:?}", trait_item); - let def_id = self.tcx.hir().local_def_id(trait_item.id); - ty::query::queries::check_trait_item_well_formed::ensure(self.tcx, def_id); - intravisit::walk_trait_item(self, trait_item) + let def_id = self.tcx.hir().local_def_id_from_hir_id(trait_item.hir_id); + self.tcx.ensure().check_trait_item_well_formed(def_id); } - fn visit_impl_item(&mut self, impl_item: &'v hir::ImplItem) { + fn visit_impl_item(&self, impl_item: &'tcx hir::ImplItem) { debug!("visit_impl_item: {:?}", impl_item); - let def_id = self.tcx.hir().local_def_id(impl_item.id); - ty::query::queries::check_impl_item_well_formed::ensure(self.tcx, def_id); - intravisit::walk_impl_item(self, impl_item) + let def_id = self.tcx.hir().local_def_id_from_hir_id(impl_item.hir_id); + self.tcx.ensure().check_impl_item_well_formed(def_id); } } @@ -991,7 +1085,7 @@ struct AdtField<'tcx> { impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { fn non_enum_variant(&self, struct_def: &hir::VariantData) -> AdtVariant<'tcx> { let fields = struct_def.fields().iter().map(|field| { - let field_ty = self.tcx.type_of(self.tcx.hir().local_def_id(field.id)); + let field_ty = self.tcx.type_of(self.tcx.hir().local_def_id_from_hir_id(field.hir_id)); let field_ty = self.normalize_associated_types_in(field.span, &field_ty); AdtField { ty: field_ty, span: field.span } @@ -1033,7 +1127,7 @@ fn error_392<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span, param_name: ast: err } -fn error_194(tcx: TyCtxt, span: Span, trait_decl_span: Span, name: &str) { +fn error_194(tcx: TyCtxt<'_, '_, '_>, span: Span, trait_decl_span: Span, name: &str) { struct_span_err!(tcx.sess, span, E0194, "type parameter `{}` shadows another type parameter of the same name", name) diff --git a/src/librustc_typeck/check/writeback.rs b/src/librustc_typeck/check/writeback.rs index 8d7fc008fb0d0..c2404917fa7a7 100644 --- a/src/librustc_typeck/check/writeback.rs +++ b/src/librustc_typeck/check/writeback.rs @@ -1,18 +1,9 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - // Type resolution: the phase that finds all the types in the AST with // unresolved type variables and replaces "ty_var" types with their // substitutions. -use check::FnCtxt; +use crate::check::FnCtxt; +use errors::DiagnosticBuilder; use rustc::hir; use rustc::hir::def_id::{DefId, DefIndex}; use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor}; @@ -24,12 +15,20 @@ use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::DefIdSet; use rustc_data_structures::sync::Lrc; use std::mem; -use syntax::ast; use syntax_pos::Span; /////////////////////////////////////////////////////////////////////////// // Entry point +// During type inference, partially inferred types are +// represented using Type variables (ty::Infer). These don't appear in +// the final TypeckTables since all of the types should have been +// inferred once typeck_tables_of is done. +// When type inference is running however, having to update the typeck +// tables every time a new type is inferred would be unreasonably slow, +// so instead all of the replacement happens at the end in +// resolve_type_vars_in_body, which creates a new TypeTables which +// doesn't contain any inference types. impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { pub fn resolve_type_vars_in_body(&self, body: &'gcx hir::Body) -> &'gcx ty::TypeckTables<'gcx> { let item_id = self.tcx.hir().body_owner(body.id()); @@ -44,12 +43,13 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> { wbcx.visit_node_id(arg.pat.span, arg.hir_id); } wbcx.visit_body(body); - wbcx.visit_upvar_borrow_map(); + wbcx.visit_upvar_capture_map(); + wbcx.visit_upvar_list_map(); wbcx.visit_closures(); wbcx.visit_liberated_fn_sigs(); wbcx.visit_fru_field_types(); wbcx.visit_opaque_types(body.value.span); - wbcx.visit_cast_types(); + wbcx.visit_coercion_casts(); wbcx.visit_free_region_map(); wbcx.visit_user_provided_tys(); wbcx.visit_user_provided_sigs(); @@ -99,7 +99,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { body: &'gcx hir::Body, rustc_dump_user_substs: bool, ) -> WritebackCx<'cx, 'gcx, 'tcx> { - let owner = fcx.tcx.hir().definitions().node_to_hir_id(body.id().node_id); + let owner = body.id().hir_id; WritebackCx { fcx, @@ -242,11 +242,11 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { } hir::ExprKind::Struct(_, ref fields, _) => { for field in fields { - self.visit_field_id(field.id); + self.visit_field_id(field.hir_id); } } hir::ExprKind::Field(..) 
=> { - self.visit_field_id(e.id); + self.visit_field_id(e.hir_id); } _ => {} } @@ -272,7 +272,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { } hir::PatKind::Struct(_, ref fields, _) => { for field in fields { - self.visit_field_id(field.node.id); + self.visit_field_id(field.node.hir_id); } } _ => {} @@ -286,7 +286,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { fn visit_local(&mut self, l: &'gcx hir::Local) { intravisit::walk_local(self, l); - let var_ty = self.fcx.local_ty(l.span, l.id).decl_ty; + let var_ty = self.fcx.local_ty(l.span, l.hir_id).decl_ty; let var_ty = self.resolve(&var_ty, &l.span); self.write_ty_to_tables(l.hir_id, var_ty); } @@ -300,7 +300,7 @@ impl<'cx, 'gcx, 'tcx> Visitor<'gcx> for WritebackCx<'cx, 'gcx, 'tcx> { } impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { - fn visit_upvar_borrow_map(&mut self) { + fn visit_upvar_capture_map(&mut self) { for (upvar_id, upvar_capture) in self.fcx.tables.borrow().upvar_capture_map.iter() { let new_upvar_capture = match *upvar_capture { ty::UpvarCapture::ByValue => ty::UpvarCapture::ByValue, @@ -323,6 +323,21 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } } + /// Runs through the function context's upvar list map and adds the same to + /// the TypeckTables. upvarlist is a hashmap of the list of upvars referred + /// to in a closure.. + fn visit_upvar_list_map(&mut self) { + for (closure_def_id, upvar_list) in self.fcx.tables.borrow().upvar_list.iter() { + debug!( + "UpvarIDs captured by closure {:?} are: {:?}", + closure_def_id, upvar_list + ); + self.tables + .upvar_list + .insert(*closure_def_id, upvar_list.to_vec()); + } + } + fn visit_closures(&mut self) { let fcx_tables = self.fcx.tables.borrow(); debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root); @@ -339,19 +354,13 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } } - fn visit_cast_types(&mut self) { + fn visit_coercion_casts(&mut self) { let fcx_tables = self.fcx.tables.borrow(); - let fcx_cast_kinds = fcx_tables.cast_kinds(); + let fcx_coercion_casts = fcx_tables.coercion_casts(); debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root); - let mut self_cast_kinds = self.tables.cast_kinds_mut(); - let common_local_id_root = fcx_tables.local_id_root.unwrap(); - for (&local_id, &cast_kind) in fcx_cast_kinds.iter() { - let hir_id = hir::HirId { - owner: common_local_id_root.index, - local_id, - }; - self_cast_kinds.insert(hir_id, cast_kind); + for local_id in fcx_coercion_casts { + self.tables.set_coercion_cast(*local_id); } } @@ -367,7 +376,8 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { debug_assert_eq!(fcx_tables.local_id_root, self.tables.local_id_root); let common_local_id_root = fcx_tables.local_id_root.unwrap(); - for (&local_id, c_ty) in fcx_tables.user_provided_tys().iter() { + let mut errors_buffer = Vec::new(); + for (&local_id, c_ty) in fcx_tables.user_provided_types().iter() { let hir_id = hir::HirId { owner: common_local_id_root.index, local_id, @@ -384,8 +394,29 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { }; self.tables - .user_provided_tys_mut() + .user_provided_types_mut() .insert(hir_id, c_ty.clone()); + + if let ty::UserType::TypeOf(_, user_substs) = c_ty.value { + if self.rustc_dump_user_substs { + // This is a unit-testing mechanism. + let span = self.tcx().hir().span_by_hir_id(hir_id); + // We need to buffer the errors in order to guarantee a consistent + // order when emitting them. 
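
(Illustrative sketch, not part of this patch.) The lines that follow buffer each `user substs` test diagnostic and emit only after sorting by primary span, because iterating the underlying hash map gives no stable order. A minimal model of that buffer-sort-emit pattern in plain Rust, with `BufferedDiag` and `println!` standing in for the real diagnostic types:

struct BufferedDiag {
    primary_span_lo: u32,
    msg: String,
}

fn emit_in_stable_order(mut buffered: Vec<BufferedDiag>) {
    // Sorting by the primary span makes the output deterministic regardless
    // of the order in which the diagnostics were buffered.
    buffered.sort_by_key(|d| d.primary_span_lo);
    for d in buffered {
        println!("error: {} (span starts at byte {})", d.msg, d.primary_span_lo);
    }
}

fn main() {
    emit_in_stable_order(vec![
        BufferedDiag { primary_span_lo: 120, msg: "user substs: [u32]".into() },
        BufferedDiag { primary_span_lo: 40, msg: "user substs: [bool]".into() },
    ]);
}
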
+ let err = self.tcx().sess.struct_span_err( + span, + &format!("user substs: {:?}", user_substs) + ); + err.buffer(&mut errors_buffer); + } + } + } + + if !errors_buffer.is_empty() { + errors_buffer.sort_by_key(|diag| diag.span.primary_span()); + for diag in errors_buffer.drain(..) { + DiagnosticBuilder::new_diagnostic(self.tcx().sess.diagnostic(), diag).emit(); + } } } @@ -412,8 +443,8 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { fn visit_opaque_types(&mut self, span: Span) { for (&def_id, opaque_defn) in self.fcx.opaque_types.borrow().iter() { - let node_id = self.tcx().hir().as_local_node_id(def_id).unwrap(); - let instantiated_ty = self.resolve(&opaque_defn.concrete_ty, &node_id); + let hir_id = self.tcx().hir().as_local_hir_id(def_id).unwrap(); + let instantiated_ty = self.resolve(&opaque_defn.concrete_ty, &hir_id); let generics = self.tcx().generics_of(def_id); @@ -440,7 +471,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { instantiated_ty.fold_with(&mut BottomUpFolder { tcx: self.tcx().global_tcx(), fldop: |ty| { - trace!("checking type {:?}: {:#?}", ty, ty.sty); + trace!("checking type {:?}", ty); // find a type parameter if let ty::Param(..) = ty.sty { // look it up in the substitution list @@ -522,21 +553,29 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { if def_id == defin_ty_def_id { // Concrete type resolved to the existential type itself // Force a cycle error + // FIXME(oli-obk): we could just not insert it into `concrete_existential_types` + // which simply would make this use not a defining use. self.tcx().at(span).type_of(defin_ty_def_id); } } + let new = ty::ResolvedOpaqueTy { + concrete_type: definition_ty, + substs: self.tcx().lift_to_global(&opaque_defn.substs).unwrap(), + }; + let old = self.tables .concrete_existential_types - .insert(def_id, definition_ty); + .insert(def_id, new); if let Some(old) = old { - if old != definition_ty { + if old.concrete_type != definition_ty || old.substs != opaque_defn.substs { span_bug!( span, "visit_opaque_types tried to write \ - different types for the same existential type: {:?}, {:?}, {:?}", + different types for the same existential type: {:?}, {:?}, {:?}, {:?}", def_id, definition_ty, + opaque_defn, old, ); } @@ -544,8 +583,7 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } } - fn visit_field_id(&mut self, node_id: ast::NodeId) { - let hir_id = self.tcx().hir().node_to_hir_id(node_id); + fn visit_field_id(&mut self, hir_id: hir::HirId) { if let Some(index) = self.fcx .tables .borrow_mut() @@ -583,22 +621,6 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { assert!(!substs.needs_infer() && !substs.has_placeholders()); self.tables.node_substs_mut().insert(hir_id, substs); } - - // Copy over any user-substs - if let Some(user_substs) = self.fcx.tables.borrow().user_substs(hir_id) { - let user_substs = self.tcx().lift_to_global(&user_substs).unwrap(); - self.tables.user_substs_mut().insert(hir_id, user_substs); - - // Unit-testing mechanism: - if self.rustc_dump_user_substs { - let node_id = self.tcx().hir().hir_to_node_id(hir_id); - let span = self.tcx().hir().span(node_id); - self.tcx().sess.span_err( - span, - &format!("user substs: {:?}", user_substs), - ); - } - } } fn visit_adjustments(&mut self, span: Span, hir_id: hir::HirId) { @@ -699,32 +721,25 @@ impl<'cx, 'gcx, 'tcx> WritebackCx<'cx, 'gcx, 'tcx> { } trait Locatable { - fn to_span(&self, tcx: &TyCtxt) -> Span; + fn to_span(&self, tcx: &TyCtxt<'_, '_, '_>) -> Span; } impl Locatable for Span { - fn to_span(&self, _: &TyCtxt) -> 
Span { + fn to_span(&self, _: &TyCtxt<'_, '_, '_>) -> Span { *self } } -impl Locatable for ast::NodeId { - fn to_span(&self, tcx: &TyCtxt) -> Span { - tcx.hir().span(*self) - } -} - impl Locatable for DefIndex { - fn to_span(&self, tcx: &TyCtxt) -> Span { - let node_id = tcx.hir().def_index_to_node_id(*self); - tcx.hir().span(node_id) + fn to_span(&self, tcx: &TyCtxt<'_, '_, '_>) -> Span { + let hir_id = tcx.hir().def_index_to_hir_id(*self); + tcx.hir().span_by_hir_id(hir_id) } } impl Locatable for hir::HirId { - fn to_span(&self, tcx: &TyCtxt) -> Span { - let node_id = tcx.hir().hir_to_node_id(*self); - tcx.hir().span(node_id) + fn to_span(&self, tcx: &TyCtxt<'_, '_, '_>) -> Span { + tcx.hir().span_by_hir_id(*self) } } diff --git a/src/librustc_typeck/check_unused.rs b/src/librustc_typeck/check_unused.rs index e6171e9da3bd5..cbb6d9b29f59f 100644 --- a/src/librustc_typeck/check_unused.rs +++ b/src/librustc_typeck/check_unused.rs @@ -1,14 +1,4 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use lint; +use crate::lint; use rustc::ty::TyCtxt; use errors::Applicability; @@ -44,7 +34,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CheckVisitor<'a, 'tcx> { return; } if let hir::ItemKind::Use(ref path, _) = item.node { - self.check_import(item.id, path.span); + self.check_import(item.hir_id, path.span); } } @@ -61,14 +51,13 @@ struct CheckVisitor<'a, 'tcx: 'a> { } impl<'a, 'tcx> CheckVisitor<'a, 'tcx> { - fn check_import(&self, id: ast::NodeId, span: Span) { - let def_id = self.tcx.hir().local_def_id(id); + fn check_import(&self, id: hir::HirId, span: Span) { + let def_id = self.tcx.hir().local_def_id_from_hir_id(id); if !self.tcx.maybe_unused_trait_import(def_id) { return; } - let import_def_id = self.tcx.hir().local_def_id(id); - if self.used_trait_imports.contains(&import_def_id) { + if self.used_trait_imports.contains(&def_id) { return; } @@ -77,7 +66,7 @@ impl<'a, 'tcx> CheckVisitor<'a, 'tcx> { } else { "unused import".to_owned() }; - self.tcx.lint_node(lint::builtin::UNUSED_IMPORTS, id, span, &msg); + self.tcx.lint_hir(lint::builtin::UNUSED_IMPORTS, id, span, &msg); } } @@ -105,8 +94,8 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { // Note that if we carry through to the `extern_mod_stmt_cnum` query // below it'll cause a panic because `def_id` is actually bogus at this // point in time otherwise. - if let Some(id) = tcx.hir().as_local_node_id(def_id) { - if tcx.hir().find(id).is_none() { + if let Some(id) = tcx.hir().as_local_hir_id(def_id) { + if tcx.hir().find_by_hir_id(id).is_none() { return false; } } @@ -131,8 +120,8 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { }); for extern_crate in &crates_to_lint { - let id = tcx.hir().as_local_node_id(extern_crate.def_id).unwrap(); - let item = tcx.hir().expect_item(id); + let id = tcx.hir().as_local_hir_id(extern_crate.def_id).unwrap(); + let item = tcx.hir().expect_item_by_hir_id(id); // If the crate is fully unused, we suggest removing it altogether. // We do this in any edition. 
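
(Illustrative sketch, not part of this patch.) The next hunk widens the lint span by folding the item's span together with the spans of its attributes, so the "remove it" suggestion deletes leading attributes along with the `extern crate` item. The same folding, modelled with a stand-in `Span` type rather than the real `syntax_pos::Span`:

#[derive(Clone, Copy, Debug)]
struct Span { lo: u32, hi: u32 }

impl Span {
    // Stand-in for `Span::to`: the smallest span covering both inputs.
    fn to(self, other: Span) -> Span {
        Span { lo: self.lo.min(other.lo), hi: self.hi.max(other.hi) }
    }
}

fn main() {
    let item = Span { lo: 100, hi: 130 };        // `extern crate foo;`
    let attr_spans = [Span { lo: 80, hi: 95 }];  // `#[macro_use]`
    let span_with_attrs = attr_spans
        .iter()
        .copied()
        .fold(item, |acc, attr_span| acc.to(attr_span));
    println!("{:?}", span_with_attrs);           // Span { lo: 80, hi: 130 }
}
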
@@ -145,8 +134,8 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { .map(|attr| attr.span) .fold(span, |acc, attr_span| acc.to(attr_span)); - tcx.struct_span_lint_node(lint, id, span, msg) - .span_suggestion_short_with_applicability( + tcx.struct_span_lint_hir(lint, id, span, msg) + .span_suggestion_short( span_with_attrs, "remove it", String::new(), @@ -164,7 +153,7 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { // If the extern crate isn't in the extern prelude, // there is no way it can be written as an `use`. - let orig_name = extern_crate.orig_name.unwrap_or(item.name); + let orig_name = extern_crate.orig_name.unwrap_or(item.ident.name); if !tcx.extern_prelude.get(&orig_name).map_or(false, |from_item| !from_item) { continue; } @@ -183,12 +172,12 @@ fn unused_crates_lint<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>) { visibility_qualified(&item.vis, "use") ); let base_replacement = match extern_crate.orig_name { - Some(orig_name) => format!("use {} as {};", orig_name, item.name), - None => format!("use {};", item.name), + Some(orig_name) => format!("use {} as {};", orig_name, item.ident.name), + None => format!("use {};", item.ident.name), }; let replacement = visibility_qualified(&item.vis, base_replacement); - tcx.struct_span_lint_node(lint, id, extern_crate.span, msg) - .span_suggestion_short_with_applicability( + tcx.struct_span_lint_hir(lint, id, extern_crate.span, msg) + .span_suggestion_short( extern_crate.span, &help, replacement, @@ -204,7 +193,7 @@ struct CollectExternCrateVisitor<'a, 'tcx: 'a> { } struct ExternCrateToLint { - /// def-id of the extern crate + /// `DefId` of the extern crate def_id: DefId, /// span from the item @@ -223,13 +212,13 @@ struct ExternCrateToLint { impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for CollectExternCrateVisitor<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { if let hir::ItemKind::ExternCrate(orig_name) = item.node { - let extern_crate_def_id = self.tcx.hir().local_def_id(item.id); + let extern_crate_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); self.crates_to_lint.push( ExternCrateToLint { def_id: extern_crate_def_id, span: item.span, orig_name, - warn_if_unused: !item.name.as_str().starts_with('_'), + warn_if_unused: !item.ident.as_str().starts_with('_'), } ); } diff --git a/src/librustc_typeck/coherence/builtin.rs b/src/librustc_typeck/coherence/builtin.rs index 2d5dcf0ec1b6d..c3b08569d2f1f 100644 --- a/src/librustc_typeck/coherence/builtin.rs +++ b/src/librustc_typeck/coherence/builtin.rs @@ -1,13 +1,3 @@ -// Copyright 2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Check properties that are required by built-in traits and set //! up data structures required by type-checking/codegen. @@ -47,7 +37,7 @@ impl<'a, 'tcx> Checker<'a, 'tcx> { { if Some(self.trait_def_id) == trait_def_id { for &impl_id in self.tcx.hir().trait_impls(self.trait_def_id) { - let impl_def_id = self.tcx.hir().local_def_id(impl_id); + let impl_def_id = self.tcx.hir().local_def_id_from_hir_id(impl_id); f(self.tcx, impl_def_id); } } @@ -60,8 +50,8 @@ fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: /* do nothing */ } else { // Destructors only work on nominal types. 
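
(Illustrative note, not part of this patch.) The check being updated here only accepts `Drop` impls for nominal types, that is, locally defined structs, enums, and unions; other self types are rejected (E0120). For example:

struct Wrapper(Vec<u8>);

impl Drop for Wrapper {
    fn drop(&mut self) {
        // Accepted: `Wrapper` is a locally defined struct, i.e. a nominal type.
        println!("dropping {} bytes", self.0.len());
    }
}

fn main() {
    let _w = Wrapper(vec![1, 2, 3]);
}

// By contrast, something like `impl Drop for &'static str { .. }` is the shape
// of impl this function reports, since the self type is not a nominal type.
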
- if let Some(impl_node_id) = tcx.hir().as_local_node_id(impl_did) { - if let Some(Node::Item(item)) = tcx.hir().find(impl_node_id) { + if let Some(impl_hir_id) = tcx.hir().as_local_hir_id(impl_did) { + if let Some(Node::Item(item)) = tcx.hir().find_by_hir_id(impl_hir_id) { let span = match item.node { ItemKind::Impl(.., ref ty, _) => ty.span, _ => item.span, @@ -86,7 +76,7 @@ fn visit_implementation_of_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: DefId) { debug!("visit_implementation_of_copy: impl_did={:?}", impl_did); - let impl_node_id = if let Some(n) = tcx.hir().as_local_node_id(impl_did) { + let impl_hir_id = if let Some(n) = tcx.hir().as_local_hir_id(impl_did) { n } else { debug!("visit_implementation_of_copy(): impl not in this crate"); @@ -97,7 +87,7 @@ fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type); - let span = tcx.hir().span(impl_node_id); + let span = tcx.hir().span_by_hir_id(impl_hir_id); let param_env = tcx.param_env(impl_did); assert!(!self_type.has_escaping_bound_vars()); @@ -107,7 +97,7 @@ fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: match param_env.can_type_implement_copy(tcx, self_type) { Ok(()) => {} Err(CopyImplementationError::InfrigingFields(fields)) => { - let item = tcx.hir().expect_item(impl_node_id); + let item = tcx.hir().expect_item_by_hir_id(impl_hir_id); let span = if let ItemKind::Impl(.., Some(ref tr), _, _) = item.node { tr.path.span } else { @@ -124,7 +114,7 @@ fn visit_implementation_of_copy<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_did: err.emit() } Err(CopyImplementationError::NotAnAdt) => { - let item = tcx.hir().expect_item(impl_node_id); + let item = tcx.hir().expect_item_by_hir_id(impl_hir_id); let span = if let ItemKind::Impl(.., ref ty, _) = item.node { ty.span } else { @@ -172,8 +162,8 @@ fn visit_implementation_of_dispatch_from_dyn<'a, 'tcx>( if impl_did.is_local() { let dispatch_from_dyn_trait = tcx.lang_items().dispatch_from_dyn_trait().unwrap(); - let impl_node_id = tcx.hir().as_local_node_id(impl_did).unwrap(); - let span = tcx.hir().span(impl_node_id); + let impl_hir_id = tcx.hir().as_local_hir_id(impl_did).unwrap(); + let span = tcx.hir().span_by_hir_id(impl_hir_id); let source = tcx.type_of(impl_did); assert!(!source.has_escaping_bound_vars()); @@ -195,7 +185,7 @@ fn visit_implementation_of_dispatch_from_dyn<'a, 'tcx>( }; tcx.infer_ctxt().enter(|infcx| { - let cause = ObligationCause::misc(span, impl_node_id); + let cause = ObligationCause::misc(span, impl_hir_id); use ty::TyKind::*; match (&source.sty, &target.sty) { @@ -208,8 +198,8 @@ fn visit_implementation_of_dispatch_from_dyn<'a, 'tcx>( if def_a.is_struct() && def_b.is_struct() => { if def_a != def_b { - let source_path = tcx.item_path_str(def_a.did); - let target_path = tcx.item_path_str(def_b.did); + let source_path = tcx.def_path_str(def_a.did); + let target_path = tcx.def_path_str(def_b.did); create_err( &format!( @@ -342,7 +332,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, }); // this provider should only get invoked for local def-ids - let impl_node_id = gcx.hir().as_local_node_id(impl_did).unwrap_or_else(|| { + let impl_hir_id = gcx.hir().as_local_hir_id(impl_did).unwrap_or_else(|| { bug!("coerce_unsized_info: invoked for non-local def-id {:?}", impl_did) }); @@ -354,7 +344,7 @@ pub fn coerce_unsized_info<'a, 
'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, source, target); - let span = gcx.hir().span(impl_node_id); + let span = gcx.hir().span_by_hir_id(impl_hir_id); let param_env = gcx.param_env(impl_did); assert!(!source.has_escaping_bound_vars()); @@ -365,7 +355,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, target); gcx.infer_ctxt().enter(|infcx| { - let cause = ObligationCause::misc(span, impl_node_id); + let cause = ObligationCause::misc(span, impl_hir_id); let check_mutbl = |mt_a: ty::TypeAndMut<'gcx>, mt_b: ty::TypeAndMut<'gcx>, mk_ptr: &dyn Fn(Ty<'gcx>) -> Ty<'gcx>| { @@ -398,8 +388,8 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) if def_a.is_struct() && def_b.is_struct() => { if def_a != def_b { - let source_path = gcx.item_path_str(def_a.did); - let target_path = gcx.item_path_str(def_b.did); + let source_path = gcx.def_path_str(def_a.did); + let target_path = gcx.def_path_str(def_b.did); span_err!(gcx.sess, span, E0377, @@ -491,11 +481,11 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, being coerced, none found"); return err_info; } else if diff_fields.len() > 1 { - let item = gcx.hir().expect_item(impl_node_id); + let item = gcx.hir().expect_item_by_hir_id(impl_hir_id); let span = if let ItemKind::Impl(.., Some(ref t), _, _) = item.node { t.path.span } else { - gcx.hir().span(impl_node_id) + gcx.hir().span_by_hir_id(impl_hir_id) }; let mut err = struct_span_err!(gcx.sess, @@ -537,7 +527,7 @@ pub fn coerce_unsized_info<'a, 'gcx>(gcx: TyCtxt<'a, 'gcx, 'gcx>, let mut fulfill_cx = TraitEngine::new(infcx.tcx); // Register an obligation for `A: Trait`. - let cause = traits::ObligationCause::misc(span, impl_node_id); + let cause = traits::ObligationCause::misc(span, impl_hir_id); let predicate = gcx.predicate_for_trait_def(param_env, cause, trait_def_id, diff --git a/src/librustc_typeck/coherence/inherent_impls.rs b/src/librustc_typeck/coherence/inherent_impls.rs index 59989a65d4f91..d167c7fcafbe4 100644 --- a/src/librustc_typeck/coherence/inherent_impls.rs +++ b/src/librustc_typeck/coherence/inherent_impls.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! The code in this module gathers up all of the inherent impls in //! the current crate and organizes them in a map. It winds up //! touching the whole crate and thus must be recomputed completely @@ -95,7 +85,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for InherentCollect<'a, 'tcx> { _ => return }; - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); let self_ty = self.tcx.type_of(def_id); let lang_items = self.tcx.lang_items(); match self_ty.sty { @@ -105,8 +95,8 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for InherentCollect<'a, 'tcx> { ty::Foreign(did) => { self.check_def_id(item, did); } - ty::Dynamic(ref data, ..) => { - self.check_def_id(item, data.principal().def_id()); + ty::Dynamic(ref data, ..) 
if data.principal_def_id().is_some() => { + self.check_def_id(item, data.principal_def_id().unwrap()); } ty::Char => { self.check_primitive_impl(def_id, @@ -298,7 +288,7 @@ impl<'a, 'tcx> InherentCollect<'a, 'tcx> { // Add the implementation to the mapping from implementation to base // type def ID, if there is a base type for this implementation and // the implementation does not have any associated traits. - let impl_def_id = self.tcx.hir().local_def_id(item.id); + let impl_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); let mut rc_vec = self.impls_map.inherent_impls .entry(def_id) .or_default(); diff --git a/src/librustc_typeck/coherence/inherent_impls_overlap.rs b/src/librustc_typeck/coherence/inherent_impls_overlap.rs index c273c8f645677..d0156db32e948 100644 --- a/src/librustc_typeck/coherence/inherent_impls_overlap.rs +++ b/src/librustc_typeck/coherence/inherent_impls_overlap.rs @@ -1,22 +1,11 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use namespace::Namespace; +use crate::namespace::Namespace; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; -use rustc::traits::{self, IntercrateMode, FutureCompatOverlapErrorKind}; +use rustc::traits::{self, IntercrateMode}; use rustc::ty::TyCtxt; -use rustc::ty::relate::TraitObjectMode; -use lint; +use crate::lint; pub fn crate_inherent_impls_overlap_check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) { @@ -30,15 +19,13 @@ struct InherentOverlapChecker<'a, 'tcx: 'a> { } impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { - fn check_for_common_items_in_impls( - &self, impl1: DefId, impl2: DefId, - overlap: traits::OverlapResult, - used_to_be_allowed: Option) - { + fn check_for_common_items_in_impls(&self, impl1: DefId, impl2: DefId, + overlap: traits::OverlapResult<'_>, + used_to_be_allowed: bool) { let name_and_namespace = |def_id| { let item = self.tcx.associated_item(def_id); - (item.ident, Namespace::from(item.kind)) + (item.ident.modern(), Namespace::from(item.kind)) }; let impl_items1 = self.tcx.associated_item_def_ids(impl1); @@ -49,29 +36,20 @@ impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { for &item2 in &impl_items2[..] 
{ if (name, namespace) == name_and_namespace(item2) { - let node_id = self.tcx.hir().as_local_node_id(impl1); - let mut err = match used_to_be_allowed { - Some(kind) if node_id.is_some() => { - let lint = match kind { - FutureCompatOverlapErrorKind::Issue43355 => - lint::builtin::INCOHERENT_FUNDAMENTAL_IMPLS, - FutureCompatOverlapErrorKind::Issue33140 => - lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS, - }; - self.tcx.struct_span_lint_node( - lint, - node_id.unwrap(), - self.tcx.span_of_impl(item1).unwrap(), - &format!("duplicate definitions with name `{}` (E0592)", name) - ) - } - _ => { - struct_span_err!(self.tcx.sess, - self.tcx.span_of_impl(item1).unwrap(), - E0592, - "duplicate definitions with name `{}`", - name) - } + let hir_id = self.tcx.hir().as_local_hir_id(impl1); + let mut err = if used_to_be_allowed && hir_id.is_some() { + self.tcx.struct_span_lint_hir( + lint::builtin::INCOHERENT_FUNDAMENTAL_IMPLS, + hir_id.unwrap(), + self.tcx.span_of_impl(item1).unwrap(), + &format!("duplicate definitions with name `{}` (E0592)", name) + ) + } else { + struct_span_err!(self.tcx.sess, + self.tcx.span_of_impl(item1).unwrap(), + E0592, + "duplicate definitions with name `{}`", + name) }; err.span_label(self.tcx.span_of_impl(item1).unwrap(), @@ -83,6 +61,10 @@ impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { cause.add_intercrate_ambiguity_hint(&mut err); } + if overlap.involves_placeholder { + traits::add_placeholder_note(&mut err); + } + err.emit(); } } @@ -94,73 +76,38 @@ impl<'a, 'tcx> InherentOverlapChecker<'a, 'tcx> { for (i, &impl1_def_id) in impls.iter().enumerate() { for &impl2_def_id in &impls[(i + 1)..] { - // First, check if the impl was forbidden under the - // old rules. In that case, just have an error. let used_to_be_allowed = traits::overlapping_impls( self.tcx, impl1_def_id, impl2_def_id, IntercrateMode::Issue43355, - TraitObjectMode::NoSquash, |overlap| { self.check_for_common_items_in_impls( impl1_def_id, impl2_def_id, overlap, - None, + false, ); false }, || true, ); - if !used_to_be_allowed { - continue; - } - - // Then, check if the impl was forbidden under only - // #43355. In that case, emit an #43355 error. - let used_to_be_allowed = traits::overlapping_impls( - self.tcx, - impl1_def_id, - impl2_def_id, - IntercrateMode::Fixed, - TraitObjectMode::NoSquash, - |overlap| { - self.check_for_common_items_in_impls( + if used_to_be_allowed { + traits::overlapping_impls( + self.tcx, + impl1_def_id, + impl2_def_id, + IntercrateMode::Fixed, + |overlap| self.check_for_common_items_in_impls( impl1_def_id, impl2_def_id, overlap, - Some(FutureCompatOverlapErrorKind::Issue43355), - ); - false - }, - || true, - ); - - if !used_to_be_allowed { - continue; + true, + ), + || (), + ); } - - // Then, check if the impl was forbidden under - // #33140. In that case, emit a #33140 error. - traits::overlapping_impls( - self.tcx, - impl1_def_id, - impl2_def_id, - IntercrateMode::Fixed, - TraitObjectMode::SquashAutoTraitsIssue33140, - |overlap| { - self.check_for_common_items_in_impls( - impl1_def_id, - impl2_def_id, - overlap, - Some(FutureCompatOverlapErrorKind::Issue33140), - ); - false - }, - || true, - ); } } } @@ -173,7 +120,7 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for InherentOverlapChecker<'a, 'tcx> { hir::ItemKind::Struct(..) | hir::ItemKind::Trait(..) | hir::ItemKind::Union(..) 
=> { - let type_def_id = self.tcx.hir().local_def_id(item.id); + let type_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); self.check_for_overlapping_inherent_impls(type_def_id); } _ => {} diff --git a/src/librustc_typeck/coherence/mod.rs b/src/librustc_typeck/coherence/mod.rs index 0360617be3c49..40f01ebb3c19f 100644 --- a/src/librustc_typeck/coherence/mod.rs +++ b/src/librustc_typeck/coherence/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Coherence phase // // The job of the coherence phase of typechecking is to ensure that @@ -15,12 +5,12 @@ // done by the orphan and overlap modules. Then we build up various // mappings. That mapping code resides here. -use hir::def_id::{DefId, LOCAL_CRATE}; +use crate::hir::HirId; +use crate::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::traits; use rustc::ty::{self, TyCtxt, TypeFoldable}; use rustc::ty::query::Providers; - -use syntax::ast; +use rustc::util::common::time; mod builtin; mod inherent_impls; @@ -28,8 +18,8 @@ mod inherent_impls_overlap; mod orphan; mod unsafety; -fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeId) { - let impl_def_id = tcx.hir().local_def_id(node_id); +fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_id: HirId) { + let impl_def_id = tcx.hir().local_def_id_from_hir_id(hir_id); // If there are no traits, then this implementation must have a // base type. @@ -37,7 +27,7 @@ fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeId) { if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) { debug!("(checking implementation) adding impl for trait '{:?}', item '{}'", trait_ref, - tcx.item_path_str(impl_def_id)); + tcx.def_path_str(impl_def_id)); // Skip impls where one of the self type is an error type. // This occurs with e.g., resolve failures (#30589). @@ -50,7 +40,11 @@ fn check_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeId) { } } -fn enforce_trait_manually_implementable(tcx: TyCtxt, impl_def_id: DefId, trait_def_id: DefId) { +fn enforce_trait_manually_implementable( + tcx: TyCtxt<'_, '_, '_>, + impl_def_id: DefId, + trait_def_id: DefId +) { let did = Some(trait_def_id); let li = tcx.lang_items(); let span = tcx.sess.source_map().def_span(tcx.span_of_impl(impl_def_id).unwrap()); @@ -102,7 +96,11 @@ fn enforce_trait_manually_implementable(tcx: TyCtxt, impl_def_id: DefId, trait_d /// We allow impls of marker traits to overlap, so they can't override impls /// as that could make it ambiguous which associated item to use. 
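
(Illustrative sketch, not part of this patch; it needs a nightly toolchain with the `marker_trait_attr` feature that the `is_marker` flag used below corresponds to.) Overlapping impls are tolerated for marker traits only because such traits carry no items, which is why the function below insists that their impls stay empty:

#![feature(marker_trait_attr)]

#[marker]
trait Marked {}

// These impls overlap (e.g. for `u32`, which is both `Copy` and `Clone`);
// that is fine because `Marked` defines nothing an overlap could make ambiguous.
impl<T: Copy> Marked for T {}
impl<T: Clone> Marked for T {}

fn assert_marked<T: Marked>(_value: T) {}

fn main() {
    assert_marked(42u32);
}
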
-fn enforce_empty_impls_for_marker_traits(tcx: TyCtxt, impl_def_id: DefId, trait_def_id: DefId) { +fn enforce_empty_impls_for_marker_traits( + tcx: TyCtxt<'_, '_, '_>, + impl_def_id: DefId, + trait_def_id: DefId +) { if !tcx.trait_def(trait_def_id).is_marker { return; } @@ -119,7 +117,7 @@ fn enforce_empty_impls_for_marker_traits(tcx: TyCtxt, impl_def_id: DefId, trait_ .emit(); } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { use self::builtin::coerce_unsized_info; use self::inherent_impls::{crate_inherent_impls, inherent_impls}; use self::inherent_impls_overlap::crate_inherent_impls_overlap_check; @@ -147,22 +145,22 @@ fn coherent_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) { pub fn check_coherence<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { for &trait_def_id in tcx.hir().krate().trait_impls.keys() { - ty::query::queries::coherent_trait::ensure(tcx, trait_def_id); + tcx.ensure().coherent_trait(trait_def_id); } - unsafety::check(tcx); - orphan::check(tcx); + time(tcx.sess, "unsafety checking", || unsafety::check(tcx)); + time(tcx.sess, "orphan checking", || orphan::check(tcx)); // these queries are executed for side-effects (error reporting): - ty::query::queries::crate_inherent_impls::ensure(tcx, LOCAL_CRATE); - ty::query::queries::crate_inherent_impls_overlap_check::ensure(tcx, LOCAL_CRATE); + tcx.ensure().crate_inherent_impls(LOCAL_CRATE); + tcx.ensure().crate_inherent_impls_overlap_check(LOCAL_CRATE); } -/// Overlap: No two impls for the same trait are implemented for the +/// Overlap: no two impls for the same trait are implemented for the /// same type. Likewise, no two inherent impls for a given type /// constructor provide a method with the same name. -fn check_impl_overlap<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeId) { - let impl_def_id = tcx.hir().local_def_id(node_id); +fn check_impl_overlap<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_id: HirId) { + let impl_def_id = tcx.hir().local_def_id_from_hir_id(hir_id); let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap(); let trait_def_id = trait_ref.def_id; @@ -181,24 +179,36 @@ fn check_impl_overlap<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, node_id: ast::NodeI // This is something like impl Trait1 for Trait2. Illegal // if Trait1 is a supertrait of Trait2 or Trait2 is not object safe. - if !tcx.is_object_safe(data.principal().def_id()) { - // This is an error, but it will be reported by wfcheck. Ignore it here. - // This is tested by `coherence-impl-trait-for-trait-object-safe.rs`. - } else { - let mut supertrait_def_ids = - traits::supertrait_def_ids(tcx, data.principal().def_id()); - if supertrait_def_ids.any(|d| d == trait_def_id) { - let sp = tcx.sess.source_map().def_span(tcx.span_of_impl(impl_def_id).unwrap()); - struct_span_err!(tcx.sess, - sp, - E0371, - "the object type `{}` automatically implements the trait `{}`", - trait_ref.self_ty(), - tcx.item_path_str(trait_def_id)) - .span_label(sp, format!("`{}` automatically implements trait `{}`", - trait_ref.self_ty(), - tcx.item_path_str(trait_def_id))) - .emit(); + let component_def_ids = data.iter().flat_map(|predicate| { + match predicate.skip_binder() { + ty::ExistentialPredicate::Trait(tr) => Some(tr.def_id), + ty::ExistentialPredicate::AutoTrait(def_id) => Some(*def_id), + // An associated type projection necessarily comes with + // an additional `Trait` requirement. + ty::ExistentialPredicate::Projection(..) 
=> None, + } + }); + + for component_def_id in component_def_ids { + if !tcx.is_object_safe(component_def_id) { + // This is an error, but it will be reported by wfcheck. Ignore it here. + // This is tested by `coherence-impl-trait-for-trait-object-safe.rs`. + } else { + let mut supertrait_def_ids = + traits::supertrait_def_ids(tcx, component_def_id); + if supertrait_def_ids.any(|d| d == trait_def_id) { + let sp = tcx.sess.source_map().def_span(tcx.span_of_impl(impl_def_id).unwrap()); + struct_span_err!(tcx.sess, + sp, + E0371, + "the object type `{}` automatically implements the trait `{}`", + trait_ref.self_ty(), + tcx.def_path_str(trait_def_id)) + .span_label(sp, format!("`{}` automatically implements trait `{}`", + trait_ref.self_ty(), + tcx.def_path_str(trait_def_id))) + .emit(); + } } } } diff --git a/src/librustc_typeck/coherence/orphan.rs b/src/librustc_typeck/coherence/orphan.rs index 131413eb402d4..7e1c38e051542 100644 --- a/src/librustc_typeck/coherence/orphan.rs +++ b/src/librustc_typeck/coherence/orphan.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Orphan checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. @@ -27,16 +17,16 @@ struct OrphanChecker<'cx, 'tcx: 'cx> { impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> { /// Checks exactly one impl for orphan rules and other such - /// restrictions. In this fn, it can happen that multiple errors + /// restrictions. In this fn, it can happen that multiple errors /// apply to a specific impl, so just return after reporting one /// to prevent inundating the user with a bunch of similar error /// reports. 
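
(Illustrative recap, not part of this patch.) Roughly, the orphan rule the visitor below enforces is that a trait impl is accepted when the trait or the self type is defined locally; a foreign trait implemented for a foreign type is rejected (E0117). Both of these compile in a downstream crate:

use std::fmt;

// Local type, foreign trait: accepted.
struct LocalType;

impl fmt::Display for LocalType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "LocalType")
    }
}

// Local trait, foreign type: also accepted.
trait LocalTrait {
    fn describe(&self) -> String;
}

impl LocalTrait for String {
    fn describe(&self) -> String {
        format!("a String of length {}", self.len())
    }
}

fn main() {
    println!("{}", LocalType);
    println!("{}", String::from("orphan demo").describe());
}

// Writing `impl fmt::Display for Vec<u8>` here, with both the trait and the
// type foreign, is the case that triggers the E0117 message quoted below.
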
fn visit_item(&mut self, item: &hir::Item) { - let def_id = self.tcx.hir().local_def_id(item.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); // "Trait" impl if let hir::ItemKind::Impl(.., Some(_), _, _) = item.node { debug!("coherence2::orphan check: trait impl {}", - self.tcx.hir().node_to_string(item.id)); + self.tcx.hir().hir_to_string(item.hir_id)); let trait_ref = self.tcx.impl_trait_ref(def_id).unwrap(); let trait_def_id = trait_ref.def_id; let cm = self.tcx.sess.source_map(); @@ -50,7 +40,7 @@ impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> { "only traits defined in the current crate can be \ implemented for arbitrary types") .span_label(sp, "impl doesn't use types inside crate") - .note("the impl does not reference any types defined in this crate") + .note("the impl does not reference only types defined in this crate") .note("define and implement a trait or new type instead") .emit(); return; @@ -131,7 +121,7 @@ impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> { format!("cross-crate traits with a default impl, like `{}`, \ can only be implemented for a struct/enum type \ defined in the current crate", - self.tcx.item_path_str(trait_def_id)), + self.tcx.def_path_str(trait_def_id)), "can't implement cross-crate trait for type in another crate" )) } @@ -139,7 +129,7 @@ impl<'cx, 'tcx, 'v> ItemLikeVisitor<'v> for OrphanChecker<'cx, 'tcx> { _ => { Some((format!("cross-crate traits with a default impl, like `{}`, can \ only be implemented for a struct/enum type, not `{}`", - self.tcx.item_path_str(trait_def_id), + self.tcx.def_path_str(trait_def_id), self_ty), "can't implement cross-crate trait with a default impl for \ non-struct/enum type")) diff --git a/src/librustc_typeck/coherence/unsafety.rs b/src/librustc_typeck/coherence/unsafety.rs index b4196b572b4b2..0b1de510aa4bd 100644 --- a/src/librustc_typeck/coherence/unsafety.rs +++ b/src/librustc_typeck/coherence/unsafety.rs @@ -1,13 +1,3 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Unsafety checker: every impl either implements a trait defined in this //! crate or pertains to a type defined in this crate. @@ -31,7 +21,8 @@ impl<'cx, 'tcx, 'v> UnsafetyChecker<'cx, 'tcx> { unsafety: hir::Unsafety, polarity: hir::ImplPolarity) { - if let Some(trait_ref) = self.tcx.impl_trait_ref(self.tcx.hir().local_def_id(item.id)) { + let local_did = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); + if let Some(trait_ref) = self.tcx.impl_trait_ref(local_did) { let trait_def = self.tcx.trait_def(trait_ref.def_id); let unsafe_attr = impl_generics.and_then(|generics| { generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle") diff --git a/src/librustc_typeck/collect.rs b/src/librustc_typeck/collect.rs index 3b8b6d47d9130..0cd7fe9159493 100644 --- a/src/librustc_typeck/collect.rs +++ b/src/librustc_typeck/collect.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! "Collection" is the process of determining the type and other external //! details of each item in Rust. Collection is specifically concerned //! with *interprocedural* things -- for example, for a function @@ -24,17 +14,19 @@ //! At present, however, we do run collection across all items in the //! crate as a kind of pass. This should eventually be factored away. -use astconv::{AstConv, Bounds}; -use constrained_type_params as ctp; -use lint; -use middle::lang_items::SizedTraitLangItem; -use middle::resolve_lifetime as rl; -use middle::weak_lang_items; +use crate::astconv::{AstConv, Bounds}; +use crate::constrained_generic_params as ctp; +use crate::check::intrinsic::intrisic_operation_unsafety; +use crate::lint; +use crate::middle::lang_items::SizedTraitLangItem; +use crate::middle::resolve_lifetime as rl; +use crate::middle::weak_lang_items; use rustc::mir::mono::Linkage; use rustc::ty::query::Providers; -use rustc::ty::subst::Substs; +use rustc::ty::subst::{Subst, InternalSubsts}; use rustc::ty::util::Discr; use rustc::ty::util::IntTypeExt; +use rustc::ty::subst::UnpackedKind; use rustc::ty::{self, AdtKind, ToPolyTraitRef, Ty, TyCtxt}; use rustc::ty::{ReprOptions, ToPredicate}; use rustc::util::captures::Captures; @@ -43,8 +35,8 @@ use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi; use syntax::ast; -use syntax::ast::MetaItemKind; -use syntax::attr::{InlineAttr, list_contains_name, mark_used}; +use syntax::ast::{Ident, MetaItemKind}; +use syntax::attr::{InlineAttr, OptimizeAttr, list_contains_name, mark_used}; use syntax::source_map::Spanned; use syntax::feature_gate; use syntax::symbol::{keywords, Symbol}; @@ -64,14 +56,14 @@ struct OnlySelfBounds(bool); /////////////////////////////////////////////////////////////////////////// // Main entry point -pub fn collect_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - let mut visitor = CollectItemTypesVisitor { tcx }; - tcx.hir() - .krate() - .visit_all_item_likes(&mut visitor.as_deep_visitor()); +fn collect_mod_item_types<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + tcx.hir().visit_item_likes_in_module( + module_def_id, + &mut CollectItemTypesVisitor { tcx }.as_deep_visitor() + ); } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { type_of, generics_of, @@ -87,6 +79,7 @@ pub fn provide(providers: &mut Providers) { impl_polarity, is_foreign_item, codegen_fn_attrs, + collect_mod_item_types, ..*providers }; } @@ -94,7 +87,7 @@ pub fn provide(providers: &mut Providers) { /////////////////////////////////////////////////////////////////////////// /// Context specific to some particular item. This is what implements -/// AstConv. It has information about the predicates that are defined +/// `AstConv`. It has information about the predicates that are defined /// on the trait. Unfortunately, this predicate information is /// available in various different forms at various points in the /// process. So we can't just store a pointer to e.g., the AST or the @@ -119,7 +112,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> { } fn visit_item(&mut self, item: &'tcx hir::Item) { - convert_item(self.tcx, item.id); + convert_item(self.tcx, item.hir_id); intravisit::walk_item(self, item); } @@ -130,10 +123,14 @@ impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> { hir::GenericParamKind::Type { default: Some(_), .. 
} => { - let def_id = self.tcx.hir().local_def_id(param.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(param.hir_id); self.tcx.type_of(def_id); } hir::GenericParamKind::Type { .. } => {} + hir::GenericParamKind::Const { .. } => { + let def_id = self.tcx.hir().local_def_id_from_hir_id(param.hir_id); + self.tcx.type_of(def_id); + } } } intravisit::walk_generics(self, generics); @@ -141,7 +138,7 @@ impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> { fn visit_expr(&mut self, expr: &'tcx hir::Expr) { if let hir::ExprKind::Closure(..) = expr.node { - let def_id = self.tcx.hir().local_def_id(expr.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(expr.hir_id); self.tcx.generics_of(def_id); self.tcx.type_of(def_id); } @@ -149,12 +146,12 @@ impl<'a, 'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'a, 'tcx> { } fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem) { - convert_trait_item(self.tcx, trait_item.id); + convert_trait_item(self.tcx, trait_item.hir_id); intravisit::walk_trait_item(self, trait_item); } fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem) { - convert_impl_item(self.tcx, impl_item.id); + convert_impl_item(self.tcx, impl_item.hir_id); intravisit::walk_impl_item(self, impl_item); } } @@ -252,9 +249,9 @@ fn type_param_predicates<'a, 'tcx>( // written inline like `` or in a where clause like // `where T : Foo`. - let param_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let param_id = tcx.hir().as_local_hir_id(def_id).unwrap(); let param_owner = tcx.hir().ty_param_owner(param_id); - let param_owner_def_id = tcx.hir().local_def_id(param_owner); + let param_owner_def_id = tcx.hir().local_def_id_from_hir_id(param_owner); let generics = tcx.generics_of(param_owner_def_id); let index = generics.param_def_id_to_index[&def_id]; let ty = tcx.mk_ty_param(index, tcx.hir().ty_param_name(param_id).as_interned_str()); @@ -277,8 +274,8 @@ fn type_param_predicates<'a, 'tcx>( }, ); - let item_node_id = tcx.hir().as_local_node_id(item_def_id).unwrap(); - let ast_generics = match tcx.hir().get(item_node_id) { + let item_hir_id = tcx.hir().as_local_hir_id(item_def_id).unwrap(); + let ast_generics = match tcx.hir().get_by_hir_id(item_hir_id) { Node::TraitItem(item) => &item.generics, Node::ImplItem(item) => &item.generics, @@ -298,7 +295,7 @@ fn type_param_predicates<'a, 'tcx>( | ItemKind::Union(_, ref generics) => generics, ItemKind::Trait(_, _, ref generics, ..) => { // Implied `Self: Trait` and supertrait bounds. - if param_id == item_node_id { + if param_id == item_hir_id { let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id); Lrc::make_mut(&mut result) .predicates @@ -327,14 +324,14 @@ fn type_param_predicates<'a, 'tcx>( } impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { - /// Find bounds from `hir::Generics`. This requires scanning through the + /// Finds bounds from `hir::Generics`. This requires scanning through the /// AST. We do this to avoid having to convert *all* the bounds, which /// would create artificial cycles. Instead we can only convert the /// bounds for a type parameter `X` if `X::Foo` is used. fn type_parameter_bounds_in_generics( &self, ast_generics: &hir::Generics, - param_id: ast::NodeId, + param_id: hir::HirId, ty: Ty<'tcx>, only_self_bounds: OnlySelfBounds, ) -> Vec<(ty::Predicate<'tcx>, Span)> { @@ -342,7 +339,7 @@ impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { .params .iter() .filter_map(|param| match param.kind { - GenericParamKind::Type { .. } if param.id == param_id => Some(¶m.bounds), + GenericParamKind::Type { .. 
} if param.hir_id == param_id => Some(¶m.bounds), _ => None, }) .flat_map(|bounds| bounds.iter()) @@ -373,18 +370,18 @@ impl<'a, 'tcx> ItemCtxt<'a, 'tcx> { } /// Tests whether this is the AST for a reference to the type -/// parameter with id `param_id`. We use this so as to avoid running +/// parameter with ID `param_id`. We use this so as to avoid running /// `ast_ty_to_ty`, because we want to avoid triggering an all-out /// conversion of the type to avoid inducing unnecessary cycles. fn is_param<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, ast_ty: &hir::Ty, - param_id: ast::NodeId, + param_id: hir::HirId, ) -> bool { if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) = ast_ty.node { match path.def { Def::SelfTy(Some(def_id), None) | Def::TyParam(def_id) => { - def_id == tcx.hir().local_def_id(param_id) + def_id == tcx.hir().local_def_id_from_hir_id(param_id) } _ => false, } @@ -393,10 +390,10 @@ fn is_param<'a, 'tcx>( } } -fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) { - let it = tcx.hir().expect_item(item_id); - debug!("convert: item {} with id {}", it.name, it.id); - let def_id = tcx.hir().local_def_id(item_id); +fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: hir::HirId) { + let it = tcx.hir().expect_item_by_hir_id(item_id); + debug!("convert: item {} with id {}", it.ident, it.hir_id); + let def_id = tcx.hir().local_def_id_from_hir_id(item_id); match it.node { // These don't define types. hir::ItemKind::ExternCrate(_) @@ -405,7 +402,7 @@ fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) { | hir::ItemKind::GlobalAsm(_) => {} hir::ItemKind::ForeignMod(ref foreign_mod) => { for item in &foreign_mod.items { - let def_id = tcx.hir().local_def_id(item.id); + let def_id = tcx.hir().local_def_id_from_hir_id(item.hir_id); tcx.generics_of(def_id); tcx.type_of(def_id); tcx.predicates_of(def_id); @@ -443,14 +440,14 @@ fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) { tcx.predicates_of(def_id); for f in struct_def.fields() { - let def_id = tcx.hir().local_def_id(f.id); + let def_id = tcx.hir().local_def_id_from_hir_id(f.hir_id); tcx.generics_of(def_id); tcx.type_of(def_id); tcx.predicates_of(def_id); } - if !struct_def.is_struct() { - convert_variant_ctor(tcx, struct_def.id()); + if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { + convert_variant_ctor(tcx, ctor_hir_id); } } @@ -475,9 +472,9 @@ fn convert_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_id: ast::NodeId) { } } -fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: ast::NodeId) { +fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: hir::HirId) { let trait_item = tcx.hir().expect_trait_item(trait_item_id); - let def_id = tcx.hir().local_def_id(trait_item.id); + let def_id = tcx.hir().local_def_id_from_hir_id(trait_item.hir_id); tcx.generics_of(def_id); match trait_item.node { @@ -496,8 +493,8 @@ fn convert_trait_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, trait_item_id: ast: tcx.predicates_of(def_id); } -fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: ast::NodeId) { - let def_id = tcx.hir().local_def_id(impl_item_id); +fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: hir::HirId) { + let def_id = tcx.hir().local_def_id_from_hir_id(impl_item_id); tcx.generics_of(def_id); tcx.type_of(def_id); tcx.predicates_of(def_id); @@ -506,8 +503,8 @@ fn convert_impl_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, impl_item_id: ast::N } } -fn 
convert_variant_ctor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ctor_id: ast::NodeId) { - let def_id = tcx.hir().local_def_id(ctor_id); +fn convert_variant_ctor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ctor_id: hir::HirId) { + let def_id = tcx.hir().local_def_id_from_hir_id(ctor_id); tcx.generics_of(def_id); tcx.type_of(def_id); tcx.predicates_of(def_id); @@ -528,7 +525,7 @@ fn convert_enum_variant_types<'a, 'tcx>( let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx)); prev_discr = Some( if let Some(ref e) = variant.node.disr_expr { - let expr_did = tcx.hir().local_def_id(e.id); + let expr_did = tcx.hir().local_def_id_from_hir_id(e.hir_id); def.eval_explicit_discr(tcx, expr_did) } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) { Some(discr) @@ -543,7 +540,7 @@ fn convert_enum_variant_types<'a, 'tcx>( format!("overflowed on value after {}", prev_discr.unwrap()), ).note(&format!( "explicitly set `{} = {}` if that is desired outcome", - variant.node.name, wrapped_discr + variant.node.ident, wrapped_discr )) .emit(); None @@ -551,7 +548,7 @@ fn convert_enum_variant_types<'a, 'tcx>( ); for f in variant.node.data.fields() { - let def_id = tcx.hir().local_def_id(f.id); + let def_id = tcx.hir().local_def_id_from_hir_id(f.hir_id); tcx.generics_of(def_id); tcx.type_of(def_id); tcx.predicates_of(def_id); @@ -559,26 +556,29 @@ fn convert_enum_variant_types<'a, 'tcx>( // Convert the ctor, if any. This also registers the variant as // an item. - convert_variant_ctor(tcx, variant.node.data.id()); + if let Some(ctor_hir_id) = variant.node.data.ctor_hir_id() { + convert_variant_ctor(tcx, ctor_hir_id); + } } } fn convert_variant<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, - did: DefId, - name: ast::Name, + variant_did: Option, + ctor_did: Option, + ident: Ident, discr: ty::VariantDiscr, def: &hir::VariantData, adt_kind: ty::AdtKind, - attribute_def_id: DefId + parent_did: DefId ) -> ty::VariantDef { let mut seen_fields: FxHashMap = Default::default(); - let node_id = tcx.hir().as_local_node_id(did).unwrap(); + let hir_id = tcx.hir().as_local_hir_id(variant_did.unwrap_or(parent_did)).unwrap(); let fields = def .fields() .iter() .map(|f| { - let fid = tcx.hir().local_def_id(f.id); + let fid = tcx.hir().local_def_id_from_hir_id(f.hir_id); let dup_span = seen_fields.get(&f.ident.modern()).cloned(); if let Some(prev_span) = dup_span { struct_span_err!( @@ -597,25 +597,33 @@ fn convert_variant<'a, 'tcx>( ty::FieldDef { did: fid, ident: f.ident, - vis: ty::Visibility::from_hir(&f.vis, node_id, tcx), + vis: ty::Visibility::from_hir(&f.vis, hir_id, tcx), } }) .collect(); - ty::VariantDef::new(tcx, - did, - name, + let recovered = match def { + hir::VariantData::Struct(_, r) => *r, + _ => false, + }; + ty::VariantDef::new( + tcx, + ident, + variant_did, + ctor_did, discr, fields, - adt_kind, CtorKind::from_hir(def), - attribute_def_id) + adt_kind, + parent_did, + recovered, + ) } fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::AdtDef { use rustc::hir::*; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let item = match tcx.hir().get(node_id) { + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let item = match tcx.hir().get_by_hir_id(hir_id) { Node::Item(item) => item, _ => bug!(), }; @@ -624,64 +632,58 @@ fn adt_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Ad let (kind, variants) = match item.node { ItemKind::Enum(ref def, _) => { let mut distance_from_explicit = 0; - ( - AdtKind::Enum, - def.variants - .iter() - 
.map(|v| { - let did = tcx.hir().local_def_id(v.node.data.id()); - let discr = if let Some(ref e) = v.node.disr_expr { - distance_from_explicit = 0; - ty::VariantDiscr::Explicit(tcx.hir().local_def_id(e.id)) - } else { - ty::VariantDiscr::Relative(distance_from_explicit) - }; - distance_from_explicit += 1; + let variants = def.variants + .iter() + .map(|v| { + let variant_did = Some(tcx.hir().local_def_id_from_hir_id(v.node.id)); + let ctor_did = v.node.data.ctor_hir_id() + .map(|hir_id| tcx.hir().local_def_id_from_hir_id(hir_id)); + + let discr = if let Some(ref e) = v.node.disr_expr { + distance_from_explicit = 0; + ty::VariantDiscr::Explicit(tcx.hir().local_def_id_from_hir_id(e.hir_id)) + } else { + ty::VariantDiscr::Relative(distance_from_explicit) + }; + distance_from_explicit += 1; + + convert_variant(tcx, variant_did, ctor_did, v.node.ident, discr, + &v.node.data, AdtKind::Enum, def_id) + }) + .collect(); - convert_variant(tcx, did, v.node.name, discr, &v.node.data, AdtKind::Enum, - did) - }) - .collect(), - ) + (AdtKind::Enum, variants) } ItemKind::Struct(ref def, _) => { - // Use separate constructor id for unit/tuple structs and reuse did for braced structs. - let ctor_id = if !def.is_struct() { - Some(tcx.hir().local_def_id(def.id())) - } else { - None - }; - ( - AdtKind::Struct, - std::iter::once(convert_variant( - tcx, - ctor_id.unwrap_or(def_id), - item.name, - ty::VariantDiscr::Relative(0), - def, - AdtKind::Struct, - def_id - )).collect(), - ) - } - ItemKind::Union(ref def, _) => ( - AdtKind::Union, - std::iter::once(convert_variant( - tcx, - def_id, - item.name, - ty::VariantDiscr::Relative(0), - def, - AdtKind::Union, - def_id - )).collect(), - ), + let variant_did = None; + let ctor_did = def.ctor_hir_id() + .map(|hir_id| tcx.hir().local_def_id_from_hir_id(hir_id)); + + let variants = std::iter::once(convert_variant( + tcx, variant_did, ctor_did, item.ident, ty::VariantDiscr::Relative(0), def, + AdtKind::Struct, def_id, + )).collect(); + + (AdtKind::Struct, variants) + } + ItemKind::Union(ref def, _) => { + let variant_did = None; + let ctor_did = def.ctor_hir_id() + .map(|hir_id| tcx.hir().local_def_id_from_hir_id(hir_id)); + + let variants = std::iter::once(convert_variant( + tcx, variant_did, ctor_did, item.ident, ty::VariantDiscr::Relative(0), def, + AdtKind::Union, def_id, + )).collect(); + + (AdtKind::Union, variants) + }, _ => bug!(), }; tcx.alloc_adt_def(def_id, kind, variants, repr) } -/// Ensures that the super-predicates of the trait with def-id +/// Ensures that the super-predicates of the trait with `DefId` /// trait_def_id are converted and stored. 
This also ensures that /// the transitive super-predicates are converted; fn super_predicates_of<'a, 'tcx>( @@ -689,11 +691,11 @@ fn super_predicates_of<'a, 'tcx>( trait_def_id: DefId, ) -> Lrc> { debug!("super_predicates(trait_def_id={:?})", trait_def_id); - let trait_node_id = tcx.hir().as_local_node_id(trait_def_id).unwrap(); + let trait_hir_id = tcx.hir().as_local_hir_id(trait_def_id).unwrap(); - let item = match tcx.hir().get(trait_node_id) { + let item = match tcx.hir().get_by_hir_id(trait_hir_id) { Node::Item(item) => item, - _ => bug!("trait_node_id {} is not an item", trait_node_id), + _ => bug!("trait_node_id {} is not an item", trait_hir_id), }; let (generics, bounds) = match item.node { @@ -715,9 +717,9 @@ fn super_predicates_of<'a, 'tcx>( // In the case of trait aliases, however, we include all bounds in the where clause, // so e.g., `trait Foo = where u32: PartialEq` would include `u32: PartialEq` // as one of its "superpredicates". - let is_trait_alias = ty::is_trait_alias(tcx, trait_def_id); + let is_trait_alias = tcx.is_trait_alias(trait_def_id); let superbounds2 = icx.type_parameter_bounds_in_generics( - generics, item.id, self_param_ty, OnlySelfBounds(!is_trait_alias)); + generics, item.hir_id, self_param_ty, OnlySelfBounds(!is_trait_alias)); // Combine the two lists to form the complete set of superbounds: let superbounds: Vec<_> = superbounds1.into_iter().chain(superbounds2).collect(); @@ -738,8 +740,8 @@ fn super_predicates_of<'a, 'tcx>( } fn trait_def<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::TraitDef { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let item = tcx.hir().expect_item(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let item = tcx.hir().expect_item_by_hir_id(hir_id); let (is_auto, unsafety) = match item.node { hir::ItemKind::Trait(is_auto, unsafety, ..) => (is_auto == hir::IsAuto::Yes, unsafety), @@ -815,8 +817,7 @@ fn has_late_bound_regions<'a, 'tcx>( return; } - let hir_id = self.tcx.hir().node_to_hir_id(lt.id); - match self.tcx.named_region(hir_id) { + match self.tcx.named_region(lt.hir_id) { Some(rl::Region::Static) | Some(rl::Region::EarlyBound(..)) => {} Some(rl::Region::LateBound(debruijn, _, _)) | Some(rl::Region::LateBoundAnon(debruijn, _)) if debruijn < self.outer_index => {} @@ -842,8 +843,7 @@ fn has_late_bound_regions<'a, 'tcx>( }; for param in &generics.params { if let GenericParamKind::Lifetime { .. } = param.kind { - let hir_id = tcx.hir().node_to_hir_id(param.id); - if tcx.is_late_bound(hir_id) { + if tcx.is_late_bound(param.hir_id) { return Some(param.span); } } @@ -884,14 +884,14 @@ fn has_late_bound_regions<'a, 'tcx>( fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty::Generics { use rustc::hir::*; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); - let node = tcx.hir().get(node_id); + let node = tcx.hir().get_by_hir_id(hir_id); let parent_def_id = match node { - Node::ImplItem(_) | Node::TraitItem(_) | Node::Variant(_) - | Node::StructCtor(_) | Node::Field(_) => { - let parent_id = tcx.hir().get_parent(node_id); - Some(tcx.hir().local_def_id(parent_id)) + Node::ImplItem(_) | Node::TraitItem(_) | Node::Variant(_) | + Node::Ctor(..) 
| Node::Field(_) => { + let parent_id = tcx.hir().get_parent_item(hir_id); + Some(tcx.hir().local_def_id_from_hir_id(parent_id)) } Node::Expr(&hir::Expr { node: hir::ExprKind::Closure(..), @@ -934,12 +934,12 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty // // Something of a hack: use the node id for the trait, also as // the node id for the Self type parameter. - let param_id = item.id; + let param_id = item.hir_id; opt_self = Some(ty::GenericParamDef { index: 0, name: keywords::SelfUpper.name().as_interned_str(), - def_id: tcx.hir().local_def_id(param_id), + def_id: tcx.hir().local_def_id_from_hir_id(param_id), pure_wrt_drop: false, kind: ty::GenericParamDefKind::Type { has_default: false, @@ -985,13 +985,12 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty .map(|(i, param)| ty::GenericParamDef { name: param.name.ident().as_interned_str(), index: own_start + i as u32, - def_id: tcx.hir().local_def_id(param.id), + def_id: tcx.hir().local_def_id_from_hir_id(param.hir_id), pure_wrt_drop: param.pure_wrt_drop, kind: ty::GenericParamDefKind::Lifetime, }), ); - let hir_id = tcx.hir().node_to_hir_id(node_id); let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id); // Now create the real type parameters. @@ -1001,51 +1000,65 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty ast_generics .params .iter() - .filter_map(|param| match param.kind { - GenericParamKind::Type { - ref default, - synthetic, - .. - } => { - if param.name.ident().name == keywords::SelfUpper.name() { - span_bug!( - param.span, - "`Self` should not be the name of a regular parameter" - ); - } - - if !allow_defaults && default.is_some() { - if !tcx.features().default_type_parameter_fallback { - tcx.lint_node( - lint::builtin::INVALID_TYPE_PARAM_DEFAULT, - param.id, + .filter_map(|param| { + let kind = match param.kind { + GenericParamKind::Type { + ref default, + synthetic, + .. + } => { + if param.name.ident().name == keywords::SelfUpper.name() { + span_bug!( param.span, - &format!( - "defaults for type parameters are only allowed in \ - `struct`, `enum`, `type`, or `trait` definitions." - ), + "`Self` should not be the name of a regular parameter" ); } - } - let ty_param = ty::GenericParamDef { - index: type_start + i as u32, - name: param.name.ident().as_interned_str(), - def_id: tcx.hir().local_def_id(param.id), - pure_wrt_drop: param.pure_wrt_drop, - kind: ty::GenericParamDefKind::Type { + if !allow_defaults && default.is_some() { + if !tcx.features().default_type_parameter_fallback { + tcx.lint_hir( + lint::builtin::INVALID_TYPE_PARAM_DEFAULT, + param.hir_id, + param.span, + &format!( + "defaults for type parameters are only allowed in \ + `struct`, `enum`, `type`, or `trait` definitions." + ), + ); + } + } + + ty::GenericParamDefKind::Type { has_default: default.is_some(), object_lifetime_default: object_lifetime_defaults .as_ref() .map_or(rl::Set1::Empty, |o| o[i]), synthetic, - }, - }; - i += 1; - Some(ty_param) - } - _ => None, - }), + } + } + GenericParamKind::Const { .. 
} => { + if param.name.ident().name == keywords::SelfUpper.name() { + span_bug!( + param.span, + "`Self` should not be the name of a regular parameter", + ); + } + + ty::GenericParamDefKind::Const + } + _ => return None, + }; + + let param_def = ty::GenericParamDef { + index: type_start + i as u32, + name: param.name.ident().as_interned_str(), + def_id: tcx.hir().local_def_id_from_hir_id(param.hir_id), + pure_wrt_drop: param.pure_wrt_drop, + kind, + }; + i += 1; + Some(param_def) + }) ); // provide junk type parameter defs - the only place that @@ -1079,7 +1092,7 @@ fn generics_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> &'tcx ty }), ); - tcx.with_freevars(node_id, |fv| { + tcx.with_freevars(hir_id, |fv| { params.extend(fv.iter().zip((dummy_args.len() as u32)..).map(|(_, i)| { ty::GenericParamDef { index: type_start + i, @@ -1116,38 +1129,61 @@ fn report_assoc_ty_on_inherent_impl<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: tcx.sess, span, E0202, - "associated types are not allowed in inherent impls" + "associated types are not yet supported in inherent impls (see #8995)" ); } fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { + checked_type_of(tcx, def_id, true).unwrap() +} + +/// Same as [`type_of`] but returns [`Option`] instead of failing. +/// +/// If you want to fail anyway, you can set the `fail` parameter to true, but in this case, +/// you'd better just call [`type_of`] directly. +pub fn checked_type_of<'a, 'tcx>( + tcx: TyCtxt<'a, 'tcx, 'tcx>, + def_id: DefId, + fail: bool, +) -> Option> { use rustc::hir::*; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = match tcx.hir().as_local_hir_id(def_id) { + Some(hir_id) => hir_id, + None => { + if !fail { + return None; + } + bug!("invalid node"); + } + }; let icx = ItemCtxt::new(tcx, def_id); - match tcx.hir().get(node_id) { + Some(match tcx.hir().get_by_hir_id(hir_id) { Node::TraitItem(item) => match item.node { TraitItemKind::Method(..) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_fn_def(def_id, substs) } TraitItemKind::Const(ref ty, _) | TraitItemKind::Type(_, Some(ref ty)) => icx.to_ty(ty), TraitItemKind::Type(_, None) => { + if !fail { + return None; + } span_bug!(item.span, "associated type missing default"); } }, Node::ImplItem(item) => match item.node { ImplItemKind::Method(..) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_fn_def(def_id, substs) } ImplItemKind::Const(ref ty, _) => icx.to_ty(ty), ImplItemKind::Existential(_) => { if tcx - .impl_trait_ref(tcx.hir().get_parent_did(node_id)) + .impl_trait_ref(tcx.hir().get_parent_did_by_hir_id(hir_id)) .is_none() { report_assoc_ty_on_inherent_impl(tcx, item.span); @@ -1157,7 +1193,7 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { } ImplItemKind::Type(ref ty) => { if tcx - .impl_trait_ref(tcx.hir().get_parent_did(node_id)) + .impl_trait_ref(tcx.hir().get_parent_did_by_hir_id(hir_id)) .is_none() { report_assoc_ty_on_inherent_impl(tcx, item.span); @@ -1174,12 +1210,12 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { | ItemKind::Ty(ref t, _) | ItemKind::Impl(.., ref t, _) => icx.to_ty(t), ItemKind::Fn(..) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_fn_def(def_id, substs) } ItemKind::Enum(..) 
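(Illustrative aside, not part of the patch: the `INVALID_TYPE_PARAM_DEFAULT` lint emitted above only tolerates type-parameter defaults in `struct`, `enum`, `type`, or `trait` definitions. A small sketch of the accepted forms, with the rejected one left commented out; the names are made up for the example:)

```rust
struct Wrapper<T = u32> {
    value: T,
}

type MaybeInt<T = i32> = Option<T>;

// fn with_default<T = u32>(_: T) {} // would trip INVALID_TYPE_PARAM_DEFAULT

fn main() {
    let w: Wrapper = Wrapper { value: 7u32 }; // `T` defaults to `u32`
    let m: MaybeInt = Some(3);                // `T` defaults to `i32`
    println!("{} {:?}", w.value, m);
}
```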
| ItemKind::Struct(..) | ItemKind::Union(..) => { let def = tcx.adt_def(def_id); - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_adt(def, substs) } ItemKind::Existential(hir::ExistTy { @@ -1194,7 +1230,7 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { tcx.typeck_tables_of(owner) .concrete_existential_types .get(&def_id) - .cloned() + .map(|opaque| opaque.concrete_type) .unwrap_or_else(|| { // This can occur if some error in the // owner fn prevented us from populating @@ -1216,6 +1252,9 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { | ItemKind::GlobalAsm(..) | ItemKind::ExternCrate(..) | ItemKind::Use(..) => { + if !fail { + return None; + } span_bug!( item.span, "compute_type_of_item: unexpected item type: {:?}", @@ -1227,23 +1266,22 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { Node::ForeignItem(foreign_item) => match foreign_item.node { ForeignItemKind::Fn(..) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_fn_def(def_id, substs) } ForeignItemKind::Static(ref t, _) => icx.to_ty(t), ForeignItemKind::Type => tcx.mk_foreign(def_id), }, - Node::StructCtor(&ref def) - | Node::Variant(&Spanned { + Node::Ctor(&ref def) | Node::Variant(&Spanned { node: hir::VariantKind { data: ref def, .. }, .. }) => match *def { VariantData::Unit(..) | VariantData::Struct(..) => { - tcx.type_of(tcx.hir().get_parent_did(node_id)) + tcx.type_of(tcx.hir().get_parent_did_by_hir_id(hir_id)) } VariantData::Tuple(..) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); tcx.mk_fn_def(def_id, substs) } }, @@ -1255,66 +1293,158 @@ fn type_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> Ty<'tcx> { .. }) => { if gen.is_some() { - let hir_id = tcx.hir().node_to_hir_id(node_id); - return tcx.typeck_tables_of(def_id).node_id_to_type(hir_id); + return Some(tcx.typeck_tables_of(def_id).node_type(hir_id)); } let substs = ty::ClosureSubsts { - substs: Substs::identity_for_item(tcx, def_id), + substs: InternalSubsts::identity_for_item(tcx, def_id), }; tcx.mk_closure(def_id, substs) } - Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(node_id)) { - Node::Ty(&hir::Ty { - node: hir::TyKind::Array(_, ref constant), - .. - }) - | Node::Ty(&hir::Ty { - node: hir::TyKind::Typeof(ref constant), - .. - }) - | Node::Expr(&hir::Expr { - node: ExprKind::Repeat(_, ref constant), - .. - }) if constant.id == node_id => - { - tcx.types.usize - } - - Node::Variant(&Spanned { - node: - VariantKind { - disr_expr: Some(ref e), - .. - }, - .. - }) if e.id == node_id => - { - tcx.adt_def(tcx.hir().get_parent_did(node_id)) - .repr - .discr_type() - .to_ty(tcx) - } + Node::AnonConst(_) => { + let parent_node = tcx.hir().get_by_hir_id(tcx.hir().get_parent_node_by_hir_id(hir_id)); + match parent_node { + Node::Ty(&hir::Ty { + node: hir::TyKind::Array(_, ref constant), + .. + }) + | Node::Ty(&hir::Ty { + node: hir::TyKind::Typeof(ref constant), + .. + }) + | Node::Expr(&hir::Expr { + node: ExprKind::Repeat(_, ref constant), + .. + }) if constant.hir_id == hir_id => + { + tcx.types.usize + } - x => { - bug!("unexpected const parent in type_of_def_id(): {:?}", x); + Node::Variant(&Spanned { + node: + VariantKind { + disr_expr: Some(ref e), + .. + }, + .. 
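(Illustrative aside, not part of the patch: the `Node::Ctor` / `Variant` arms above give tuple-struct and tuple-variant constructors a function type, while unit and braced variants simply get the ADT's type. In surface Rust the distinction looks like this:)

```rust
struct Meters(f64);

fn main() {
    // The tuple-struct constructor is itself a value of type `fn(f64) -> Meters`.
    let ctor: fn(f64) -> Meters = Meters;
    let m = ctor(1.5);
    println!("{}", m.0);
}
```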
+ }) if e.hir_id == hir_id => + { + tcx.adt_def(tcx.hir().get_parent_did_by_hir_id(hir_id)) + .repr + .discr_type() + .to_ty(tcx) + } + + Node::Ty(&hir::Ty { node: hir::TyKind::Path(_), .. }) | + Node::Expr(&hir::Expr { node: ExprKind::Struct(..), .. }) | + Node::Expr(&hir::Expr { node: ExprKind::Path(_), .. }) => { + let path = match parent_node { + Node::Ty(&hir::Ty { node: hir::TyKind::Path(ref path), .. }) | + Node::Expr(&hir::Expr { node: ExprKind::Path(ref path), .. }) => { + path + } + Node::Expr(&hir::Expr { node: ExprKind::Struct(ref path, ..), .. }) => { + &*path + } + _ => unreachable!(), + }; + + match path { + QPath::Resolved(_, ref path) => { + let mut arg_index = 0; + let mut found_const = false; + for seg in &path.segments { + if let Some(generic_args) = &seg.args { + let args = &generic_args.args; + for arg in args { + if let GenericArg::Const(ct) = arg { + if ct.value.hir_id == hir_id { + found_const = true; + break; + } + arg_index += 1; + } + } + } + } + // Sanity check to make sure everything is as expected. + if !found_const { + if !fail { + return None; + } + bug!("no arg matching AnonConst in path") + } + match path.def { + // We've encountered an `AnonConst` in some path, so we need to + // figure out which generic parameter it corresponds to and return + // the relevant type. + Def::Struct(def_id) + | Def::Union(def_id) + | Def::Enum(def_id) + | Def::Fn(def_id) => { + let generics = tcx.generics_of(def_id); + let mut param_index = 0; + for param in &generics.params { + if let ty::GenericParamDefKind::Const = param.kind { + if param_index == arg_index { + return Some(tcx.type_of(param.def_id)); + } + param_index += 1; + } + } + // This is no generic parameter associated with the arg. This is + // probably from an extra arg where one is not needed. + return Some(tcx.types.err); + } + Def::Err => tcx.types.err, + x => { + if !fail { + return None; + } + bug!("unexpected const parent path def {:?}", x); + } + } + } + x => { + if !fail { + return None; + } + bug!("unexpected const parent path {:?}", x); + } + } + } + + x => { + if !fail { + return None; + } + bug!("unexpected const parent in type_of_def_id(): {:?}", x); + } } - }, + } - Node::GenericParam(param) => match param.kind { - hir::GenericParamKind::Type { - default: Some(ref ty), - .. - } => icx.to_ty(ty), - _ => bug!("unexpected non-type NodeGenericParam"), + Node::GenericParam(param) => match ¶m.kind { + hir::GenericParamKind::Type { default: Some(ref ty), .. } | + hir::GenericParamKind::Const { ref ty, .. } => { + icx.to_ty(ty) + } + x => { + if !fail { + return None; + } + bug!("unexpected non-type Node::GenericParam: {:?}", x) + }, }, x => { + if !fail { + return None; + } bug!("unexpected sort of node in type_of_def_id(): {:?}", x); } - } + }) } fn find_existential_constraints<'a, 'tcx>( @@ -1326,7 +1456,13 @@ fn find_existential_constraints<'a, 'tcx>( struct ConstraintLocator<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, - found: Option<(Span, ty::Ty<'tcx>)>, + // First found type span, actual type, mapping from the existential type's generic + // parameters to the concrete type's generic parameters + // + // The mapping is an index for each use site of a generic parameter in the concrete type + // + // The indices index into the generic parameters on the existential type. 
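(Illustrative aside, not part of the patch: the `AnonConst`-in-path handling above is what recovers the type of a literal used as a const generic argument. Const generics were behind `#![feature(const_generics)]` when this change landed; the basic form below compiles on current stable Rust and is only meant to show what such an anonymous constant looks like in source code:)

```rust
struct ArrayWrapper<const N: usize> {
    data: [u8; N],
}

fn main() {
    // The `3` is an anonymous constant used as a generic argument; its type
    // has to be recovered from the corresponding `const N: usize` parameter.
    let w: ArrayWrapper<3> = ArrayWrapper { data: [0; 3] };
    println!("{}", w.data.len());
}
```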
+ found: Option<(Span, ty::Ty<'tcx>, Vec)>, } impl<'a, 'tcx> ConstraintLocator<'a, 'tcx> { @@ -1341,23 +1477,106 @@ fn find_existential_constraints<'a, 'tcx>( .tcx .typeck_tables_of(def_id) .concrete_existential_types - .get(&self.def_id) - .cloned(); - if let Some(ty) = ty { + .get(&self.def_id); + if let Some(ty::ResolvedOpaqueTy { concrete_type, substs }) = ty { // FIXME(oli-obk): trace the actual span from inference to improve errors let span = self.tcx.def_span(def_id); - if let Some((prev_span, prev_ty)) = self.found { - if ty != prev_ty { + // used to quickly look up the position of a generic parameter + let mut index_map: FxHashMap = FxHashMap::default(); + // skip binder is ok, since we only use this to find generic parameters and their + // positions. + for (idx, subst) in substs.iter().enumerate() { + if let UnpackedKind::Type(ty) = subst.unpack() { + if let ty::Param(p) = ty.sty { + if index_map.insert(p, idx).is_some() { + // there was already an entry for `p`, meaning a generic parameter + // was used twice + self.tcx.sess.span_err( + span, + &format!("defining existential type use restricts existential \ + type by using the generic parameter `{}` twice", p.name), + ); + return; + } + } else { + self.tcx.sess.delay_span_bug( + span, + &format!( + "non-defining exist ty use in defining scope: {:?}, {:?}", + concrete_type, substs, + ), + ); + } + } + } + // compute the index within the existential type for each generic parameter used in + // the concrete type + let indices = concrete_type + .subst(self.tcx, substs) + .walk() + .filter_map(|t| match &t.sty { + ty::Param(p) => Some(*index_map.get(p).unwrap()), + _ => None, + }).collect(); + let is_param = |ty: ty::Ty<'_>| match ty.sty { + ty::Param(_) => true, + _ => false, + }; + if !substs.types().all(is_param) { + self.tcx.sess.span_err( + span, + "defining existential type use does not fully define existential type", + ); + } else if let Some((prev_span, prev_ty, ref prev_indices)) = self.found { + let mut ty = concrete_type.walk().fuse(); + let mut p_ty = prev_ty.walk().fuse(); + let iter_eq = (&mut ty).zip(&mut p_ty).all(|(t, p)| match (&t.sty, &p.sty) { + // type parameters are equal to any other type parameter for the purpose of + // concrete type equality, as it is possible to obtain the same type just + // by passing matching parameters to a function. 
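(Illustrative aside, not part of the patch: the "uses the generic parameter twice" error added above fires on defining uses like the following. This sketch uses the then-unstable `existential_type` syntax, later reworked into `type_alias_impl_trait`, and is intentionally a compile-fail example:)

```rust
#![feature(existential_type)]

use std::fmt::Debug;

existential type Two<A, B>: Debug;

// Error: the defining use passes `T` for both `A` and `B`, i.e. it restricts
// the existential type by using the generic parameter `T` twice.
fn two<T: Debug>(t: T) -> Two<T, T> {
    (t, 4u32)
}

fn main() {}
```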
+ (ty::Param(_), ty::Param(_)) => true, + _ => t == p, + }); + if !iter_eq || ty.next().is_some() || p_ty.next().is_some() { // found different concrete types for the existential type let mut err = self.tcx.sess.struct_span_err( span, - "defining existential type use differs from previous", + "concrete type differs from previous defining existential type use", + ); + err.span_label( + span, + format!("expected `{}`, got `{}`", prev_ty, concrete_type), + ); + err.span_note(prev_span, "previous use here"); + err.emit(); + } else if indices != *prev_indices { + // found "same" concrete types, but the generic parameter order differs + let mut err = self.tcx.sess.struct_span_err( + span, + "concrete type's generic parameters differ from previous defining use", ); + use std::fmt::Write; + let mut s = String::new(); + write!(s, "expected [").unwrap(); + let list = |s: &mut String, indices: &Vec| { + let mut indices = indices.iter().cloned(); + if let Some(first) = indices.next() { + write!(s, "`{}`", substs[first]).unwrap(); + for i in indices { + write!(s, ", `{}`", substs[i]).unwrap(); + } + } + }; + list(&mut s, prev_indices); + write!(s, "], got [").unwrap(); + list(&mut s, &indices); + write!(s, "]").unwrap(); + err.span_label(span, s); err.span_note(prev_span, "previous use here"); err.emit(); } } else { - self.found = Some((span, ty)); + self.found = Some((span, concrete_type, indices)); } } } @@ -1368,7 +1587,7 @@ fn find_existential_constraints<'a, 'tcx>( intravisit::NestedVisitorMap::All(&self.tcx.hir()) } fn visit_item(&mut self, it: &'tcx Item) { - let def_id = self.tcx.hir().local_def_id(it.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(it.hir_id); // the existential type itself or its children are not within its reveal scope if def_id != self.def_id { self.check(def_id); @@ -1376,7 +1595,7 @@ fn find_existential_constraints<'a, 'tcx>( } } fn visit_impl_item(&mut self, it: &'tcx ImplItem) { - let def_id = self.tcx.hir().local_def_id(it.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(it.hir_id); // the existential type itself or its children are not within its reveal scope if def_id != self.def_id { self.check(def_id); @@ -1384,7 +1603,7 @@ fn find_existential_constraints<'a, 'tcx>( } } fn visit_trait_item(&mut self, it: &'tcx TraitItem) { - let def_id = self.tcx.hir().local_def_id(it.id); + let def_id = self.tcx.hir().local_def_id_from_hir_id(it.hir_id); self.check(def_id); intravisit::walk_trait_item(self, it); } @@ -1395,16 +1614,16 @@ fn find_existential_constraints<'a, 'tcx>( tcx, found: None, }; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let parent = tcx.hir().get_parent(node_id); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + let parent = tcx.hir().get_parent_item(hir_id); trace!("parent_id: {:?}", parent); - if parent == ast::CRATE_NODE_ID { + if parent == hir::CRATE_HIR_ID { intravisit::walk_crate(&mut locator, tcx.hir().krate()); } else { - trace!("parent: {:?}", tcx.hir().get(parent)); - match tcx.hir().get(parent) { + trace!("parent: {:?}", tcx.hir().get_by_hir_id(parent)); + match tcx.hir().get_by_hir_id(parent) { Node::Item(ref it) => intravisit::walk_item(&mut locator, it), Node::ImplItem(ref it) => intravisit::walk_impl_item(&mut locator, it), Node::TraitItem(ref it) => intravisit::walk_trait_item(&mut locator, it), @@ -1416,7 +1635,7 @@ fn find_existential_constraints<'a, 'tcx>( } match locator.found { - Some((_, ty)) => ty, + Some((_, ty, _)) => ty, None => { let span = tcx.def_span(def_id); 
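(Illustrative aside, not part of the patch: the reworded "concrete type differs from previous defining existential type use" diagnostic corresponds to code like the sketch below, again written with the then-unstable `existential_type` syntax and expected to fail to compile:)

```rust
#![feature(existential_type)]

existential type Foo: std::fmt::Debug;

fn a() -> Foo {
    5u32
}

// Error: this defining use picks `&'static str`, but the previous one picked
// `u32`, so the concrete type differs between defining uses.
fn b() -> Foo {
    "not a u32"
}

fn main() {}
```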
tcx.sess.span_err(span, "could not find defining uses"); @@ -1429,11 +1648,11 @@ fn fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::PolyFnSig use rustc::hir::*; use rustc::hir::Node::*; - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); let icx = ItemCtxt::new(tcx, def_id); - match tcx.hir().get(node_id) { + match tcx.hir().get_by_hir_id(hir_id) { TraitItem(hir::TraitItem { node: TraitItemKind::Method(sig, _), .. @@ -1452,23 +1671,18 @@ fn fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> ty::PolyFnSig node: ForeignItemKind::Fn(ref fn_decl, _, _), .. }) => { - let abi = tcx.hir().get_foreign_abi(node_id); + let abi = tcx.hir().get_foreign_abi_by_hir_id(hir_id); compute_sig_of_foreign_fn_decl(tcx, def_id, fn_decl, abi) } - StructCtor(&VariantData::Tuple(ref fields, _)) - | Variant(&Spanned { - node: - hir::VariantKind { - data: VariantData::Tuple(ref fields, _), - .. - }, + Ctor(data) | Variant(Spanned { + node: hir::VariantKind { data, .. }, .. - }) => { - let ty = tcx.type_of(tcx.hir().get_parent_did(node_id)); - let inputs = fields + }) if data.ctor_hir_id().is_some() => { + let ty = tcx.type_of(tcx.hir().get_parent_did_by_hir_id(hir_id)); + let inputs = data.fields() .iter() - .map(|f| tcx.type_of(tcx.hir().local_def_id(f.id))); + .map(|f| tcx.type_of(tcx.hir().local_def_id_from_hir_id(f.hir_id))); ty::Binder::bind(tcx.mk_fn_sig( inputs, ty, @@ -1512,8 +1726,8 @@ fn impl_trait_ref<'a, 'tcx>( ) -> Option> { let icx = ItemCtxt::new(tcx, def_id); - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - match tcx.hir().expect_item(node_id).node { + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + match tcx.hir().expect_item_by_hir_id(hir_id).node { hir::ItemKind::Impl(.., ref opt_trait_ref, _, _) => { opt_trait_ref.as_ref().map(|ast_trait_ref| { let selfty = tcx.type_of(def_id); @@ -1525,8 +1739,8 @@ fn impl_trait_ref<'a, 'tcx>( } fn impl_polarity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> hir::ImplPolarity { - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - match tcx.hir().expect_item(node_id).node { + let hir_id = tcx.hir().as_local_hir_id(def_id).unwrap(); + match tcx.hir().expect_item_by_hir_id(hir_id).node { hir::ItemKind::Impl(_, polarity, ..) => polarity, ref item => bug!("impl_polarity: {:?} not an impl", item), } @@ -1584,7 +1798,7 @@ fn is_unsized<'gcx: 'tcx, 'tcx>( } /// Returns the early-bound lifetimes declared in this generics -/// listing. For anything other than fns/methods, this is just all +/// listing. For anything other than fns/methods, this is just all /// the lifetimes that are declared. For fns or methods, we have to /// screen out those that do not appear in any where-clauses etc using /// `resolve_lifetime::early_bound_lifetimes`. @@ -1597,13 +1811,15 @@ fn early_bound_lifetimes_from_generics<'a, 'tcx>( .iter() .filter(move |param| match param.kind { GenericParamKind::Lifetime { .. } => { - let hir_id = tcx.hir().node_to_hir_id(param.id); - !tcx.is_late_bound(hir_id) + !tcx.is_late_bound(param.hir_id) } _ => false, }) } +/// Returns a list of type predicates for the definition with ID `def_id`, including inferred +/// lifetime constraints. This includes all predicates returned by `explicit_predicates_of`, plus +/// inferred constraints concerning which regions outlive other regions. 
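(Illustrative aside, not part of the patch: the "inferred constraints concerning which regions outlive other regions" mentioned in the new doc comment are the RFC 2093 outlives bounds. A minimal sketch of one such bound being inferred rather than written out:)

```rust
// Storing `&'a T` forces `T: 'a`; since RFC 2093 that bound is inferred for
// the struct, so no explicit `where T: 'a` clause is needed here.
struct Ref<'a, T> {
    value: &'a T,
}

fn get<'a, T>(r: Ref<'a, T>) -> &'a T {
    r.value
}

fn main() {
    let s = String::from("hi");
    let r = Ref { value: &s };
    println!("{}", get(r));
}
```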
fn predicates_defined_on<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -1627,9 +1843,13 @@ fn predicates_defined_on<'a, 'tcx>( .predicates .extend(inferred_outlives.iter().map(|&p| (p, span))); } + debug!("predicates_defined_on({:?}) = {:?}", def_id, result); result } +/// Returns a list of all type predicates (explicit and implicit) for the definition with +/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus +/// `Self: Trait` predicates for traits. fn predicates_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -1654,9 +1874,12 @@ fn predicates_of<'a, 'tcx>( .predicates .push((ty::TraitRef::identity(tcx, def_id).to_predicate(), span)); } + debug!("predicates_of(def_id={:?}) = {:?}", def_id, result); result } +/// Returns a list of user-specified type predicates for the definition with ID `def_id`. +/// N.B., this does not include any implied/inferred constraints. fn explicit_predicates_of<'a, 'tcx>( tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId, @@ -1695,8 +1918,11 @@ fn explicit_predicates_of<'a, 'tcx>( } } - let node_id = tcx.hir().as_local_node_id(def_id).unwrap(); - let node = tcx.hir().get(node_id); + let hir_id = match tcx.hir().as_local_hir_id(def_id) { + Some(hir_id) => hir_id, + None => return tcx.predicates_of(def_id), + }; + let node = tcx.hir().get_by_hir_id(hir_id); let mut is_trait = None; let mut is_default_impl_trait = None; @@ -1712,7 +1938,7 @@ fn explicit_predicates_of<'a, 'tcx>( Node::ImplItem(item) => match item.node { ImplItemKind::Existential(ref bounds) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); let opaque_ty = tcx.mk_opaque(def_id, substs); // Collect the bounds, i.e., the `A+B+'c` in `impl A+B+'c`. @@ -1756,8 +1982,9 @@ fn explicit_predicates_of<'a, 'tcx>( ref bounds, impl_trait_fn, ref generics, + origin: _, }) => { - let substs = Substs::identity_for_item(tcx, def_id); + let substs = InternalSubsts::identity_for_item(tcx, def_id); let opaque_ty = tcx.mk_opaque(def_id, substs); // Collect the bounds, i.e., the `A+B+'c` in `impl A+B+'c`. @@ -1825,7 +2052,7 @@ fn explicit_predicates_of<'a, 'tcx>( let mut index = parent_count + has_own_self as u32; for param in early_bound_lifetimes_from_generics(tcx, ast_generics) { let region = tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { - def_id: tcx.hir().local_def_id(param.id), + def_id: tcx.hir().local_def_id_from_hir_id(param.hir_id), index, name: param.name.ident().as_interned_str(), })); @@ -1945,7 +2172,8 @@ fn explicit_predicates_of<'a, 'tcx>( }; let assoc_ty = - tcx.mk_projection(tcx.hir().local_def_id(trait_item.id), self_trait_ref.substs); + tcx.mk_projection(tcx.hir().local_def_id_from_hir_id(trait_item.hir_id), + self_trait_ref.substs); let bounds = compute_bounds( &ItemCtxt::new(tcx, def_id), @@ -1981,10 +2209,12 @@ fn explicit_predicates_of<'a, 'tcx>( ); } - Lrc::new(ty::GenericPredicates { + let result = Lrc::new(ty::GenericPredicates { parent: generics.parent, predicates, - }) + }); + debug!("explicit_predicates_of(def_id={:?}) = {:?}", def_id, result); + result } pub enum SizedByDefault { @@ -2050,9 +2280,9 @@ pub fn compute_bounds<'gcx: 'tcx, 'tcx>( } /// Converts a specific `GenericBound` from the AST into a set of -/// predicates that apply to the self-type. 
A vector is returned -/// because this can be anywhere from zero predicates (`T : ?Sized` adds no -/// predicates) to one (`T : Foo`) to many (`T : Bar` adds `T : Bar` +/// predicates that apply to the self type. A vector is returned +/// because this can be anywhere from zero predicates (`T: ?Sized` adds no +/// predicates) to one (`T: Foo`) to many (`T: Bar` adds `T: Bar` /// and `::X == i32`). fn predicates_from_bound<'tcx>( astconv: &dyn AstConv<'tcx, 'tcx>, @@ -2085,10 +2315,7 @@ fn compute_sig_of_foreign_fn_decl<'a, 'tcx>( abi: abi::Abi, ) -> ty::PolyFnSig<'tcx> { let unsafety = if abi == abi::Abi::RustIntrinsic { - match &*tcx.item_name(def_id).as_str() { - "size_of" | "min_align_of" | "needs_drop" => hir::Unsafety::Normal, - _ => hir::Unsafety::Unsafe, - } + intrisic_operation_unsafety(&*tcx.item_name(def_id).as_str()) } else { hir::Unsafety::Unsafe }; @@ -2100,7 +2327,7 @@ fn compute_sig_of_foreign_fn_decl<'a, 'tcx>( && abi != abi::Abi::PlatformIntrinsic && !tcx.features().simd_ffi { - let check = |ast_ty: &hir::Ty, ty: Ty| { + let check = |ast_ty: &hir::Ty, ty: Ty<'_>| { if ty.is_simd() { tcx.sess .struct_span_err( @@ -2108,7 +2335,7 @@ fn compute_sig_of_foreign_fn_decl<'a, 'tcx>( &format!( "use of SIMD type `{}` in FFI is highly experimental and \ may result in invalid code", - tcx.hir().node_to_pretty_string(ast_ty.id) + tcx.hir().hir_to_pretty_string(ast_ty.hir_id) ), ) .help("add #![feature(simd_ffi)] to the crate attributes to enable") @@ -2135,7 +2362,7 @@ fn is_foreign_item<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) -> bool } fn from_target_feature( - tcx: TyCtxt, + tcx: TyCtxt<'_, '_, '_>, id: DefId, attr: &ast::Attribute, whitelist: &FxHashMap>, @@ -2143,12 +2370,7 @@ fn from_target_feature( ) { let list = match attr.meta_item_list() { Some(list) => list, - None => { - let msg = "#[target_feature] attribute must be of the form \ - #[target_feature(..)]"; - tcx.sess.span_err(attr.span, &msg); - return; - } + None => return, }; let rust_features = tcx.features(); for item in list { @@ -2156,7 +2378,7 @@ fn from_target_feature( if !item.check_name("enable") { let msg = "#[target_feature(..)] only accepts sub-keys of `enable` \ currently"; - tcx.sess.span_err(item.span, &msg); + tcx.sess.span_err(item.span(), &msg); continue; } @@ -2166,7 +2388,7 @@ fn from_target_feature( None => { let msg = "#[target_feature] attribute must be of the form \ #[target_feature(enable = \"..\")]"; - tcx.sess.span_err(item.span, &msg); + tcx.sess.span_err(item.span(), &msg); continue; } }; @@ -2182,7 +2404,7 @@ fn from_target_feature( this target", feature ); - let mut err = tcx.sess.struct_span_err(item.span, &msg); + let mut err = tcx.sess.struct_span_err(item.span(), &msg); if feature.starts_with("+") { let valid = whitelist.contains_key(&feature[1..]); @@ -2207,6 +2429,9 @@ fn from_target_feature( Some("sse4a_target_feature") => rust_features.sse4a_target_feature, Some("tbm_target_feature") => rust_features.tbm_target_feature, Some("wasm_target_feature") => rust_features.wasm_target_feature, + Some("cmpxchg16b_target_feature") => rust_features.cmpxchg16b_target_feature, + Some("adx_target_feature") => rust_features.adx_target_feature, + Some("movbe_target_feature") => rust_features.movbe_target_feature, Some(name) => bug!("unknown target feature gate {}", name), None => true, }; @@ -2214,11 +2439,10 @@ fn from_target_feature( feature_gate::emit_feature_err( &tcx.sess.parse_sess, feature_gate.as_ref().unwrap(), - item.span, + item.span(), feature_gate::GateIssue::Language, 
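(Illustrative aside, not part of the patch: `from_target_feature` above accepts only the `#[target_feature(enable = "...")]` shape, and the attribute may only go on an `unsafe fn`. A small sketch using "avx2" as an example whitelisted x86 feature; the function body is a stand-in:)

```rust
#[cfg(target_arch = "x86_64")]
#[target_feature(enable = "avx2")]
unsafe fn uses_avx2() -> u32 {
    // Body could use AVX2 intrinsics; kept trivial here.
    42
}

#[cfg(target_arch = "x86_64")]
fn main() {
    if is_x86_feature_detected!("avx2") {
        // Callers must guarantee the feature is present, hence the `unsafe`.
        println!("{}", unsafe { uses_avx2() });
    }
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}
```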
&format!("the target feature `{}` is currently unstable", feature), ); - return None; } Some(Symbol::intern(feature)) })); @@ -2275,6 +2499,18 @@ fn codegen_fn_attrs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> Codegen codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR; } else if attr.check_name("unwind") { codegen_fn_attrs.flags |= CodegenFnAttrFlags::UNWIND; + } else if attr.check_name("ffi_returns_twice") { + if tcx.is_foreign_item(id) { + codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE; + } else { + // `#[ffi_returns_twice]` is only allowed `extern fn`s + struct_span_err!( + tcx.sess, + attr.span, + E0724, + "`#[ffi_returns_twice]` may only be used on foreign functions" + ).emit(); + } } else if attr.check_name("rustc_allocator_nounwind") { codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_ALLOCATOR_NOUNWIND; } else if attr.check_name("naked") { @@ -2289,49 +2525,6 @@ fn codegen_fn_attrs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> Codegen codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED; } else if attr.check_name("thread_local") { codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL; - } else if attr.check_name("inline") { - codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| { - if attr.path != "inline" { - return ia; - } - let meta = match attr.meta() { - Some(meta) => meta.node, - None => return ia, - }; - match meta { - MetaItemKind::Word => { - mark_used(attr); - InlineAttr::Hint - } - MetaItemKind::List(ref items) => { - mark_used(attr); - inline_span = Some(attr.span); - if items.len() != 1 { - span_err!( - tcx.sess.diagnostic(), - attr.span, - E0534, - "expected one argument" - ); - InlineAttr::None - } else if list_contains_name(&items[..], "always") { - InlineAttr::Always - } else if list_contains_name(&items[..], "never") { - InlineAttr::Never - } else { - span_err!( - tcx.sess.diagnostic(), - items[0].span, - E0535, - "invalid argument" - ); - - InlineAttr::None - } - } - _ => ia, - } - }); } else if attr.check_name("export_name") { if let Some(s) = attr.value_str() { if s.as_str().contains("\0") { @@ -2345,14 +2538,6 @@ fn codegen_fn_attrs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> Codegen ).emit(); } codegen_fn_attrs.export_name = Some(s); - } else { - struct_span_err!( - tcx.sess, - attr.span, - E0558, - "`export_name` attribute has invalid format" - ).span_label(attr.span, "did you mean #[export_name=\"*\"]?") - .emit(); } } else if attr.check_name("target_feature") { if tcx.fn_sig(id).unsafety() == Unsafety::Normal { @@ -2389,6 +2574,76 @@ fn codegen_fn_attrs<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, id: DefId) -> Codegen } } + codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| { + if attr.path != "inline" { + return ia; + } + match attr.meta().map(|i| i.node) { + Some(MetaItemKind::Word) => { + mark_used(attr); + InlineAttr::Hint + } + Some(MetaItemKind::List(ref items)) => { + mark_used(attr); + inline_span = Some(attr.span); + if items.len() != 1 { + span_err!( + tcx.sess.diagnostic(), + attr.span, + E0534, + "expected one argument" + ); + InlineAttr::None + } else if list_contains_name(&items[..], "always") { + InlineAttr::Always + } else if list_contains_name(&items[..], "never") { + InlineAttr::Never + } else { + span_err!( + tcx.sess.diagnostic(), + items[0].span(), + E0535, + "invalid argument" + ); + + InlineAttr::None + } + } + Some(MetaItemKind::NameValue(_)) => ia, + None => ia, + } + }); + + codegen_fn_attrs.optimize = attrs.iter().fold(OptimizeAttr::None, 
|ia, attr| { + if attr.path != "optimize" { + return ia; + } + let err = |sp, s| span_err!(tcx.sess.diagnostic(), sp, E0722, "{}", s); + match attr.meta().map(|i| i.node) { + Some(MetaItemKind::Word) => { + err(attr.span, "expected one argument"); + ia + } + Some(MetaItemKind::List(ref items)) => { + mark_used(attr); + inline_span = Some(attr.span); + if items.len() != 1 { + err(attr.span, "expected one argument"); + OptimizeAttr::None + } else if list_contains_name(&items[..], "size") { + OptimizeAttr::Size + } else if list_contains_name(&items[..], "speed") { + OptimizeAttr::Speed + } else { + err(items[0].span(), "invalid argument"); + OptimizeAttr::None + } + } + Some(MetaItemKind::NameValue(_)) => ia, + None => ia, + } + }); + // If a function uses #[target_feature] it can't be inlined into general // purpose functions as they wouldn't have the right target features // enabled. For that reason we also forbid #[inline(always)] as it can't be diff --git a/src/librustc_typeck/constrained_generic_params.rs b/src/librustc_typeck/constrained_generic_params.rs new file mode 100644 index 0000000000000..18bf66ceb3501 --- /dev/null +++ b/src/librustc_typeck/constrained_generic_params.rs @@ -0,0 +1,208 @@ +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::fold::{TypeFoldable, TypeVisitor}; +use rustc::util::nodemap::FxHashSet; +use rustc::mir::interpret::ConstValue; +use syntax::source_map::Span; + +#[derive(Clone, PartialEq, Eq, Hash, Debug)] +pub struct Parameter(pub u32); + +impl From for Parameter { + fn from(param: ty::ParamTy) -> Self { Parameter(param.idx) } +} + +impl From for Parameter { + fn from(param: ty::EarlyBoundRegion) -> Self { Parameter(param.index) } +} + +impl From for Parameter { + fn from(param: ty::ParamConst) -> Self { Parameter(param.index) } +} + +/// Returns the set of parameters constrained by the impl header. +pub fn parameters_for_impl<'tcx>(impl_self_ty: Ty<'tcx>, + impl_trait_ref: Option>) + -> FxHashSet +{ + let vec = match impl_trait_ref { + Some(tr) => parameters_for(&tr, false), + None => parameters_for(&impl_self_ty, false), + }; + vec.into_iter().collect() +} + +/// If `include_projections` is false, returns the list of parameters that are +/// constrained by `t` - i.e., the value of each parameter in the list is +/// uniquely determined by `t` (see RFC 447). If it is true, return the list +/// of parameters whose values are needed in order to constrain `ty` - these +/// differ, with the latter being a superset, in the presence of projections. +pub fn parameters_for<'tcx, T>(t: &T, + include_nonconstraining: bool) + -> Vec + where T: TypeFoldable<'tcx> +{ + + let mut collector = ParameterCollector { + parameters: vec![], + include_nonconstraining, + }; + t.visit_with(&mut collector); + collector.parameters +} + +struct ParameterCollector { + parameters: Vec, + include_nonconstraining: bool +} + +impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { + fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { + match t.sty { + ty::Projection(..) | ty::Opaque(..) 
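(Illustrative aside, not part of the patch: the new `#[optimize(..)]` parsing above accepts exactly one of `size` or `speed` and reports other shapes via E0722. A nightly-only sketch under `#![feature(optimize_attribute)]`:)

```rust
#![feature(optimize_attribute)]

#[optimize(size)]
fn cold_path() -> u64 {
    (0..1_000u64).sum()
}

#[optimize(speed)]
fn hot_path() -> u64 {
    (0..1_000u64).map(|x| x * x).sum()
}

fn main() {
    println!("{} {}", cold_path(), hot_path());
}
```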
if !self.include_nonconstraining => { + // projections are not injective + return false; + } + ty::Param(data) => { + self.parameters.push(Parameter::from(data)); + } + _ => {} + } + + t.super_visit_with(self) + } + + fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { + if let ty::ReEarlyBound(data) = *r { + self.parameters.push(Parameter::from(data)); + } + false + } + + fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> bool { + if let ConstValue::Param(data) = c.val { + self.parameters.push(Parameter::from(data)); + } + false + } +} + +pub fn identify_constrained_generic_params<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, + predicates: &ty::GenericPredicates<'tcx>, + impl_trait_ref: Option>, + input_parameters: &mut FxHashSet) +{ + let mut predicates = predicates.predicates.clone(); + setup_constraining_predicates(tcx, &mut predicates, impl_trait_ref, input_parameters); +} + + +/// Order the predicates in `predicates` such that each parameter is +/// constrained before it is used, if that is possible, and add the +/// parameters so constrained to `input_parameters`. For example, +/// imagine the following impl: +/// +/// impl> Trait for U +/// +/// The impl's predicates are collected from left to right. Ignoring +/// the implicit `Sized` bounds, these are +/// * T: Debug +/// * U: Iterator +/// * ::Item = T -- a desugared ProjectionPredicate +/// +/// When we, for example, try to go over the trait-reference +/// `IntoIter as Trait`, we substitute the impl parameters with fresh +/// variables and match them with the impl trait-ref, so we know that +/// `$U = IntoIter`. +/// +/// However, in order to process the `$T: Debug` predicate, we must first +/// know the value of `$T` - which is only given by processing the +/// projection. As we occasionally want to process predicates in a single +/// pass, we want the projection to come first. In fact, as projections +/// can (acyclically) depend on one another - see RFC447 for details - we +/// need to topologically sort them. +/// +/// We *do* have to be somewhat careful when projection targets contain +/// projections themselves, for example in +/// impl Trait for U where +/// /* 0 */ S: Iterator, +/// /* - */ U: Iterator, +/// /* 1 */ ::Item: ToOwned::Item)> +/// /* 2 */ W: Iterator +/// /* 3 */ V: Debug +/// we have to evaluate the projections in the order I wrote them: +/// `V: Debug` requires `V` to be evaluated. The only projection that +/// *determines* `V` is 2 (1 contains it, but *does not determine it*, +/// as it is only contained within a projection), but that requires `W` +/// which is determined by 1, which requires `U`, that is determined +/// by 0. I should probably pick a less tangled example, but I can't +/// think of any. +pub fn setup_constraining_predicates<'tcx>(tcx: TyCtxt<'_, '_, '_>, + predicates: &mut [(ty::Predicate<'tcx>, Span)], + impl_trait_ref: Option>, + input_parameters: &mut FxHashSet) +{ + // The canonical way of doing the needed topological sort + // would be a DFS, but getting the graph and its ownership + // right is annoying, so I am using an in-place fixed-point iteration, + // which is `O(nt)` where `t` is the depth of type-parameter constraints, + // remembering that `t` should be less than 7 in practice. + // + // Basically, I iterate over all projections and swap every + // "ready" projection to the start of the list, such that + // all of the projections before `i` are topologically sorted + // and constrain all the parameters in `input_parameters`. 
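(Illustrative aside, not part of the patch: `parameters_for_impl` / `identify_constrained_generic_params` implement the RFC 447 notion of a parameter being "constrained by the impl header". A sketch in surface Rust, with the rejected unconstrained case left commented out; the types are made up for the example:)

```rust
struct Wrapper<T>(T);

// `T` appears in the trait ref and the self type, so it is constrained.
impl<T> From<Vec<T>> for Wrapper<Vec<T>> {
    fn from(v: Vec<T>) -> Self {
        Wrapper(v)
    }
}

// impl<T, U> From<Vec<T>> for Wrapper<Vec<T>> { ... } // E0207: `U` unconstrained

fn main() {
    let w: Wrapper<Vec<i32>> = vec![1, 2, 3].into();
    println!("{}", w.0.len());
}
```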
+ // + // In the example, `input_parameters` starts by containing `U` - which + // is constrained by the trait-ref - and so on the first pass we + // observe that `::Item = T` is a "ready" projection that + // constrains `T` and swap it to front. As it is the sole projection, + // no more swaps can take place afterwards, with the result being + // * ::Item = T + // * T: Debug + // * U: Iterator + debug!("setup_constraining_predicates: predicates={:?} \ + impl_trait_ref={:?} input_parameters={:?}", + predicates, impl_trait_ref, input_parameters); + let mut i = 0; + let mut changed = true; + while changed { + changed = false; + + for j in i..predicates.len() { + if let ty::Predicate::Projection(ref poly_projection) = predicates[j].0 { + // Note that we can skip binder here because the impl + // trait ref never contains any late-bound regions. + let projection = poly_projection.skip_binder(); + + // Special case: watch out for some kind of sneaky attempt + // to project out an associated type defined by this very + // trait. + let unbound_trait_ref = projection.projection_ty.trait_ref(tcx); + if Some(unbound_trait_ref.clone()) == impl_trait_ref { + continue; + } + + // A projection depends on its input types and determines its output + // type. For example, if we have + // `<::Baz as Iterator>::Output = ::Output` + // Then the projection only applies if `T` is known, but it still + // does not determine `U`. + let inputs = parameters_for(&projection.projection_ty.trait_ref(tcx), true); + let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p)); + if !relies_only_on_inputs { + continue; + } + input_parameters.extend(parameters_for(&projection.ty, false)); + } else { + continue; + } + // fancy control flow to bypass borrow checker + predicates.swap(i, j); + i += 1; + changed = true; + } + debug!("setup_constraining_predicates: predicates={:?} \ + i={} impl_trait_ref={:?} input_parameters={:?}", + predicates, i, impl_trait_ref, input_parameters); + } +} diff --git a/src/librustc_typeck/constrained_type_params.rs b/src/librustc_typeck/constrained_type_params.rs deleted file mode 100644 index 25fa33ef9fa4e..0000000000000 --- a/src/librustc_typeck/constrained_type_params.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::fold::{TypeFoldable, TypeVisitor}; -use rustc::util::nodemap::FxHashSet; -use syntax::source_map::Span; - -#[derive(Clone, PartialEq, Eq, Hash, Debug)] -pub struct Parameter(pub u32); - -impl From for Parameter { - fn from(param: ty::ParamTy) -> Self { Parameter(param.idx) } -} - -impl From for Parameter { - fn from(param: ty::EarlyBoundRegion) -> Self { Parameter(param.index) } -} - -/// Return the set of parameters constrained by the impl header. 
-pub fn parameters_for_impl<'tcx>(impl_self_ty: Ty<'tcx>, - impl_trait_ref: Option>) - -> FxHashSet -{ - let vec = match impl_trait_ref { - Some(tr) => parameters_for(&tr, false), - None => parameters_for(&impl_self_ty, false), - }; - vec.into_iter().collect() -} - -/// If `include_projections` is false, returns the list of parameters that are -/// constrained by `t` - i.e., the value of each parameter in the list is -/// uniquely determined by `t` (see RFC 447). If it is true, return the list -/// of parameters whose values are needed in order to constrain `ty` - these -/// differ, with the latter being a superset, in the presence of projections. -pub fn parameters_for<'tcx, T>(t: &T, - include_nonconstraining: bool) - -> Vec - where T: TypeFoldable<'tcx> -{ - - let mut collector = ParameterCollector { - parameters: vec![], - include_nonconstraining, - }; - t.visit_with(&mut collector); - collector.parameters -} - -struct ParameterCollector { - parameters: Vec, - include_nonconstraining: bool -} - -impl<'tcx> TypeVisitor<'tcx> for ParameterCollector { - fn visit_ty(&mut self, t: Ty<'tcx>) -> bool { - match t.sty { - ty::Projection(..) | ty::Opaque(..) if !self.include_nonconstraining => { - // projections are not injective - return false; - } - ty::Param(data) => { - self.parameters.push(Parameter::from(data)); - } - _ => {} - } - - t.super_visit_with(self) - } - - fn visit_region(&mut self, r: ty::Region<'tcx>) -> bool { - if let ty::ReEarlyBound(data) = *r { - self.parameters.push(Parameter::from(data)); - } - false - } -} - -pub fn identify_constrained_type_params<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, - predicates: &ty::GenericPredicates<'tcx>, - impl_trait_ref: Option>, - input_parameters: &mut FxHashSet) -{ - let mut predicates = predicates.predicates.clone(); - setup_constraining_predicates(tcx, &mut predicates, impl_trait_ref, input_parameters); -} - - -/// Order the predicates in `predicates` such that each parameter is -/// constrained before it is used, if that is possible, and add the -/// parameters so constrained to `input_parameters`. For example, -/// imagine the following impl: -/// -/// impl> Trait for U -/// -/// The impl's predicates are collected from left to right. Ignoring -/// the implicit `Sized` bounds, these are -/// * T: Debug -/// * U: Iterator -/// * ::Item = T -- a desugared ProjectionPredicate -/// -/// When we, for example, try to go over the trait-reference -/// `IntoIter as Trait`, we substitute the impl parameters with fresh -/// variables and match them with the impl trait-ref, so we know that -/// `$U = IntoIter`. -/// -/// However, in order to process the `$T: Debug` predicate, we must first -/// know the value of `$T` - which is only given by processing the -/// projection. As we occasionally want to process predicates in a single -/// pass, we want the projection to come first. In fact, as projections -/// can (acyclically) depend on one another - see RFC447 for details - we -/// need to topologically sort them. -/// -/// We *do* have to be somewhat careful when projection targets contain -/// projections themselves, for example in -/// impl Trait for U where -/// /* 0 */ S: Iterator, -/// /* - */ U: Iterator, -/// /* 1 */ ::Item: ToOwned::Item)> -/// /* 2 */ W: Iterator -/// /* 3 */ V: Debug -/// we have to evaluate the projections in the order I wrote them: -/// `V: Debug` requires `V` to be evaluated. 
The only projection that -/// *determines* `V` is 2 (1 contains it, but *does not determine it*, -/// as it is only contained within a projection), but that requires `W` -/// which is determined by 1, which requires `U`, that is determined -/// by 0. I should probably pick a less tangled example, but I can't -/// think of any. -pub fn setup_constraining_predicates<'tcx>(tcx: TyCtxt, - predicates: &mut [(ty::Predicate<'tcx>, Span)], - impl_trait_ref: Option>, - input_parameters: &mut FxHashSet) -{ - // The canonical way of doing the needed topological sort - // would be a DFS, but getting the graph and its ownership - // right is annoying, so I am using an in-place fixed-point iteration, - // which is `O(nt)` where `t` is the depth of type-parameter constraints, - // remembering that `t` should be less than 7 in practice. - // - // Basically, I iterate over all projections and swap every - // "ready" projection to the start of the list, such that - // all of the projections before `i` are topologically sorted - // and constrain all the parameters in `input_parameters`. - // - // In the example, `input_parameters` starts by containing `U` - which - // is constrained by the trait-ref - and so on the first pass we - // observe that `::Item = T` is a "ready" projection that - // constrains `T` and swap it to front. As it is the sole projection, - // no more swaps can take place afterwards, with the result being - // * ::Item = T - // * T: Debug - // * U: Iterator - debug!("setup_constraining_predicates: predicates={:?} \ - impl_trait_ref={:?} input_parameters={:?}", - predicates, impl_trait_ref, input_parameters); - let mut i = 0; - let mut changed = true; - while changed { - changed = false; - - for j in i..predicates.len() { - if let ty::Predicate::Projection(ref poly_projection) = predicates[j].0 { - // Note that we can skip binder here because the impl - // trait ref never contains any late-bound regions. - let projection = poly_projection.skip_binder(); - - // Special case: watch out for some kind of sneaky attempt - // to project out an associated type defined by this very - // trait. - let unbound_trait_ref = projection.projection_ty.trait_ref(tcx); - if Some(unbound_trait_ref.clone()) == impl_trait_ref { - continue; - } - - // A projection depends on its input types and determines its output - // type. For example, if we have - // `<::Baz as Iterator>::Output = ::Output` - // Then the projection only applies if `T` is known, but it still - // does not determine `U`. - let inputs = parameters_for(&projection.projection_ty.trait_ref(tcx), true); - let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(&p)); - if !relies_only_on_inputs { - continue; - } - input_parameters.extend(parameters_for(&projection.ty, false)); - } else { - continue; - } - // fancy control flow to bypass borrow checker - predicates.swap(i, j); - i += 1; - changed = true; - } - debug!("setup_constraining_predicates: predicates={:?} \ - i={} impl_trait_ref={:?} input_parameters={:?}", - predicates, i, impl_trait_ref, input_parameters); - } -} diff --git a/src/librustc_typeck/diagnostics.rs b/src/librustc_typeck/diagnostics.rs index a0dbaf5ad504b..22f24df450f46 100644 --- a/src/librustc_typeck/diagnostics.rs +++ b/src/librustc_typeck/diagnostics.rs @@ -1,13 +1,4 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - +// ignore-tidy-linelength #![allow(non_snake_case)] register_long_diagnostics! { @@ -358,13 +349,14 @@ fn main() { "##, E0044: r##" -You can't use type parameters on foreign items. Example of erroneous code: +You can't use type or const parameters on foreign items. +Example of erroneous code: ```compile_fail,E0044 extern { fn some_func(x: T); } ``` -To fix this, replace the type parameter with the specializations that you +To fix this, replace the generic parameter with the specializations that you need: ``` @@ -431,7 +423,7 @@ impl Foo for Bar { E0049: r##" This error indicates that an attempted implementation of a trait method -has the wrong number of type parameters. +has the wrong number of type or const parameters. For example, the trait below has a method `foo` with a type parameter `T`, but the implementation of `foo` for the type `Bar` is missing this parameter: @@ -526,7 +518,7 @@ recursion limit (which can be set via the `recursion_limit` attribute). For a somewhat artificial example: ```compile_fail,E0055 -#![recursion_limit="2"] +#![recursion_limit="5"] struct Foo; @@ -536,9 +528,9 @@ impl Foo { fn main() { let foo = Foo; - let ref_foo = &&Foo; + let ref_foo = &&&&&Foo; - // error, reached the recursion limit while auto-dereferencing `&&Foo` + // error, reached the recursion limit while auto-dereferencing `&&&&&Foo` ref_foo.foo(); } ``` @@ -773,7 +765,7 @@ function's return type and the value being returned. "##, E0070: r##" -The left-hand side of an assignment operator must be a place expression. An +The left-hand side of an assignment operator must be a place expression. A place expression represents a memory location and can be a variable (with optional namespacing), a dereference, an indexing expression or a field reference. @@ -1040,6 +1032,7 @@ enum NightsWatch {} ``` "##, +// FIXME(const_generics:docs): example of inferring const parameter. E0087: r##" #### Note: this error code is no longer emitted by the compiler. @@ -1160,8 +1153,8 @@ fn main() { "##, E0091: r##" -You gave an unnecessary type parameter in a type alias. Erroneous code -example: +You gave an unnecessary type or const parameter in a type alias. Erroneous +code example: ```compile_fail,E0091 type Foo = u32; // error: type parameter `T` is unused @@ -1169,7 +1162,7 @@ type Foo = u32; // error: type parameter `T` is unused type Foo = Box; // error: type parameter `B` is unused ``` -Please check you didn't write too many type parameters. Example: +Please check you didn't write too many parameters. Example: ``` type Foo = u32; // ok! @@ -1297,41 +1290,34 @@ fn main() { "##, E0109: r##" -You tried to give a type parameter to a type which doesn't need it. Erroneous -code example: +You tried to provide a generic argument to a type which doesn't need it. +Erroneous code example: ```compile_fail,E0109 -type X = u32; // error: type parameters are not allowed on this type +type X = u32; // error: type arguments are not allowed for this type +type Y = bool<'static>; // error: lifetime parameters are not allowed on + // this type ``` -Please check that you used the correct type and recheck its definition. Perhaps -it doesn't need the type parameter. +Check that you used the correct argument and that the definition is correct. Example: ``` -type X = u32; // this compiles +type X = u32; // ok! +type Y = bool; // ok! 
``` -Note that type parameters for enum-variant constructors go after the variant, -not after the enum (`Option::None::`, not `Option::::None`). +Note that generic arguments for enum variant constructors go after the variant, +not after the enum. For example, you would write `Option::None::`, +rather than `Option::::None`. "##, E0110: r##" -You tried to give a lifetime parameter to a type which doesn't need it. -Erroneous code example: - -```compile_fail,E0110 -type X = u32<'static>; // error: lifetime parameters are not allowed on - // this type -``` - -Please check that the correct type was used and recheck its definition; perhaps -it doesn't need the lifetime parameter. Example: +#### Note: this error code is no longer emitted by the compiler. -``` -type X = u32; // ok! -``` +You tried to provide a lifetime to a type which doesn't need it. +See `E0109` for more details. "##, E0116: r##" @@ -1553,7 +1539,9 @@ fn f() {} It is not possible to declare type parameters on a function that has the `start` attribute. Such a function must have the following type signature (for more -information: http://doc.rust-lang.org/stable/book/first-edition/no-stdlib.html): +information, view [the unstable book][1]): + +[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib ``` # let _: @@ -2875,8 +2863,8 @@ E0370: r##" The maximum value of an enum was reached, so it cannot be automatically set in the next enum value. Erroneous code example: -```compile_fail -#[deny(overflowing_literals)] +```compile_fail,E0370 +#[repr(i64)] enum Foo { X = 0x7fffffffffffffff, Y, // error: enum discriminant overflowed on value after @@ -2889,6 +2877,7 @@ To fix this, please set manually the next enum value or put the enum variant with the maximum value at the end of the enum. Examples: ``` +#[repr(i64)] enum Foo { X = 0x7fffffffffffffff, Y = 0, // ok! @@ -2898,6 +2887,7 @@ enum Foo { Or: ``` +#[repr(i64)] enum Foo { Y = 0, // ok! X = 0x7fffffffffffffff, @@ -2927,10 +2917,11 @@ impl Baz for Bar { } // Note: This is OK E0374: r##" A struct without a field containing an unsized type cannot implement -`CoerceUnsized`. An -[unsized type](https://doc.rust-lang.org/book/first-edition/unsized-types.html) -is any type that the compiler doesn't know the length or alignment of at -compile time. Any struct containing an unsized type is also unsized. +`CoerceUnsized`. An [unsized type][1] is any type that the compiler +doesn't know the length or alignment of at compile time. Any struct +containing an unsized type is also unsized. + +[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait Example of erroneous code: @@ -2987,9 +2978,9 @@ A struct with more than one field containing an unsized type cannot implement `CoerceUnsized`. This only occurs when you are trying to coerce one of the types in your struct to another type in the struct. In this case we try to impl `CoerceUnsized` from `T` to `U` which are both types that the struct -takes. An [unsized type] is any type that the compiler doesn't know the length -or alignment of at compile time. Any struct containing an unsized type is also -unsized. +takes. An [unsized type][1] is any type that the compiler doesn't know the +length or alignment of at compile time. Any struct containing an unsized type +is also unsized. 
Example of erroneous code: @@ -3034,7 +3025,7 @@ fn coerce_foo, U>(t: T) -> Foo { } ``` -[unsized type]: https://doc.rust-lang.org/book/first-edition/unsized-types.html +[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait "##, E0376: r##" @@ -3042,11 +3033,12 @@ The type you are trying to impl `CoerceUnsized` for is not a struct. `CoerceUnsized` can only be implemented for a struct. Unsized types are already able to be coerced without an implementation of `CoerceUnsized` whereas a struct containing an unsized type needs to know the unsized type -field it's containing is able to be coerced. An -[unsized type](https://doc.rust-lang.org/book/first-edition/unsized-types.html) +field it's containing is able to be coerced. An [unsized type][1] is any type that the compiler doesn't know the length or alignment of at compile time. Any struct containing an unsized type is also unsized. +[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait + Example of erroneous code: ```compile_fail,E0376 @@ -3379,180 +3371,6 @@ extern "platform-intrinsic" { ``` "##, -E0440: r##" -A platform-specific intrinsic function has the wrong number of type -parameters. Erroneous code example: - -```compile_fail,E0440 -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct f64x2(f64, f64); - -extern "platform-intrinsic" { - fn x86_mm_movemask_pd(x: f64x2) -> i32; - // error: platform-specific intrinsic has wrong number of type - // parameters -} -``` - -Please refer to the function declaration to see if it corresponds -with yours. Example: - -``` -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct f64x2(f64, f64); - -extern "platform-intrinsic" { - fn x86_mm_movemask_pd(x: f64x2) -> i32; -} -``` -"##, - -E0441: r##" -An unknown platform-specific intrinsic function was used. Erroneous -code example: - -```compile_fail,E0441 -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); - -extern "platform-intrinsic" { - fn x86_mm_adds_ep16(x: i16x8, y: i16x8) -> i16x8; - // error: unrecognized platform-specific intrinsic function -} -``` - -Please verify that the function name wasn't misspelled, and ensure -that it is declared in the rust source code (in the file -src/librustc_platform_intrinsics/x86.rs). Example: - -``` -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); - -extern "platform-intrinsic" { - fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok! -} -``` -"##, - -E0442: r##" -Intrinsic argument(s) and/or return value have the wrong type. -Erroneous code example: - -```compile_fail,E0442 -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i8x16(i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8); -#[repr(simd)] -struct i32x4(i32, i32, i32, i32); -#[repr(simd)] -struct i64x2(i64, i64); - -extern "platform-intrinsic" { - fn x86_mm_adds_epi16(x: i8x16, y: i32x4) -> i64x2; - // error: intrinsic arguments/return value have wrong type -} -``` - -To fix this error, please refer to the function declaration to give -it the awaited types. 
Example: - -``` -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); - -extern "platform-intrinsic" { - fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok! -} -``` -"##, - -E0443: r##" -Intrinsic argument(s) and/or return value have the wrong type. -Erroneous code example: - -```compile_fail,E0443 -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); -#[repr(simd)] -struct i64x8(i64, i64, i64, i64, i64, i64, i64, i64); - -extern "platform-intrinsic" { - fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i64x8; - // error: intrinsic argument/return value has wrong type -} -``` - -To fix this error, please refer to the function declaration to give -it the awaited types. Example: - -``` -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct i16x8(i16, i16, i16, i16, i16, i16, i16, i16); - -extern "platform-intrinsic" { - fn x86_mm_adds_epi16(x: i16x8, y: i16x8) -> i16x8; // ok! -} -``` -"##, - -E0444: r##" -A platform-specific intrinsic function has wrong number of arguments. -Erroneous code example: - -```compile_fail,E0444 -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct f64x2(f64, f64); - -extern "platform-intrinsic" { - fn x86_mm_movemask_pd(x: f64x2, y: f64x2, z: f64x2) -> i32; - // error: platform-specific intrinsic has invalid number of arguments -} -``` - -Please refer to the function declaration to see if it corresponds -with yours. Example: - -``` -#![feature(repr_simd)] -#![feature(platform_intrinsics)] - -#[repr(simd)] -struct f64x2(f64, f64); - -extern "platform-intrinsic" { - fn x86_mm_movemask_pd(x: f64x2) -> i32; // ok! -} -``` -"##, - E0516: r##" The `typeof` keyword is currently reserved but unimplemented. Erroneous code example: @@ -3795,29 +3613,6 @@ For more information about the inline attribute, https: read://doc.rust-lang.org/reference.html#inline-attributes "##, -E0558: r##" -The `export_name` attribute was malformed. - -Erroneous code example: - -```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) -#[export_name] // error: `export_name` attribute has invalid format -pub fn something() {} - -fn main() {} -``` - -The `export_name` attribute expects a string in order to determine the name of -the exported symbol. Example: - -``` -#[export_name = "some_function"] // ok! -pub fn something() {} - -fn main() {} -``` -"##, - E0559: r##" An unknown field was specified into an enum's structure variant. @@ -4089,8 +3884,10 @@ let c = 86u8 as char; // ok! assert_eq!(c, 'V'); ``` -For more information about casts, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/casting-between-types.html +For more information about casts, take a look at the Type cast section in +[The Reference Book][1]. + +[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions "##, E0605: r##" @@ -4118,8 +3915,10 @@ let v = 0 as *const u8; v as *const i8; // ok! ``` -For more information about casts, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/casting-between-types.html +For more information about casts, take a look at the Type cast section in +[The Reference Book][1]. + +[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions "##, E0606: r##" @@ -4140,8 +3939,10 @@ let x = &0u8; let y: u32 = *x as u32; // We dereference it first and then cast it. 
``` -For more information about casts, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/casting-between-types.html +For more information about casts, take a look at the Type cast section in +[The Reference Book][1]. + +[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions "##, E0607: r##" @@ -4167,8 +3968,10 @@ pointer holds is their size. To fix this error, don't try to cast directly between thin and fat pointers. -For more information about casts, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/casting-between-types.html +For more information about casts, take a look at the Type cast section in +[The Reference Book][1]. + +[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions "##, E0609: r##" @@ -4226,8 +4029,8 @@ println!("x: {}, y: {}", variable.x, variable.y); ``` For more information about primitives and structs, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/primitive-types.html -https://doc.rust-lang.org/book/first-edition/structs.html +https://doc.rust-lang.org/book/ch03-02-data-types.html +https://doc.rust-lang.org/book/ch05-00-structs.html "##, E0614: r##" @@ -4538,11 +4341,12 @@ foo.method(); // Ok! "##, E0638: r##" -This error indicates that the struct or enum must be matched non-exhaustively -as it has been marked as `non_exhaustive`. +This error indicates that the struct, enum or enum variant must be matched +non-exhaustively as it has been marked as `non_exhaustive`. When applied within a crate, downstream users of the crate will need to use the `_` pattern when matching enums and use the `..` pattern when matching structs. +Downstream crates cannot match against non-exhaustive enum variants. For example, in the below example, since the enum is marked as `non_exhaustive`, it is required that downstream crates match non-exhaustively @@ -4587,10 +4391,10 @@ Similarly, for structs, match with `..` to avoid this error. "##, E0639: r##" -This error indicates that the struct or enum cannot be instantiated from -outside of the defining crate as it has been marked as `non_exhaustive` and as -such more fields/variants may be added in future that could cause adverse side -effects for this code. +This error indicates that the struct, enum or enum variant cannot be +instantiated from outside of the defining crate as it has been marked +as `non_exhaustive` and as such more fields/variants may be added in +future that could cause adverse side effects for this code. It is recommended that you look for a `new` function or equivalent in the crate's documentation. @@ -4826,6 +4630,21 @@ type, it's not allowed to override anything in those implementations, as it would be ambiguous which override should actually be used. "##, + +E0720: r##" +An `impl Trait` type expands to a recursive type. + +An `impl Trait` type must be expandable to a concrete type that contains no +`impl Trait` types. For example the following example tries to create an +`impl Trait` type `T` that is equal to `[T, T]`: + +```compile_fail,E0720 +fn make_recursive_type() -> impl Sized { + [make_recursive_type(), make_recursive_type()] +} +``` +"##, + } register_diagnostics! { @@ -4891,6 +4710,7 @@ register_diagnostics! 
{ // E0372, // coherence not object safe E0377, // the trait `CoerceUnsized` may only be implemented for a coercion // between structures with the same definition +// E0558, // replaced with a generic attribute input check E0533, // `{}` does not name a unit variant, unit struct or a constant // E0563, // cannot determine a type for this `impl Trait`: {} // removed in 6383de15 E0564, // only named lifetimes are allowed in `impl Trait`, @@ -4910,4 +4730,6 @@ register_diagnostics! { E0645, // trait aliases not finished E0698, // type inside generator must be known in this context E0719, // duplicate values for associated type binding + E0722, // Malformed #[optimize] attribute + E0724, // `#[ffi_returns_twice]` is only allowed in foreign functions } diff --git a/src/librustc_typeck/impl_wf_check.rs b/src/librustc_typeck/impl_wf_check.rs index a2071fd6d47e0..2b72f43d36f72 100644 --- a/src/librustc_typeck/impl_wf_check.rs +++ b/src/librustc_typeck/impl_wf_check.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This pass enforces various "well-formedness constraints" on impls. //! Logically, it is part of wfcheck -- but we do it early so that we //! can stop compilation afterwards, since part of the trait matching @@ -18,25 +8,26 @@ //! specialization errors. These things can (and probably should) be //! fixed, but for the moment it's easier to do these checks early. -use constrained_type_params as ctp; +use crate::constrained_generic_params as ctp; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::hir::def_id::DefId; use rustc::ty::{self, TyCtxt}; +use rustc::ty::query::Providers; use rustc::util::nodemap::{FxHashMap, FxHashSet}; use std::collections::hash_map::Entry::{Occupied, Vacant}; use syntax_pos::Span; /// Checks that all the type/lifetime parameters on an impl also -/// appear in the trait ref or self-type (or are constrained by a +/// appear in the trait ref or self type (or are constrained by a /// where-clause). These rules are needed to ensure that, given a /// trait ref like `>`, we can derive the values of all /// parameters on the impl (which is needed to make specialization /// possible). /// /// However, in the case of lifetimes, we only enforce these rules if -/// the lifetime parameter is used in an associated type. This is a +/// the lifetime parameter is used in an associated type. This is a /// concession to backwards compatibility; see comment at the end of /// the fn for details. /// @@ -49,7 +40,7 @@ use syntax_pos::Span; /// impl Trait> for Bar { ... } /// // ^ T appears in `Foo`, ok. /// -/// impl Trait for Bar where Bar: Iterator { ... } +/// impl Trait for Bar where Bar: Iterator { ... } /// // ^ T is bound to `::Item`, ok. /// /// impl<'a> Trait for Bar { } @@ -62,7 +53,23 @@ pub fn impl_wf_check<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { // We will tag this as part of the WF check -- logically, it is, // but it's one that we must perform earlier than the rest of // WfCheck. 
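A minimal user-facing sketch of the rule this pass enforces; the unconstrained impl is shown commented out because it is rejected (with E0207), while the constrained one compiles:

```
trait Trait {}

struct Foo;
struct Bar<T>(T);

// impl<T> Trait for Foo {}
// ^ error[E0207]: the type parameter `T` is not constrained by the
//   impl trait, self type, or predicates

impl<T> Trait for Bar<T> {} // ok: `T` appears in the self type
```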
- tcx.hir().krate().visit_all_item_likes(&mut ImplWfCheck { tcx }); + for &module in tcx.hir().krate().modules.keys() { + tcx.ensure().check_mod_impl_wf(tcx.hir().local_def_id(module)); + } +} + +fn check_mod_impl_wf<'tcx>(tcx: TyCtxt<'_, 'tcx, 'tcx>, module_def_id: DefId) { + tcx.hir().visit_item_likes_in_module( + module_def_id, + &mut ImplWfCheck { tcx } + ); +} + +pub fn provide(providers: &mut Providers<'_>) { + *providers = Providers { + check_mod_impl_wf, + ..*providers + }; } struct ImplWfCheck<'a, 'tcx: 'a> { @@ -72,7 +79,7 @@ struct ImplWfCheck<'a, 'tcx: 'a> { impl<'a, 'tcx> ItemLikeVisitor<'tcx> for ImplWfCheck<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { if let hir::ItemKind::Impl(.., ref impl_item_refs) = item.node { - let impl_def_id = self.tcx.hir().local_def_id(item.id); + let impl_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); enforce_impl_params_are_constrained(self.tcx, impl_def_id, impl_item_refs); @@ -96,12 +103,12 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); let mut input_parameters = ctp::parameters_for_impl(impl_self_ty, impl_trait_ref); - ctp::identify_constrained_type_params( + ctp::identify_constrained_generic_params( tcx, &impl_predicates, impl_trait_ref, &mut input_parameters); // Disallow unconstrained lifetimes, but only if they appear in assoc types. let lifetimes_in_associated_types: FxHashSet<_> = impl_item_refs.iter() - .map(|item_ref| tcx.hir().local_def_id(item_ref.id.node_id)) + .map(|item_ref| tcx.hir().local_def_id_from_hir_id(item_ref.id.hir_id)) .filter(|&def_id| { let item = tcx.associated_item(def_id); item.kind == ty::AssociatedKind::Type && item.defaultness.has_value() @@ -113,7 +120,7 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, for param in &impl_generics.params { match param.kind { // Disallow ANY unconstrained type parameters. - ty::GenericParamDefKind::Type {..} => { + ty::GenericParamDefKind::Type { .. } => { let param_ty = ty::ParamTy::for_def(param); if !input_parameters.contains(&ctp::Parameter::from(param_ty)) { report_unused_parameter(tcx, @@ -132,6 +139,15 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ¶m.name.to_string()); } } + ty::GenericParamDefKind::Const => { + let param_ct = ty::ParamConst::for_def(param); + if !input_parameters.contains(&ctp::Parameter::from(param_ct)) { + report_unused_parameter(tcx, + tcx.def_span(param.def_id), + "const", + ¶m_ct.to_string()); + } + } } } @@ -155,7 +171,7 @@ fn enforce_impl_params_are_constrained<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, // used elsewhere are not projected back out. } -fn report_unused_parameter(tcx: TyCtxt, +fn report_unused_parameter(tcx: TyCtxt<'_, '_, '_>, span: Span, kind: &str, name: &str) diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 8d6fb8b7f3948..710c84a6bc980 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -1,16 +1,6 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - /*! -# typeck.rs +# typeck The type checker is responsible for: @@ -65,9 +55,7 @@ This API is completely unstable and subject to change. 
*/ -#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", - html_favicon_url = "https://doc.rust-lang.org/favicon.ico", - html_root_url = "https://doc.rust-lang.org/nightly/")] +#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")] #![allow(non_camel_case_types)] @@ -76,26 +64,20 @@ This API is completely unstable and subject to change. #![feature(crate_visibility_modifier)] #![feature(exhaustive_patterns)] #![feature(nll)] -#![feature(quote)] -#![feature(refcell_replace_swap)] #![feature(rustc_diagnostic_macros)] #![feature(slice_patterns)] -#![feature(slice_sort_by_cached_key)] #![feature(never_type)] #![recursion_limit="256"] +#![deny(rust_2018_idioms)] +#![cfg_attr(not(stage0), deny(internal))] +#![allow(explicit_outlives_requirements)] + #[macro_use] extern crate log; #[macro_use] extern crate syntax; -extern crate syntax_pos; -extern crate arena; #[macro_use] extern crate rustc; -extern crate rustc_platform_intrinsics as intrinsics; -extern crate rustc_data_structures; -extern crate rustc_errors as errors; -extern crate rustc_target; -extern crate smallvec; // N.B., this module needs to be declared first so diagnostics are // registered before they are used. @@ -106,46 +88,64 @@ mod check; mod check_unused; mod coherence; mod collect; -mod constrained_type_params; +mod constrained_generic_params; mod structured_errors; mod impl_wf_check; mod namespace; mod outlives; mod variance; -use hir::Node; use rustc_target::spec::abi::Abi; -use rustc::hir; +use rustc::hir::{self, Node}; +use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::infer::InferOk; use rustc::lint; use rustc::middle; use rustc::session; +use rustc::util::common::ErrorReported; +use rustc::session::config::{EntryFnType, nightly_options}; use rustc::traits::{ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt}; -use rustc::ty::subst::Substs; +use rustc::ty::subst::SubstsRef; use rustc::ty::{self, Ty, TyCtxt}; use rustc::ty::query::Providers; use rustc::util; use rustc::util::profiling::ProfileCategory; -use session::{CompileIncomplete, config}; use syntax_pos::Span; -use syntax::ast; use util::common::time; use std::iter; +pub use collect::checked_type_of; + pub struct TypeAndSubsts<'tcx> { - substs: &'tcx Substs<'tcx>, + substs: SubstsRef<'tcx>, ty: Ty<'tcx>, } -fn require_c_abi_if_variadic(tcx: TyCtxt, - decl: &hir::FnDecl, - abi: Abi, - span: Span) { - if decl.variadic && !(abi == Abi::C || abi == Abi::Cdecl) { +fn check_type_alias_enum_variants_enabled<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, + span: Span) { + if !tcx.features().type_alias_enum_variants { + let mut err = tcx.sess.struct_span_err( + span, + "enum variants on type aliases are experimental" + ); + if nightly_options::is_nightly_build() { + help!(&mut err, + "add `#![feature(type_alias_enum_variants)]` to the \ + crate attributes to enable"); + } + err.emit(); + } +} + +fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_, '_, '_>, + decl: &hir::FnDecl, + abi: Abi, + span: Span) { + if decl.c_variadic && !(abi == Abi::C || abi == Abi::Cdecl) { let mut err = struct_span_err!(tcx.sess, span, E0045, - "variadic function must have C or cdecl calling convention"); - err.span_label(span, "variadics require C or cdecl calling convention").emit(); + "C-variadic function must have C or cdecl calling convention"); + err.span_label(span, "C-variadics require C or cdecl calling convention").emit(); } } @@ -177,14 +177,13 @@ fn require_same_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, }) } -fn check_main_fn_ty<'a, 
'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - main_id: ast::NodeId, - main_span: Span) { - let main_def_id = tcx.hir().local_def_id(main_id); +fn check_main_fn_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, main_def_id: DefId) { + let main_id = tcx.hir().as_local_hir_id(main_def_id).unwrap(); + let main_span = tcx.def_span(main_def_id); let main_t = tcx.type_of(main_def_id); match main_t.sty { ty::FnDef(..) => { - if let Some(Node::Item(it)) = tcx.hir().find(main_id) { + if let Some(Node::Item(it)) = tcx.hir().find_by_hir_id(main_id) { if let hir::ItemKind::Fn(.., ref generics, _) = it.node { let mut error = false; if !generics.params.is_empty() { @@ -243,14 +242,13 @@ fn check_main_fn_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -fn check_start_fn_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - start_id: ast::NodeId, - start_span: Span) { - let start_def_id = tcx.hir().local_def_id(start_id); +fn check_start_fn_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, start_def_id: DefId) { + let start_id = tcx.hir().as_local_hir_id(start_def_id).unwrap(); + let start_span = tcx.def_span(start_def_id); let start_t = tcx.type_of(start_def_id); match start_t.sty { ty::FnDef(..) => { - if let Some(Node::Item(it)) = tcx.hir().find(start_id) { + if let Some(Node::Item(it)) = tcx.hir().find_by_hir_id(start_id) { if let hir::ItemKind::Fn(.., ref generics, _) = it.node { let mut error = false; if !generics.params.is_empty() { @@ -302,39 +300,43 @@ fn check_start_fn_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } fn check_for_entry_fn<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - if let Some((id, sp, entry_type)) = *tcx.sess.entry_fn.borrow() { - match entry_type { - config::EntryFnType::Main => check_main_fn_ty(tcx, id, sp), - config::EntryFnType::Start => check_start_fn_ty(tcx, id, sp), - } + match tcx.entry_fn(LOCAL_CRATE) { + Some((def_id, EntryFnType::Main)) => check_main_fn_ty(tcx, def_id), + Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id), + _ => {} } } -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { collect::provide(providers); coherence::provide(providers); check::provide(providers); variance::provide(providers); outlives::provide(providers); + impl_wf_check::provide(providers); } pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) - -> Result<(), CompileIncomplete> + -> Result<(), ErrorReported> { - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::TypeChecking)); + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::TypeChecking, "type-check crate")); // this ensures that later parts of type checking can assume that items // have valid types and not error tcx.sess.track_errors(|| { - time(tcx.sess, "type collecting", || - collect::collect_item_types(tcx)); - + time(tcx.sess, "type collecting", || { + for &module in tcx.hir().krate().modules.keys() { + tcx.ensure().collect_mod_item_types(tcx.hir().local_def_id(module)); + } + }); })?; - tcx.sess.track_errors(|| { - time(tcx.sess, "outlives testing", || - outlives::test::test_inferred_outlives(tcx)); - })?; + if tcx.features().rustc_attrs { + tcx.sess.track_errors(|| { + time(tcx.sess, "outlives testing", || + outlives::test::test_inferred_outlives(tcx)); + })?; + } tcx.sess.track_errors(|| { time(tcx.sess, "impl wf inference", || @@ -346,33 +348,45 @@ pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) coherence::check_coherence(tcx)); })?; - tcx.sess.track_errors(|| { - time(tcx.sess, "variance testing", || - variance::test::test_variance(tcx)); - })?; + if tcx.features().rustc_attrs { + 
tcx.sess.track_errors(|| { + time(tcx.sess, "variance testing", || + variance::test::test_variance(tcx)); + })?; + } time(tcx.sess, "wf checking", || check::check_wf_new(tcx))?; - time(tcx.sess, "item-types checking", || check::check_item_types(tcx))?; + time(tcx.sess, "item-types checking", || { + tcx.sess.track_errors(|| { + for &module in tcx.hir().krate().modules.keys() { + tcx.ensure().check_mod_item_types(tcx.hir().local_def_id(module)); + } + }) + })?; - time(tcx.sess, "item-bodies checking", || check::check_item_bodies(tcx))?; + time(tcx.sess, "item-bodies checking", || tcx.typeck_item_bodies(LOCAL_CRATE)); check_unused::check_crate(tcx); check_for_entry_fn(tcx); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::TypeChecking)); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::TypeChecking, "type-check crate")); - tcx.sess.compile_status() + if tcx.sess.err_count() == 0 { + Ok(()) + } else { + Err(ErrorReported) + } } -/// A quasi-deprecated helper used in rustdoc and save-analysis to get +/// A quasi-deprecated helper used in rustdoc and clippy to get /// the type from a HIR node. pub fn hir_ty_to_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_ty: &hir::Ty) -> Ty<'tcx> { // In case there are any projections etc, find the "environment" // def-id that will be used to determine the traits/predicates in // scope. This is derived from the enclosing item-like thing. - let env_node_id = tcx.hir().get_parent(hir_ty.id); - let env_def_id = tcx.hir().local_def_id(env_node_id); + let env_node_id = tcx.hir().get_parent_item(hir_ty.hir_id); + let env_def_id = tcx.hir().local_def_id_from_hir_id(env_node_id); let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id); astconv::AstConv::ast_ty_to_ty(&item_cx, hir_ty) @@ -383,8 +397,8 @@ pub fn hir_trait_to_predicates<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, hir_trait: // In case there are any projections etc, find the "environment" // def-id that will be used to determine the traits/predicates in // scope. This is derived from the enclosing item-like thing. - let env_node_id = tcx.hir().get_parent(hir_trait.ref_id); - let env_def_id = tcx.hir().local_def_id(env_node_id); + let env_hir_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id); + let env_def_id = tcx.hir().local_def_id_from_hir_id(env_hir_id); let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id); let mut projections = Vec::new(); let (principal, _) = astconv::AstConv::instantiate_poly_trait_ref_inner( diff --git a/src/librustc_typeck/namespace.rs b/src/librustc_typeck/namespace.rs index 690bf1c550c0e..e8f6272810a37 100644 --- a/src/librustc_typeck/namespace.rs +++ b/src/librustc_typeck/namespace.rs @@ -1,13 +1,3 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir; use rustc::ty; diff --git a/src/librustc_typeck/outlives/explicit.rs b/src/librustc_typeck/outlives/explicit.rs index 9b374cf932fa9..574086f780a9d 100644 --- a/src/librustc_typeck/outlives/explicit.rs +++ b/src/librustc_typeck/outlives/explicit.rs @@ -1,16 +1,6 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir::def_id::DefId; use rustc::ty::{self, OutlivesPredicate, TyCtxt}; -use util::nodemap::FxHashMap; +use crate::util::nodemap::FxHashMap; use super::utils::*; diff --git a/src/librustc_typeck/outlives/implicit_infer.rs b/src/librustc_typeck/outlives/implicit_infer.rs index 315e5feea3fcf..b560f3b497920 100644 --- a/src/librustc_typeck/outlives/implicit_infer.rs +++ b/src/librustc_typeck/outlives/implicit_infer.rs @@ -1,15 +1,4 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use rustc::hir; -use hir::Node; +use rustc::hir::{self, Node}; use rustc::hir::def_id::DefId; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::ty::subst::{Kind, Subst, UnpackedKind}; @@ -22,7 +11,7 @@ use super::utils::*; /// Infer predicates for the items in the crate. /// -/// global_inferred_outlives: this is initially the empty map that +/// `global_inferred_outlives`: this is initially the empty map that /// was generated by walking the items in the crate. This will /// now be filled with inferred predicates. pub fn infer_predicates<'tcx>( @@ -63,16 +52,16 @@ pub struct InferVisitor<'cx, 'tcx: 'cx> { impl<'cx, 'tcx> ItemLikeVisitor<'tcx> for InferVisitor<'cx, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { - let item_did = self.tcx.hir().local_def_id(item.id); + let item_did = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); debug!("InferVisitor::visit_item(item={:?})", item_did); - let node_id = self + let hir_id = self .tcx .hir() - .as_local_node_id(item_did) + .as_local_hir_id(item_did) .expect("expected local def-id"); - let item = match self.tcx.hir().get(node_id) { + let item = match self.tcx.hir().get_by_hir_id(hir_id) { Node::Item(item) => item, _ => bug!(), }; @@ -204,27 +193,28 @@ fn insert_required_predicates_to_be_wf<'tcx>( debug!("Dynamic"); debug!("field_ty = {}", &field_ty); debug!("ty in field = {}", &ty); - let ex_trait_ref = obj.principal(); - // Here, we are passing the type `usize` as a - // placeholder value with the function - // `with_self_ty`, since there is no concrete type - // `Self` for a `dyn Trait` at this - // stage. Therefore when checking explicit - // predicates in `check_explicit_predicates` we - // need to ignore checking the explicit_map for - // Self type. - let substs = ex_trait_ref - .with_self_ty(tcx, tcx.types.usize) - .skip_binder() - .substs; - check_explicit_predicates( - tcx, - &ex_trait_ref.skip_binder().def_id, - substs, - required_predicates, - explicit_map, - IgnoreSelfTy(true), - ); + if let Some(ex_trait_ref) = obj.principal() { + // Here, we are passing the type `usize` as a + // placeholder value with the function + // `with_self_ty`, since there is no concrete type + // `Self` for a `dyn Trait` at this + // stage. Therefore when checking explicit + // predicates in `check_explicit_predicates` we + // need to ignore checking the explicit_map for + // Self type. 
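The user-visible effect of this outlives inference, as a minimal sketch: the `T: 'a` requirement below no longer has to be written explicitly, because it is inferred from the reference-typed field.

```
struct Ref<'a, T> {
    field: &'a T, // `T: 'a` is inferred; no explicit `where T: 'a` is needed
}
```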
+ let substs = ex_trait_ref + .with_self_ty(tcx, tcx.types.usize) + .skip_binder() + .substs; + check_explicit_predicates( + tcx, + &ex_trait_ref.skip_binder().def_id, + substs, + required_predicates, + explicit_map, + IgnoreSelfTy(true), + ); + } } ty::Projection(obj) => { diff --git a/src/librustc_typeck/outlives/mod.rs b/src/librustc_typeck/outlives/mod.rs index 5796df2744b2b..913990ee87897 100644 --- a/src/librustc_typeck/outlives/mod.rs +++ b/src/librustc_typeck/outlives/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use hir::Node; use rustc::hir; use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; @@ -22,7 +12,7 @@ mod implicit_infer; pub mod test; mod utils; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { inferred_outlives_of, inferred_outlives_crate, @@ -36,10 +26,10 @@ fn inferred_outlives_of<'a, 'tcx>( ) -> Lrc>> { let id = tcx .hir() - .as_local_node_id(item_def_id) + .as_local_hir_id(item_def_id) .expect("expected local def-id"); - match tcx.hir().get(id) { + match tcx.hir().get_by_hir_id(id) { Node::Item(item) => match item.node { hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..) => { let crate_map = tcx.inferred_outlives_crate(LOCAL_CRATE); @@ -108,14 +98,22 @@ fn inferred_outlives_crate<'tcx>( .map(|(&def_id, set)| { let vec: Vec> = set .iter() - .map( + .filter_map( |ty::OutlivesPredicate(kind1, region2)| match kind1.unpack() { - UnpackedKind::Type(ty1) => ty::Predicate::TypeOutlives(ty::Binder::bind( - ty::OutlivesPredicate(ty1, region2), - )), - UnpackedKind::Lifetime(region1) => ty::Predicate::RegionOutlives( - ty::Binder::bind(ty::OutlivesPredicate(region1, region2)), - ), + UnpackedKind::Type(ty1) => { + Some(ty::Predicate::TypeOutlives(ty::Binder::bind( + ty::OutlivesPredicate(ty1, region2) + ))) + } + UnpackedKind::Lifetime(region1) => { + Some(ty::Predicate::RegionOutlives( + ty::Binder::bind(ty::OutlivesPredicate(region1, region2)) + )) + } + UnpackedKind::Const(_) => { + // Generic consts don't impose any constraints. + None + } }, ).collect(); (def_id, Lrc::new(vec)) diff --git a/src/librustc_typeck/outlives/test.rs b/src/librustc_typeck/outlives/test.rs index d855675d39019..e10c836120718 100644 --- a/src/librustc_typeck/outlives/test.rs +++ b/src/librustc_typeck/outlives/test.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
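The test module whose diff follows looks for a `#[rustc_outlives]` marker attribute and reports the inferred predicates as a diagnostic. A minimal sketch of how such a unit test is written (assuming a nightly compiler, since `rustc_*` attributes sit behind `#![feature(rustc_attrs)]`; the struct is illustrative only):

```
#![feature(rustc_attrs)]

#[rustc_outlives]
struct Foo<'a, T> {
    bar: &'a T, // the reported predicates include `T: 'a`
}
```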
- use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::ty::TyCtxt; @@ -24,7 +14,7 @@ struct OutlivesTest<'a, 'tcx: 'a> { impl<'a, 'tcx> ItemLikeVisitor<'tcx> for OutlivesTest<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { - let item_def_id = self.tcx.hir().local_def_id(item.id); + let item_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); // For unit testing: check for a special "rustc_outlives" // attribute and report an error with various results if found. diff --git a/src/librustc_typeck/outlives/utils.rs b/src/librustc_typeck/outlives/utils.rs index 6ed59837eb49a..ee552ca9cbb25 100644 --- a/src/librustc_typeck/outlives/utils.rs +++ b/src/librustc_typeck/outlives/utils.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty::outlives::Component; use rustc::ty::subst::{Kind, UnpackedKind}; use rustc::ty::{self, Region, RegionKind, Ty, TyCtxt}; @@ -128,6 +118,10 @@ pub fn insert_outlives_predicate<'tcx>( } required_predicates.insert(ty::OutlivesPredicate(kind, outlived_region)); } + + UnpackedKind::Const(_) => { + // Generic consts don't impose any constraints. + } } } diff --git a/src/librustc_typeck/structured_errors.rs b/src/librustc_typeck/structured_errors.rs index 12863cc66a053..3e3eab8cf4cfb 100644 --- a/src/librustc_typeck/structured_errors.rs +++ b/src/librustc_typeck/structured_errors.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::session::Session; use syntax_pos::Span; use errors::{Applicability, DiagnosticId, DiagnosticBuilder}; @@ -73,7 +63,7 @@ impl<'tcx> StructuredDiagnostic<'tcx> for VariadicError<'tcx> { ) }; if let Ok(snippet) = self.sess.source_map().span_to_snippet(self.span) { - err.span_suggestion_with_applicability( + err.span_suggestion( self.span, &format!("cast the value to `{}`", self.cast_ty), format!("{} as {}", snippet, self.cast_ty), @@ -147,7 +137,7 @@ To fix this error, don't try to cast directly between thin and fat pointers. For more information about casts, take a look at The Book: -https://doc.rust-lang.org/book/first-edition/casting-between-types.html"); +https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions"); err } } diff --git a/src/librustc_typeck/variance/constraints.rs b/src/librustc_typeck/variance/constraints.rs index 6ea919469e04b..f2406bd8540b5 100644 --- a/src/librustc_typeck/variance/constraints.rs +++ b/src/librustc_typeck/variance/constraints.rs @@ -1,22 +1,12 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Constraint construction and representation //! //! 
The second pass over the AST determines the set of constraints. //! We walk the set of items and, for each member, generate new constraints. use hir::def_id::DefId; -use rustc::ty::subst::{Substs, UnpackedKind}; +use rustc::mir::interpret::ConstValue; +use rustc::ty::subst::{SubstsRef, UnpackedKind}; use rustc::ty::{self, Ty, TyCtxt}; -use syntax::ast; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; @@ -51,7 +41,7 @@ pub struct Constraint<'a> { /// } /// /// then while we are visiting `Bar`, the `CurrentItem` would have -/// the def-id and the start of `Foo`'s inferreds. +/// the `DefId` and the start of `Foo`'s inferreds. pub struct CurrentItem { inferred_start: InferredIndex, } @@ -82,31 +72,31 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ConstraintContext<'a, 'tcx> { match item.node { hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { - self.visit_node_helper(item.id); + self.visit_node_helper(item.hir_id); if let hir::VariantData::Tuple(..) = *struct_def { - self.visit_node_helper(struct_def.id()); + self.visit_node_helper(struct_def.ctor_hir_id().unwrap()); } } hir::ItemKind::Enum(ref enum_def, _) => { - self.visit_node_helper(item.id); + self.visit_node_helper(item.hir_id); for variant in &enum_def.variants { if let hir::VariantData::Tuple(..) = variant.node.data { - self.visit_node_helper(variant.node.data.id()); + self.visit_node_helper(variant.node.data.ctor_hir_id().unwrap()); } } } hir::ItemKind::Fn(..) => { - self.visit_node_helper(item.id); + self.visit_node_helper(item.hir_id); } hir::ItemKind::ForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { if let hir::ForeignItemKind::Fn(..) = foreign_item.node { - self.visit_node_helper(foreign_item.id); + self.visit_node_helper(foreign_item.hir_id); } } } @@ -117,21 +107,21 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for ConstraintContext<'a, 'tcx> { fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) { if let hir::TraitItemKind::Method(..) = trait_item.node { - self.visit_node_helper(trait_item.id); + self.visit_node_helper(trait_item.hir_id); } } fn visit_impl_item(&mut self, impl_item: &hir::ImplItem) { if let hir::ImplItemKind::Method(..) = impl_item.node { - self.visit_node_helper(impl_item.id); + self.visit_node_helper(impl_item.hir_id); } } } impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { - fn visit_node_helper(&mut self, id: ast::NodeId) { + fn visit_node_helper(&mut self, id: hir::HirId) { let tcx = self.terms_cx.tcx; - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); self.build_constraints_for_item(def_id); } @@ -141,14 +131,14 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { fn build_constraints_for_item(&mut self, def_id: DefId) { let tcx = self.tcx(); - debug!("build_constraints_for_item({})", tcx.item_path_str(def_id)); + debug!("build_constraints_for_item({})", tcx.def_path_str(def_id)); // Skip items with no generics - there's nothing to infer in them. 
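For orientation, a minimal sketch of what the inferred variances mean at the language level; this is the property the constraint pass ultimately computes:

```
fn shorten<'short, 'long: 'short>(s: &'long str) -> &'short str {
    s // ok: `&'a T` is covariant in `'a`, so a longer lifetime coerces to a shorter one
}

// By contrast, `std::cell::Cell<&'long str>` would *not* coerce to
// `Cell<&'short str>`, because `Cell<T>` is invariant in `T`.
```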
if tcx.generics_of(def_id).count() == 0 { return; } - let id = tcx.hir().as_local_node_id(def_id).unwrap(); + let id = tcx.hir().as_local_hir_id(def_id).unwrap(); let inferred_start = self.terms_cx.inferred_starts[&id]; let current_item = &CurrentItem { inferred_start }; match tcx.type_of(def_id).sty { @@ -232,7 +222,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { fn add_constraints_from_invariant_substs(&mut self, current: &CurrentItem, - substs: &Substs<'tcx>, + substs: SubstsRef<'tcx>, variance: VarianceTermPtr<'a>) { debug!("add_constraints_from_invariant_substs: substs={:?} variance={:?}", substs, @@ -240,12 +230,19 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { // Trait are always invariant so we can take advantage of that. let variance_i = self.invariant(variance); - for ty in substs.types() { - self.add_constraints_from_ty(current, ty, variance_i); - } - for region in substs.regions() { - self.add_constraints_from_region(current, region, variance_i); + for k in substs { + match k.unpack() { + UnpackedKind::Lifetime(lt) => { + self.add_constraints_from_region(current, lt, variance_i) + } + UnpackedKind::Type(ty) => { + self.add_constraints_from_ty(current, ty, variance_i) + } + UnpackedKind::Const(ct) => { + self.add_constraints_from_const(current, ct, variance_i) + } + } } } @@ -278,7 +275,11 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { self.add_constraints_from_mt(current, &ty::TypeAndMut { ty, mutbl }, variance); } - ty::Array(typ, _) | + ty::Array(typ, len) => { + self.add_constraints_from_ty(current, typ, variance); + self.add_constraints_from_const(current, len, variance); + } + ty::Slice(typ) => { self.add_constraints_from_ty(current, typ, variance); } @@ -311,11 +312,12 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { let contra = self.contravariant(variance); self.add_constraints_from_region(current, r, contra); - let poly_trait_ref = data - .principal() - .with_self_ty(self.tcx(), self.tcx().types.err); - self.add_constraints_from_trait_ref( - current, *poly_trait_ref.skip_binder(), variance); + if let Some(poly_trait_ref) = data.principal() { + let poly_trait_ref = + poly_trait_ref.with_self_ty(self.tcx(), self.tcx().types.err); + self.add_constraints_from_trait_ref( + current, *poly_trait_ref.skip_binder(), variance); + } for projection in data.projection_bounds() { self.add_constraints_from_ty( @@ -353,7 +355,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { fn add_constraints_from_substs(&mut self, current: &CurrentItem, def_id: DefId, - substs: &Substs<'tcx>, + substs: SubstsRef<'tcx>, variance: VarianceTermPtr<'a>) { debug!("add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})", def_id, @@ -365,7 +367,7 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { return; } - let (local, remote) = if let Some(id) = self.tcx().hir().as_local_node_id(def_id) { + let (local, remote) = if let Some(id) = self.tcx().hir().as_local_hir_id(def_id) { (Some(self.terms_cx.inferred_starts[&id]), None) } else { (None, Some(self.tcx().variances_of(def_id))) @@ -393,6 +395,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { UnpackedKind::Type(ty) => { self.add_constraints_from_ty(current, ty, variance_i) } + UnpackedKind::Const(ct) => { + self.add_constraints_from_const(current, ct, variance_i) + } } } } @@ -444,6 +449,24 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { } } + fn add_constraints_from_const( + &mut self, + current: &CurrentItem, + ct: &ty::Const<'tcx>, + variance: VarianceTermPtr<'a> + ) { + debug!( + "add_constraints_from_const(ct={:?}, variance={:?})", + ct, + 
variance + ); + + self.add_constraints_from_ty(current, ct.ty, variance); + if let ConstValue::Param(ref data) = ct.val { + self.add_constraint(current, data.index, variance); + } + } + /// Adds constraints appropriate for a mutability-type pair /// appearing in a context with ambient variance `variance` fn add_constraints_from_mt(&mut self, diff --git a/src/librustc_typeck/variance/mod.rs b/src/librustc_typeck/variance/mod.rs index 4039281ffb780..9b9a6bace96b1 100644 --- a/src/librustc_typeck/variance/mod.rs +++ b/src/librustc_typeck/variance/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Module for inferring the variance of type and lifetime parameters. See the [rustc guide] //! chapter for more info. //! @@ -37,7 +27,7 @@ pub mod test; /// Code for transforming variances. mod xform; -pub fn provide(providers: &mut Providers) { +pub fn provide(providers: &mut Providers<'_>) { *providers = Providers { variances_of, crate_variances, @@ -56,12 +46,12 @@ fn crate_variances<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum) fn variances_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId) -> Lrc> { - let id = tcx.hir().as_local_node_id(item_def_id).expect("expected local def-id"); + let id = tcx.hir().as_local_hir_id(item_def_id).expect("expected local def-id"); let unsupported = || { // Variance not relevant. - span_bug!(tcx.hir().span(id), "asked to compute variance for wrong kind of item") + span_bug!(tcx.hir().span_by_hir_id(id), "asked to compute variance for wrong kind of item") }; - match tcx.hir().get(id) { + match tcx.hir().get_by_hir_id(id) { Node::Item(item) => match item.node { hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) | @@ -89,7 +79,7 @@ fn variances_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId) _ => unsupported() }, - Node::Variant(_) | Node::StructCtor(_) => {} + Node::Variant(_) | Node::Ctor(..) => {} _ => unsupported() } @@ -101,4 +91,3 @@ fn variances_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, item_def_id: DefId) .unwrap_or(&crate_map.empty_variance) .clone() } - diff --git a/src/librustc_typeck/variance/solve.rs b/src/librustc_typeck/variance/solve.rs index 365c65bc04888..cec33ba87dea4 100644 --- a/src/librustc_typeck/variance/solve.rs +++ b/src/librustc_typeck/variance/solve.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Constraint solving //! //! The final phase iterates over the constraints, refining the variance @@ -33,7 +23,7 @@ struct SolveContext<'a, 'tcx: 'a> { solutions: Vec, } -pub fn solve_constraints(constraints_cx: ConstraintContext) -> ty::CrateVariancesMap { +pub fn solve_constraints(constraints_cx: ConstraintContext<'_, '_>) -> ty::CrateVariancesMap { let ConstraintContext { terms_cx, constraints, .. 
} = constraints_cx; let mut solutions = vec![ty::Bivariant; terms_cx.inferred_terms.len()]; @@ -93,7 +83,7 @@ impl<'a, 'tcx> SolveContext<'a, 'tcx> { let solutions = &self.solutions; self.terms_cx.inferred_starts.iter().map(|(&id, &InferredIndex(start))| { - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); let generics = tcx.generics_of(def_id); let mut variances = solutions[start..start+generics.count()].to_vec(); diff --git a/src/librustc_typeck/variance/terms.rs b/src/librustc_typeck/variance/terms.rs index 75ff5bb0c5405..ac686e40076eb 100644 --- a/src/librustc_typeck/variance/terms.rs +++ b/src/librustc_typeck/variance/terms.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - // Representing terms // // Terms are structured as a straightforward tree. Rather than rely on @@ -22,10 +12,9 @@ use arena::TypedArena; use rustc::ty::{self, TyCtxt}; use std::fmt; -use syntax::ast; use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; -use util::nodemap::NodeMap; +use crate::util::nodemap::HirIdMap; use self::VarianceTerm::*; @@ -42,7 +31,7 @@ pub enum VarianceTerm<'a> { } impl<'a> fmt::Debug for VarianceTerm<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { ConstantTerm(c1) => write!(f, "{:?}", c1), TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2), @@ -65,11 +54,11 @@ pub struct TermsContext<'a, 'tcx: 'a> { // For marker types, UnsafeCell, and other lang items where // variance is hardcoded, records the item-id and the hardcoded // variance. - pub lang_items: Vec<(ast::NodeId, Vec)>, + pub lang_items: Vec<(hir::HirId, Vec)>, // Maps from the node id of an item to the first inferred index // used for its type & region parameters. - pub inferred_starts: NodeMap, + pub inferred_starts: HirIdMap, // Maps from an InferredIndex to the term for that variable. 
pub inferred_terms: Vec>, @@ -96,7 +85,7 @@ pub fn determine_parameters_to_be_inferred<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx> terms_cx } -fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec)> { +fn lang_items(tcx: TyCtxt<'_, '_, '_>) -> Vec<(hir::HirId, Vec)> { let lang_items = tcx.lang_items(); let all = vec![ (lang_items.phantom_data(), vec![ty::Covariant]), @@ -106,14 +95,14 @@ fn lang_items(tcx: TyCtxt) -> Vec<(ast::NodeId, Vec)> { all.into_iter() // iterating over (Option, Variance) .filter(|&(ref d,_)| d.is_some()) .map(|(d, v)| (d.unwrap(), v)) // (DefId, Variance) - .filter_map(|(d, v)| tcx.hir().as_local_node_id(d).map(|n| (n, v))) // (NodeId, Variance) + .filter_map(|(d, v)| tcx.hir().as_local_hir_id(d).map(|n| (n, v))) // (HirId, Variance) .collect() } impl<'a, 'tcx> TermsContext<'a, 'tcx> { - fn add_inferreds_for_item(&mut self, id: ast::NodeId) { + fn add_inferreds_for_item(&mut self, id: hir::HirId) { let tcx = self.tcx; - let def_id = tcx.hir().local_def_id(id); + let def_id = tcx.hir().local_def_id_from_hir_id(id); let count = tcx.generics_of(def_id).count(); if count == 0 { @@ -139,36 +128,36 @@ impl<'a, 'tcx> TermsContext<'a, 'tcx> { impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for TermsContext<'a, 'tcx> { fn visit_item(&mut self, item: &hir::Item) { debug!("add_inferreds for item {}", - self.tcx.hir().node_to_string(item.id)); + self.tcx.hir().hir_to_string(item.hir_id)); match item.node { hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => { - self.add_inferreds_for_item(item.id); + self.add_inferreds_for_item(item.hir_id); if let hir::VariantData::Tuple(..) = *struct_def { - self.add_inferreds_for_item(struct_def.id()); + self.add_inferreds_for_item(struct_def.ctor_hir_id().unwrap()); } } hir::ItemKind::Enum(ref enum_def, _) => { - self.add_inferreds_for_item(item.id); + self.add_inferreds_for_item(item.hir_id); for variant in &enum_def.variants { if let hir::VariantData::Tuple(..) = variant.node.data { - self.add_inferreds_for_item(variant.node.data.id()); + self.add_inferreds_for_item(variant.node.data.ctor_hir_id().unwrap()); } } } hir::ItemKind::Fn(..) => { - self.add_inferreds_for_item(item.id); + self.add_inferreds_for_item(item.hir_id); } hir::ItemKind::ForeignMod(ref foreign_mod) => { for foreign_item in &foreign_mod.items { if let hir::ForeignItemKind::Fn(..) = foreign_item.node { - self.add_inferreds_for_item(foreign_item.id); + self.add_inferreds_for_item(foreign_item.hir_id); } } } @@ -179,13 +168,13 @@ impl<'a, 'tcx, 'v> ItemLikeVisitor<'v> for TermsContext<'a, 'tcx> { fn visit_trait_item(&mut self, trait_item: &hir::TraitItem) { if let hir::TraitItemKind::Method(..) = trait_item.node { - self.add_inferreds_for_item(trait_item.id); + self.add_inferreds_for_item(trait_item.hir_id); } } fn visit_impl_item(&mut self, impl_item: &hir::ImplItem) { if let hir::ImplItemKind::Method(..) = impl_item.node { - self.add_inferreds_for_item(impl_item.id); + self.add_inferreds_for_item(impl_item.hir_id); } } } diff --git a/src/librustc_typeck/variance/test.rs b/src/librustc_typeck/variance/test.rs index 7ae90e953e474..d04b1b276a2cc 100644 --- a/src/librustc_typeck/variance/test.rs +++ b/src/librustc_typeck/variance/test.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::hir; use rustc::hir::itemlikevisit::ItemLikeVisitor; use rustc::ty::TyCtxt; @@ -22,7 +12,7 @@ struct VarianceTest<'a, 'tcx: 'a> { impl<'a, 'tcx> ItemLikeVisitor<'tcx> for VarianceTest<'a, 'tcx> { fn visit_item(&mut self, item: &'tcx hir::Item) { - let item_def_id = self.tcx.hir().local_def_id(item.id); + let item_def_id = self.tcx.hir().local_def_id_from_hir_id(item.hir_id); // For unit testing: check for a special "rustc_variance" // attribute and report an error with various results if found. diff --git a/src/librustc_typeck/variance/xform.rs b/src/librustc_typeck/variance/xform.rs index 7106ca4d420a8..969463d8e7cbc 100644 --- a/src/librustc_typeck/variance/xform.rs +++ b/src/librustc_typeck/variance/xform.rs @@ -1,13 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc::ty; pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance { diff --git a/src/librustdoc/Cargo.toml b/src/librustdoc/Cargo.toml index 8bac007b748ac..4d2c03a4f2fdb 100644 --- a/src/librustdoc/Cargo.toml +++ b/src/librustdoc/Cargo.toml @@ -2,6 +2,7 @@ authors = ["The Rust Project Developers"] name = "rustdoc" version = "0.0.0" +edition = "2018" [lib] name = "rustdoc" @@ -9,6 +10,6 @@ path = "lib.rs" [dependencies] pulldown-cmark = { version = "0.1.2", default-features = false } -minifier = "0.0.20" +minifier = "0.0.29" tempfile = "3" -parking_lot = "0.6.4" +parking_lot = "0.7" diff --git a/src/librustdoc/clean/auto_trait.rs b/src/librustdoc/clean/auto_trait.rs index ac9680b4570c6..555cb1bd64f6e 100644 --- a/src/librustdoc/clean/auto_trait.rs +++ b/src/librustdoc/clean/auto_trait.rs @@ -1,29 +1,19 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
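The auto-trait finder below synthesizes impls that have no source-level counterpart. A minimal sketch of the user-visible result for an ordinary generic type (the type name is illustrative; the listed impls follow the usual auto-trait rules):

```
pub struct MyType<T> {
    pub value: T,
}

// rustdoc lists these in its auto-trait implementations section even though
// no such impl appears in the source:
//
//     impl<T> Send for MyType<T> where T: Send
//     impl<T> Sync for MyType<T> where T: Sync
```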
- use rustc::hir; use rustc::traits::auto_trait as auto; use rustc::ty::{self, TypeFoldable}; use std::fmt::Debug; -use self::def_ctor::{get_def_from_def_id, get_def_from_node_id}; +use self::def_ctor::{get_def_from_def_id, get_def_from_hir_id}; use super::*; -pub struct AutoTraitFinder<'a, 'tcx: 'a, 'rcx: 'a, 'cstore: 'rcx> { - pub cx: &'a core::DocContext<'a, 'tcx, 'rcx, 'cstore>, +pub struct AutoTraitFinder<'a, 'tcx> { + pub cx: &'a core::DocContext<'tcx>, pub f: auto::AutoTraitFinder<'a, 'tcx>, } -impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { - pub fn new(cx: &'a core::DocContext<'a, 'tcx, 'rcx, 'cstore>) -> Self { +impl<'a, 'tcx> AutoTraitFinder<'a, 'tcx> { + pub fn new(cx: &'a core::DocContext<'tcx>) -> Self { let f = auto::AutoTraitFinder::new(&cx.tcx); AutoTraitFinder { cx, f } @@ -35,9 +25,9 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { }) } - pub fn get_with_node_id(&self, id: ast::NodeId, name: String) -> Vec { - get_def_from_node_id(&self.cx, id, name, &|def_ctor, name| { - let did = self.cx.tcx.hir().local_def_id(id); + pub fn get_with_hir_id(&self, id: hir::HirId, name: String) -> Vec { + get_def_from_hir_id(&self.cx, id, name, &|def_ctor, name| { + let did = self.cx.tcx.hir().local_def_id_from_hir_id(id); self.get_auto_trait_impls(did, &def_ctor, Some(name)) }) } @@ -125,7 +115,6 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { if result.is_auto() { let trait_ = hir::TraitRef { path: get_path_for_type(self.cx.tcx, trait_def_id, hir::def::Def::Trait), - ref_id: ast::DUMMY_NODE_ID, hir_ref_id: hir::DUMMY_HIR_ID, }; @@ -230,7 +219,10 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { } } - fn get_lifetime(&self, region: Region, names_map: &FxHashMap) -> Lifetime { + fn get_lifetime( + &self, region: Region<'_>, + names_map: &FxHashMap + ) -> Lifetime { self.region_name(region) .map(|name| { names_map.get(&name).unwrap_or_else(|| { @@ -241,7 +233,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { .clone() } - fn region_name(&self, region: Region) -> Option { + fn region_name(&self, region: Region<'_>) -> Option { match region { &ty::ReEarlyBound(r) => Some(r.name.to_string()), _ => None, @@ -269,7 +261,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { // we need to create the Generics. let mut finished: FxHashMap<_, Vec<_>> = Default::default(); - let mut vid_map: FxHashMap = Default::default(); + let mut vid_map: FxHashMap, RegionDeps<'_>> = Default::default(); // Flattening is done in two parts. First, we insert all of the constraints // into a map. Each RegionTarget (either a RegionVid or a Region) maps @@ -443,7 +435,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { let new_ty = match &poly_trait.trait_ { &Type::ResolvedPath { ref path, - ref typarams, + ref param_names, ref did, ref is_generic, } => { @@ -452,7 +444,13 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { .expect("segments were empty"); let (old_input, old_output) = match last_segment.args { - GenericArgs::AngleBracketed { types, .. } => (types, None), + GenericArgs::AngleBracketed { args, .. } => { + let types = args.iter().filter_map(|arg| match arg { + GenericArg::Type(ty) => Some(ty.clone()), + _ => None, + }).collect(); + (types, None) + } GenericArgs::Parenthesized { inputs, output, .. 
} => { (inputs, output) } @@ -477,7 +475,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { Type::ResolvedPath { path: new_path, - typarams: typarams.clone(), + param_names: param_names.clone(), did: did.clone(), is_generic: *is_generic, } @@ -544,7 +542,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { did, param_env, type_generics, existing_predicates ); - // The `Sized` trait must be handled specially, since we only only display it when + // The `Sized` trait must be handled specially, since we only display it when // it is *not* required (i.e., '?Sized') let sized_trait = self.cx .tcx @@ -570,7 +568,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { (replaced.clone(), replaced.clean(self.cx)) }); - let full_generics = (&type_generics, &tcx.predicates_of(did)); + let full_generics = (&type_generics, &tcx.explicit_predicates_of(did)); let Generics { params: mut generic_params, .. @@ -584,6 +582,10 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { let mut ty_to_fn: FxHashMap, Option)> = Default::default(); for (orig_p, p) in clean_where_predicates { + if p.is_none() { + continue; + } + let p = p.unwrap(); match p { WherePredicate::BoundPredicate { ty, mut bounds } => { // Writing a projection trait bound of the form @@ -673,7 +675,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { match **trait_ { Type::ResolvedPath { path: ref trait_path, - ref typarams, + ref param_names, ref did, ref is_generic, } => { @@ -728,7 +730,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { PolyTrait { trait_: Type::ResolvedPath { path: new_trait_path, - typarams: typarams.clone(), + param_names: param_names.clone(), did: did.clone(), is_generic: *is_generic, }, @@ -779,6 +781,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { } } GenericParamDefKind::Lifetime => {} + GenericParamDefKind::Const { .. } => {} } } @@ -847,7 +850,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> AutoTraitFinder<'a, 'tcx, 'rcx, 'cstore> { vec.sort_by_cached_key(|x| format!("{:?}", x)) } - fn is_fn_ty(&self, tcx: &TyCtxt, ty: &Type) -> bool { + fn is_fn_ty(&self, tcx: &TyCtxt<'_, '_, '_>, ty: &Type) -> bool { match &ty { &&Type::ResolvedPath { ref did, .. } => { *did == tcx.require_lang_item(lang_items::FnTraitLangItem) diff --git a/src/librustdoc/clean/blanket_impl.rs b/src/librustdoc/clean/blanket_impl.rs index ed0056ed26251..570c61f1ffc99 100644 --- a/src/librustdoc/clean/blanket_impl.rs +++ b/src/librustdoc/clean/blanket_impl.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
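The blanket-impl finder whose diff follows surfaces impls such as the standard library's blanket `ToString` impl for `Display` types. A minimal sketch of what that looks like from the user side (the type name and value are illustrative):

```
use std::fmt;

struct Celsius(f64);

impl fmt::Display for Celsius {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}°C", self.0)
    }
}

fn main() {
    // `to_string` comes from the std blanket impl over all `Display` types;
    // BlanketImplFinder is what makes rustdoc list that impl for `Celsius`.
    assert_eq!(Celsius(21.5).to_string(), "21.5°C");
}
```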
- use rustc::hir; use rustc::traits; use rustc::ty::ToPredicate; @@ -15,18 +5,18 @@ use rustc::ty::subst::Subst; use rustc::infer::InferOk; use syntax_pos::DUMMY_SP; -use core::DocAccessLevels; +use crate::core::DocAccessLevels; use super::*; -use self::def_ctor::{get_def_from_def_id, get_def_from_node_id}; +use self::def_ctor::{get_def_from_def_id, get_def_from_hir_id}; -pub struct BlanketImplFinder<'a, 'tcx: 'a, 'rcx: 'a, 'cstore: 'rcx> { - pub cx: &'a core::DocContext<'a, 'tcx, 'rcx, 'cstore>, +pub struct BlanketImplFinder<'a, 'tcx> { + pub cx: &'a core::DocContext<'tcx>, } -impl<'a, 'tcx, 'rcx, 'cstore> BlanketImplFinder <'a, 'tcx, 'rcx, 'cstore> { - pub fn new(cx: &'a core::DocContext<'a, 'tcx, 'rcx, 'cstore>) -> Self { +impl<'a, 'tcx> BlanketImplFinder<'a, 'tcx> { + pub fn new(cx: &'a core::DocContext<'tcx>) -> Self { BlanketImplFinder { cx } } @@ -36,9 +26,9 @@ impl<'a, 'tcx, 'rcx, 'cstore> BlanketImplFinder <'a, 'tcx, 'rcx, 'cstore> { }) } - pub fn get_with_node_id(&self, id: ast::NodeId, name: String) -> Vec { - get_def_from_node_id(&self.cx, id, name, &|def_ctor, name| { - let did = self.cx.tcx.hir().local_def_id(id); + pub fn get_with_hir_id(&self, id: hir::HirId, name: String) -> Vec { + get_def_from_hir_id(&self.cx, id, name, &|def_ctor, name| { + let did = self.cx.tcx.hir().local_def_id_from_hir_id(id); self.get_blanket_impls(did, &def_ctor, Some(name)) }) } @@ -133,7 +123,6 @@ impl<'a, 'tcx, 'rcx, 'cstore> BlanketImplFinder <'a, 'tcx, 'rcx, 'cstore> { path: get_path_for_type(infcx.tcx, trait_def_id, hir::def::Def::Trait), - ref_id: ast::DUMMY_NODE_ID, hir_ref_id: hir::DUMMY_HIR_ID, }; let provided_trait_methods = @@ -143,7 +132,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> BlanketImplFinder <'a, 'tcx, 'rcx, 'cstore> { .collect(); let ty = self.cx.get_real_ty(def_id, def_ctor, &real_name, generics); - let predicates = infcx.tcx.predicates_of(impl_def_id); + let predicates = infcx.tcx.explicit_predicates_of(impl_def_id); impls.push(Item { source: infcx.tcx.def_span(impl_def_id).clean(self.cx), diff --git a/src/librustdoc/clean/cfg.rs b/src/librustdoc/clean/cfg.rs index 847786d123efc..69445451503cc 100644 --- a/src/librustdoc/clean/cfg.rs +++ b/src/librustdoc/clean/cfg.rs @@ -1,29 +1,20 @@ -// Copyright 2017 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. +//! The representation of a `#[doc(cfg(...))]` attribute. -//! Representation of a `#[doc(cfg(...))]` attribute. - -// FIXME: Once RFC #1868 is implemented, switch to use those structures instead. +// FIXME: Once the portability lint RFC is implemented (see tracking issue #41619), +// switch to use those structures instead. use std::mem; use std::fmt::{self, Write}; use std::ops; use syntax::symbol::Symbol; -use syntax::ast::{MetaItem, MetaItemKind, NestedMetaItem, NestedMetaItemKind, LitKind}; +use syntax::ast::{MetaItem, MetaItemKind, NestedMetaItem, LitKind}; use syntax::parse::ParseSess; use syntax::feature_gate::Features; use syntax_pos::Span; -use html::escape::Escape; +use crate::html::escape::Escape; #[derive(Clone, RustcEncodable, RustcDecodable, Debug, PartialEq, Eq, Hash)] pub enum Cfg { @@ -33,7 +24,7 @@ pub enum Cfg { False, /// A generic configuration option, e.g., `test` or `target_os = "linux"`. 
Cfg(Symbol, Option), - /// Negate a configuration requirement, i.e., `not(x)`. + /// Negates a configuration requirement, i.e., `not(x)`. Not(Box), /// Union of a list of configuration requirements, i.e., `any(...)`. Any(Vec), @@ -50,9 +41,9 @@ pub struct InvalidCfgError { impl Cfg { /// Parses a `NestedMetaItem` into a `Cfg`. fn parse_nested(nested_cfg: &NestedMetaItem) -> Result { - match nested_cfg.node { - NestedMetaItemKind::MetaItem(ref cfg) => Cfg::parse(cfg), - NestedMetaItemKind::Literal(ref lit) => Err(InvalidCfgError { + match nested_cfg { + NestedMetaItem::MetaItem(ref cfg) => Cfg::parse(cfg), + NestedMetaItem::Literal(ref lit) => Err(InvalidCfgError { msg: "unexpected literal", span: lit.span, }), @@ -67,7 +58,13 @@ impl Cfg { /// If the content is not properly formatted, it will return an error indicating what and where /// the error is. pub fn parse(cfg: &MetaItem) -> Result { - let name = cfg.name(); + let name = match cfg.ident() { + Some(ident) => ident.name, + None => return Err(InvalidCfgError { + msg: "expected a single identifier", + span: cfg.span + }), + }; match cfg.node { MetaItemKind::Word => Ok(Cfg::Cfg(name, None)), MetaItemKind::NameValue(ref lit) => match lit.node { @@ -270,7 +267,7 @@ impl ops::BitOr for Cfg { struct Html<'a>(&'a Cfg, bool); fn write_with_opt_paren( - fmt: &mut fmt::Formatter, + fmt: &mut fmt::Formatter<'_>, has_paren: bool, obj: T, ) -> fmt::Result { @@ -286,7 +283,7 @@ fn write_with_opt_paren( impl<'a> fmt::Display for Html<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { match *self.0 { Cfg::Not(ref child) => match **child { Cfg::Any(ref sub_cfgs) => { @@ -379,6 +376,7 @@ impl<'a> fmt::Display for Html<'a> { "pc" => "PC", "rumprun" => "Rumprun", "sun" => "Sun", + "fortanix" => "Fortanix", _ => "" }, ("target_env", Some(env)) => match &*env.as_str() { @@ -387,6 +385,7 @@ impl<'a> fmt::Display for Html<'a> { "musl" => "musl", "newlib" => "Newlib", "uclibc" => "uClibc", + "sgx" => "SGX", _ => "", }, ("target_endian", Some(endian)) => return write!(fmt, "{}-endian", endian), @@ -431,33 +430,33 @@ mod test { fn dummy_meta_item_word(name: &str) -> MetaItem { MetaItem { - ident: Path::from_ident(Ident::from_str(name)), + path: Path::from_ident(Ident::from_str(name)), node: MetaItemKind::Word, span: DUMMY_SP, } } macro_rules! 
dummy_meta_item_list { - ($name:ident, [$($list:ident),* $(,)*]) => { + ($name:ident, [$($list:ident),* $(,)?]) => { MetaItem { - ident: Path::from_ident(Ident::from_str(stringify!($name))), + path: Path::from_ident(Ident::from_str(stringify!($name))), node: MetaItemKind::List(vec![ $( - dummy_spanned(NestedMetaItemKind::MetaItem( + NestedMetaItem::MetaItem( dummy_meta_item_word(stringify!($list)), - )), + ), )* ]), span: DUMMY_SP, } }; - ($name:ident, [$($list:expr),* $(,)*]) => { + ($name:ident, [$($list:expr),* $(,)?]) => { MetaItem { - ident: Path::from_ident(Ident::from_str(stringify!($name))), + path: Path::from_ident(Ident::from_str(stringify!($name))), node: MetaItemKind::List(vec![ $( - dummy_spanned(NestedMetaItemKind::MetaItem($list)), + NestedMetaItem::MetaItem($list), )* ]), span: DUMMY_SP, @@ -594,7 +593,7 @@ mod test { assert_eq!(Cfg::parse(&mi), Ok(word_cfg("all"))); let mi = MetaItem { - ident: Path::from_ident(Ident::from_str("all")), + path: Path::from_ident(Ident::from_str("all")), node: MetaItemKind::NameValue(dummy_spanned(LitKind::Str( Symbol::intern("done"), StrStyle::Cooked, @@ -629,7 +628,7 @@ mod test { fn test_parse_err() { with_globals(|| { let mi = MetaItem { - ident: Path::from_ident(Ident::from_str("foo")), + path: Path::from_ident(Ident::from_str("foo")), node: MetaItemKind::NameValue(dummy_spanned(LitKind::Bool(false))), span: DUMMY_SP, }; diff --git a/src/librustdoc/clean/def_ctor.rs b/src/librustdoc/clean/def_ctor.rs index b14c36a59e844..405a2e66d6e5c 100644 --- a/src/librustdoc/clean/def_ctor.rs +++ b/src/librustdoc/clean/def_ctor.rs @@ -1,18 +1,8 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use core::DocContext; +use crate::core::DocContext; use super::*; -pub fn get_def_from_def_id(cx: &DocContext, +pub fn get_def_from_def_id(cx: &DocContext<'_>, def_id: DefId, callback: &F, ) -> Vec @@ -48,13 +38,13 @@ where F: Fn(& dyn Fn(DefId) -> Def) -> Vec { } } -pub fn get_def_from_node_id(cx: &DocContext, - id: ast::NodeId, - name: String, - callback: &F, +pub fn get_def_from_hir_id(cx: &DocContext<'_>, + id: hir::HirId, + name: String, + callback: &F, ) -> Vec where F: Fn(& dyn Fn(DefId) -> Def, String) -> Vec { - let item = &cx.tcx.hir().expect_item(id).node; + let item = &cx.tcx.hir().expect_item_by_hir_id(id).node; callback(&match *item { hir::ItemKind::Struct(_, _) => Def::Struct, diff --git a/src/librustdoc/clean/inline.rs b/src/librustdoc/clean/inline.rs index 217345548c8c9..8da71cf708aa0 100644 --- a/src/librustdoc/clean/inline.rs +++ b/src/librustdoc/clean/inline.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Support for inlining external documentation into the current AST. 
use std::iter::once; @@ -23,9 +13,9 @@ use rustc_metadata::cstore::LoadedMacro; use rustc::ty; use rustc::util::nodemap::FxHashSet; -use core::{DocContext, DocAccessLevels}; -use doctree; -use clean::{ +use crate::core::{DocContext, DocAccessLevels}; +use crate::doctree; +use crate::clean::{ self, GetDefId, ToSource, @@ -45,10 +35,18 @@ use super::Clean; /// /// The returned value is `None` if the definition could not be inlined, /// and `Some` of a vector of items if it was successfully expanded. -pub fn try_inline(cx: &DocContext, def: Def, name: ast::Name, visited: &mut FxHashSet) +pub fn try_inline( + cx: &DocContext<'_>, + def: Def, + name: ast::Name, + visited: &mut FxHashSet +) -> Option> { - if def == Def::Err { return None } - let did = def.def_id(); + let did = if let Some(did) = def.opt_def_id() { + did + } else { + return None; + }; if did.is_local() { return None } let mut ret = Vec::new(); let inner = match def { @@ -90,9 +88,7 @@ pub fn try_inline(cx: &DocContext, def: Def, name: ast::Name, visited: &mut FxHa Def::Variant(..) => return None, // Assume that enum variants and struct types are re-exported next to // their constructors. - Def::VariantCtor(..) | - Def::StructCtor(..) | - Def::SelfCtor(..) => return Some(Vec::new()), + Def::Ctor(..) | Def::SelfCtor(..) => return Some(Vec::new()), Def::Mod(did) => { record_extern_fqn(cx, did, clean::TypeKind::Module); clean::ModuleItem(build_module(cx, did, visited)) @@ -131,7 +127,7 @@ pub fn try_inline(cx: &DocContext, def: Def, name: ast::Name, visited: &mut FxHa Some(ret) } -pub fn try_inline_glob(cx: &DocContext, def: Def, visited: &mut FxHashSet) +pub fn try_inline_glob(cx: &DocContext<'_>, def: Def, visited: &mut FxHashSet) -> Option> { if def == Def::Err { return None } @@ -148,7 +144,7 @@ pub fn try_inline_glob(cx: &DocContext, def: Def, visited: &mut FxHashSet } } -pub fn load_attrs(cx: &DocContext, did: DefId) -> clean::Attributes { +pub fn load_attrs(cx: &DocContext<'_>, did: DefId) -> clean::Attributes { cx.tcx.get_attrs(did).clean(cx) } @@ -156,7 +152,7 @@ pub fn load_attrs(cx: &DocContext, did: DefId) -> clean::Attributes { /// /// These names are used later on by HTML rendering to generate things like /// source links back to the original item. 
-pub fn record_extern_fqn(cx: &DocContext, did: DefId, kind: clean::TypeKind) { +pub fn record_extern_fqn(cx: &DocContext<'_>, did: DefId, kind: clean::TypeKind) { let mut crate_name = cx.tcx.crate_name(did.krate).to_string(); if did.is_local() { crate_name = cx.crate_name.clone().unwrap_or(crate_name); @@ -184,7 +180,7 @@ pub fn record_extern_fqn(cx: &DocContext, did: DefId, kind: clean::TypeKind) { } } -pub fn build_external_trait(cx: &DocContext, did: DefId) -> clean::Trait { +pub fn build_external_trait(cx: &DocContext<'_>, did: DefId) -> clean::Trait { let auto_trait = cx.tcx.trait_def(did).has_auto_impl; let trait_items = cx.tcx.associated_items(did).map(|item| item.clean(cx)).collect(); let predicates = cx.tcx.predicates_of(did); @@ -204,30 +200,35 @@ pub fn build_external_trait(cx: &DocContext, did: DefId) -> clean::Trait { } } -fn build_external_function(cx: &DocContext, did: DefId) -> clean::Function { +fn build_external_function(cx: &DocContext<'_>, did: DefId) -> clean::Function { let sig = cx.tcx.fn_sig(did); - let constness = if cx.tcx.is_const_fn(did) { + let constness = if cx.tcx.is_min_const_fn(did) { hir::Constness::Const } else { hir::Constness::NotConst }; let predicates = cx.tcx.predicates_of(did); + let generics = (cx.tcx.generics_of(did), &predicates).clean(cx); + let decl = (did, sig).clean(cx); + let (all_types, ret_types) = clean::get_all_types(&generics, &decl, cx); clean::Function { - decl: (did, sig).clean(cx), - generics: (cx.tcx.generics_of(did), &predicates).clean(cx), + decl, + generics, header: hir::FnHeader { unsafety: sig.unsafety(), abi: sig.abi(), constness, asyncness: hir::IsAsync::NotAsync, - } + }, + all_types, + ret_types, } } -fn build_enum(cx: &DocContext, did: DefId) -> clean::Enum { - let predicates = cx.tcx.predicates_of(did); +fn build_enum(cx: &DocContext<'_>, did: DefId) -> clean::Enum { + let predicates = cx.tcx.explicit_predicates_of(did); clean::Enum { generics: (cx.tcx.generics_of(did), &predicates).clean(cx), @@ -236,8 +237,8 @@ fn build_enum(cx: &DocContext, did: DefId) -> clean::Enum { } } -fn build_struct(cx: &DocContext, did: DefId) -> clean::Struct { - let predicates = cx.tcx.predicates_of(did); +fn build_struct(cx: &DocContext<'_>, did: DefId) -> clean::Struct { + let predicates = cx.tcx.explicit_predicates_of(did); let variant = cx.tcx.adt_def(did).non_enum_variant(); clean::Struct { @@ -252,8 +253,8 @@ fn build_struct(cx: &DocContext, did: DefId) -> clean::Struct { } } -fn build_union(cx: &DocContext, did: DefId) -> clean::Union { - let predicates = cx.tcx.predicates_of(did); +fn build_union(cx: &DocContext<'_>, did: DefId) -> clean::Union { + let predicates = cx.tcx.explicit_predicates_of(did); let variant = cx.tcx.adt_def(did).non_enum_variant(); clean::Union { @@ -264,8 +265,8 @@ fn build_union(cx: &DocContext, did: DefId) -> clean::Union { } } -fn build_type_alias(cx: &DocContext, did: DefId) -> clean::Typedef { - let predicates = cx.tcx.predicates_of(did); +fn build_type_alias(cx: &DocContext<'_>, did: DefId) -> clean::Typedef { + let predicates = cx.tcx.explicit_predicates_of(did); clean::Typedef { type_: cx.tcx.type_of(did).clean(cx), @@ -273,7 +274,7 @@ fn build_type_alias(cx: &DocContext, did: DefId) -> clean::Typedef { } } -pub fn build_impls(cx: &DocContext, did: DefId) -> Vec { +pub fn build_impls(cx: &DocContext<'_>, did: DefId) -> Vec { let tcx = cx.tcx; let mut impls = Vec::new(); @@ -284,7 +285,7 @@ pub fn build_impls(cx: &DocContext, did: DefId) -> Vec { impls } -pub fn build_impl(cx: &DocContext, did: DefId, 
ret: &mut Vec) { +pub fn build_impl(cx: &DocContext<'_>, did: DefId, ret: &mut Vec) { if !cx.renderinfo.borrow_mut().inlined.insert(did) { return } @@ -303,8 +304,8 @@ pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { } } - let for_ = if let Some(nodeid) = tcx.hir().as_local_node_id(did) { - match tcx.hir().expect_item(nodeid).node { + let for_ = if let Some(hir_id) = tcx.hir().as_local_hir_id(did) { + match tcx.hir().expect_item_by_hir_id(hir_id).node { hir::ItemKind::Impl(.., ref t, _) => { t.clean(cx) } @@ -324,9 +325,9 @@ pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { } } - let predicates = tcx.predicates_of(did); - let (trait_items, generics) = if let Some(nodeid) = tcx.hir().as_local_node_id(did) { - match tcx.hir().expect_item(nodeid).node { + let predicates = tcx.explicit_predicates_of(did); + let (trait_items, generics) = if let Some(hir_id) = tcx.hir().as_local_hir_id(did) { + match tcx.hir().expect_item_by_hir_id(hir_id).node { hir::ItemKind::Impl(.., ref gen, _, _, ref item_ids) => { ( item_ids.iter() @@ -394,7 +395,11 @@ pub fn build_impl(cx: &DocContext, did: DefId, ret: &mut Vec) { }); } -fn build_module(cx: &DocContext, did: DefId, visited: &mut FxHashSet) -> clean::Module { +fn build_module( + cx: &DocContext<'_>, + did: DefId, + visited: &mut FxHashSet +) -> clean::Module { let mut items = Vec::new(); fill_in(cx, did, &mut items, visited); return clean::Module { @@ -402,7 +407,7 @@ fn build_module(cx: &DocContext, did: DefId, visited: &mut FxHashSet) -> is_crate: false, }; - fn fill_in(cx: &DocContext, did: DefId, items: &mut Vec, + fn fill_in(cx: &DocContext<'_>, did: DefId, items: &mut Vec, visited: &mut FxHashSet) { // If we're re-exporting a re-export it may actually re-export something in // two namespaces, so the target may be listed twice. 
Make sure we only @@ -419,22 +424,22 @@ fn build_module(cx: &DocContext, did: DefId, visited: &mut FxHashSet) -> } } -pub fn print_inlined_const(cx: &DocContext, did: DefId) -> String { - if let Some(node_id) = cx.tcx.hir().as_local_node_id(did) { - cx.tcx.hir().node_to_pretty_string(node_id) +pub fn print_inlined_const(cx: &DocContext<'_>, did: DefId) -> String { + if let Some(node_id) = cx.tcx.hir().as_local_hir_id(did) { + cx.tcx.hir().hir_to_pretty_string(node_id) } else { cx.tcx.rendered_const(did) } } -fn build_const(cx: &DocContext, did: DefId) -> clean::Constant { +fn build_const(cx: &DocContext<'_>, did: DefId) -> clean::Constant { clean::Constant { type_: cx.tcx.type_of(did).clean(cx), expr: print_inlined_const(cx, did) } } -fn build_static(cx: &DocContext, did: DefId, mutable: bool) -> clean::Static { +fn build_static(cx: &DocContext<'_>, did: DefId, mutable: bool) -> clean::Static { clean::Static { type_: cx.tcx.type_of(did).clean(cx), mutability: if mutable {clean::Mutable} else {clean::Immutable}, @@ -442,7 +447,7 @@ fn build_static(cx: &DocContext, did: DefId, mutable: bool) -> clean::Static { } } -fn build_macro(cx: &DocContext, did: DefId, name: ast::Name) -> clean::ItemEnum { +fn build_macro(cx: &DocContext<'_>, did: DefId, name: ast::Name) -> clean::ItemEnum { let imported_from = cx.tcx.original_crate_name(did.krate); match cx.cstore.load_macro_untracked(did, cx.sess()) { LoadedMacro::MacroDef(def) => { @@ -544,7 +549,7 @@ fn separate_supertrait_bounds(mut g: clean::Generics) (g, ty_bounds) } -pub fn record_extern_trait(cx: &DocContext, did: DefId) { +pub fn record_extern_trait(cx: &DocContext<'_>, did: DefId) { if did.is_local() { return; } diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 1e33ec8c37661..e994c661fdceb 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module contains the "cleaned" pieces of the AST, and the functions //! that clean them. 
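(The hunks above and below repeat two mechanical changes: `DocContext<'a, 'tcx, 'rcx, 'cstore>` collapses to `DocContext<'tcx>` / `DocContext<'_>`, and item lookups move from `ast::NodeId` to `hir::HirId`. A minimal sketch of the `HirId` pattern follows, for orientation only; it is not part of the patch and assumes `cx: &DocContext<'_>` and `did: DefId` are in scope, as in the surrounding functions.)

    // Before: local items were reached through an `ast::NodeId`.
    //     if let Some(node_id) = cx.tcx.hir().as_local_node_id(did) {
    //         let item = cx.tcx.hir().expect_item(node_id);
    //     }

    // After: the same lookup goes through `hir::HirId`, and local `DefId`s are
    // derived with `local_def_id_from_hir_id` instead of `local_def_id`.
    if let Some(hir_id) = cx.tcx.hir().as_local_hir_id(did) {
        let item = cx.tcx.hir().expect_item_by_hir_id(hir_id);
        let item_def_id = cx.tcx.hir().local_def_id_from_hir_id(hir_id);
        // ... use `item` / `item_def_id` exactly as the old code did ...
    }

(The same substitution appears earlier in this diff in auto_trait.rs, blanket_impl.rs, def_ctor.rs and inline.rs.)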
@@ -23,16 +13,16 @@ use rustc_data_structures::sync::Lrc; use rustc_target::spec::abi::Abi; use rustc_typeck::hir_ty_to_ty; use rustc::infer::region_constraints::{RegionConstraintData, Constraint}; -use rustc::mir::interpret::ConstValue; use rustc::middle::resolve_lifetime as rl; use rustc::middle::lang_items; use rustc::middle::stability; -use rustc::mir::interpret::GlobalId; -use rustc::hir::{self, GenericArg, HirVec}; +use rustc::mir::interpret::{GlobalId, ConstValue}; +use rustc::hir::{self, HirVec}; use rustc::hir::def::{self, Def, CtorKind}; use rustc::hir::def_id::{CrateNum, DefId, CRATE_DEF_INDEX, LOCAL_CRATE}; -use rustc::ty::subst::Substs; -use rustc::ty::{self, TyCtxt, Region, RegionVid, Ty, AdtKind}; +use rustc::hir::map::DisambiguatedDefPathData; +use rustc::ty::subst::{Kind, InternalSubsts, SubstsRef, UnpackedKind}; +use rustc::ty::{self, DefIdTree, TyCtxt, Region, RegionVid, Ty, AdtKind}; use rustc::ty::fold::TypeFolder; use rustc::ty::layout::VariantIdx; use rustc::util::nodemap::{FxHashMap, FxHashSet}; @@ -59,11 +49,12 @@ use std::u32; use parking_lot::ReentrantMutex; -use core::{self, DocContext}; -use doctree; -use visit_ast; -use html::render::{cache, ExternalLocation}; -use html::item_type::ItemType; +use crate::core::{self, DocContext}; +use crate::doctree; +use crate::visit_ast; +use crate::html::render::{cache, ExternalLocation}; +use crate::html::item_type::ItemType; + use self::cfg::Cfg; use self::auto_trait::AutoTraitFinder; @@ -81,56 +72,56 @@ thread_local!(pub static MAX_DEF_ID: RefCell> = Defau const FN_OUTPUT_NAME: &'static str = "Output"; // extract the stability index for a node from tcx, if possible -fn get_stability(cx: &DocContext, def_id: DefId) -> Option { +fn get_stability(cx: &DocContext<'_>, def_id: DefId) -> Option { cx.tcx.lookup_stability(def_id).clean(cx) } -fn get_deprecation(cx: &DocContext, def_id: DefId) -> Option { +fn get_deprecation(cx: &DocContext<'_>, def_id: DefId) -> Option { cx.tcx.lookup_deprecation(def_id).clean(cx) } pub trait Clean { - fn clean(&self, cx: &DocContext) -> T; + fn clean(&self, cx: &DocContext<'_>) -> T; } impl, U> Clean> for [T] { - fn clean(&self, cx: &DocContext) -> Vec { + fn clean(&self, cx: &DocContext<'_>) -> Vec { self.iter().map(|x| x.clean(cx)).collect() } } impl, U, V: Idx> Clean> for IndexVec { - fn clean(&self, cx: &DocContext) -> IndexVec { + fn clean(&self, cx: &DocContext<'_>) -> IndexVec { self.iter().map(|x| x.clean(cx)).collect() } } impl, U> Clean for P { - fn clean(&self, cx: &DocContext) -> U { + fn clean(&self, cx: &DocContext<'_>) -> U { (**self).clean(cx) } } impl, U> Clean for Rc { - fn clean(&self, cx: &DocContext) -> U { + fn clean(&self, cx: &DocContext<'_>) -> U { (**self).clean(cx) } } impl, U> Clean> for Option { - fn clean(&self, cx: &DocContext) -> Option { + fn clean(&self, cx: &DocContext<'_>) -> Option { self.as_ref().map(|v| v.clean(cx)) } } impl Clean for ty::Binder where T: Clean { - fn clean(&self, cx: &DocContext) -> U { + fn clean(&self, cx: &DocContext<'_>) -> U { self.skip_binder().clean(cx) } } impl, U> Clean> for P<[T]> { - fn clean(&self, cx: &DocContext) -> Vec { + fn clean(&self, cx: &DocContext<'_>) -> Vec { self.iter().map(|x| x.clean(cx)).collect() } } @@ -149,9 +140,9 @@ pub struct Crate { pub masked_crates: FxHashSet, } -impl<'a, 'tcx, 'rcx, 'cstore> Clean for visit_ast::RustdocVisitor<'a, 'tcx, 'rcx, 'cstore> { - fn clean(&self, cx: &DocContext) -> Crate { - use ::visit_lib::LibEmbargoVisitor; +impl<'a, 'tcx> Clean for visit_ast::RustdocVisitor<'a, 'tcx> { + fn 
clean(&self, cx: &DocContext<'_>) -> Crate { + use crate::visit_lib::LibEmbargoVisitor; { let mut r = cx.renderinfo.borrow_mut(); @@ -176,7 +167,12 @@ impl<'a, 'tcx, 'rcx, 'cstore> Clean for visit_ast::RustdocVisitor<'a, 'tc match module.inner { ModuleItem(ref module) => { for it in &module.items { - if it.is_extern_crate() && it.attrs.has_doc_flag("masked") { + // `compiler_builtins` should be masked too, but we can't apply + // `#[doc(masked)]` to the injected `extern crate` because it's unstable. + if it.is_extern_crate() + && (it.attrs.has_doc_flag("masked") + || self.cx.tcx.is_compiler_builtins(it.def_id.krate)) + { masked_crates.insert(it.def_id.krate); } } @@ -239,7 +235,7 @@ pub struct ExternalCrate { } impl Clean for CrateNum { - fn clean(&self, cx: &DocContext) -> ExternalCrate { + fn clean(&self, cx: &DocContext<'_>) -> ExternalCrate { let root = DefId { krate: *self, index: CRATE_DEF_INDEX }; let krate_span = cx.tcx.def_span(root); let krate_src = cx.sess().source_map().span_to_filename(krate_span); @@ -282,16 +278,16 @@ impl Clean for CrateNum { }; let primitives = if root.is_local() { cx.tcx.hir().krate().module.item_ids.iter().filter_map(|&id| { - let item = cx.tcx.hir().expect_item(id.id); + let item = cx.tcx.hir().expect_item_by_hir_id(id.id); match item.node { hir::ItemKind::Mod(_) => { - as_primitive(Def::Mod(cx.tcx.hir().local_def_id(id.id))) + as_primitive(Def::Mod(cx.tcx.hir().local_def_id_from_hir_id(id.id))) } hir::ItemKind::Use(ref path, hir::UseKind::Single) if item.vis.node.is_pub() => { as_primitive(path.def).map(|(_, prim, attrs)| { // Pretend the primitive is local. - (cx.tcx.hir().local_def_id(id.id), prim, attrs) + (cx.tcx.hir().local_def_id_from_hir_id(id.id), prim, attrs) }) } _ => None @@ -324,15 +320,15 @@ impl Clean for CrateNum { }; let keywords = if root.is_local() { cx.tcx.hir().krate().module.item_ids.iter().filter_map(|&id| { - let item = cx.tcx.hir().expect_item(id.id); + let item = cx.tcx.hir().expect_item_by_hir_id(id.id); match item.node { hir::ItemKind::Mod(_) => { - as_keyword(Def::Mod(cx.tcx.hir().local_def_id(id.id))) + as_keyword(Def::Mod(cx.tcx.hir().local_def_id_from_hir_id(id.id))) } hir::ItemKind::Use(ref path, hir::UseKind::Single) if item.vis.node.is_pub() => { as_keyword(path.def).map(|(_, prim, attrs)| { - (cx.tcx.hir().local_def_id(id.id), prim, attrs) + (cx.tcx.hir().local_def_id_from_hir_id(id.id), prim, attrs) }) } _ => None @@ -371,7 +367,7 @@ pub struct Item { } impl fmt::Debug for Item { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { let fake = MAX_DEF_ID.with(|m| m.borrow().get(&self.def_id.krate) .map(|id| self.def_id >= *id).unwrap_or(false)); @@ -425,6 +421,9 @@ impl Item { pub fn is_enum(&self) -> bool { self.type_() == ItemType::Enum } + pub fn is_variant(&self) -> bool { + self.type_() == ItemType::Variant + } pub fn is_associated_type(&self) -> bool { self.type_() == ItemType::AssociatedType } @@ -478,7 +477,7 @@ impl Item { classes.push("unstable"); } - if !s.deprecated_since.is_empty() { + if s.deprecation.is_some() { classes.push("deprecated"); } @@ -496,13 +495,34 @@ impl Item { pub fn is_non_exhaustive(&self) -> bool { self.attrs.other_attrs.iter() - .any(|a| a.name().as_str() == "non_exhaustive") + .any(|a| a.check_name("non_exhaustive")) } /// Returns a documentation-level item type from the item. 
pub fn type_(&self) -> ItemType { ItemType::from(self) } + + /// Returns the info in the item's `#[deprecated]` or `#[rustc_deprecated]` attributes. + /// + /// If the item is not deprecated, returns `None`. + pub fn deprecation(&self) -> Option<&Deprecation> { + self.deprecation + .as_ref() + .or_else(|| self.stability.as_ref().and_then(|s| s.deprecation.as_ref())) + } + pub fn is_default(&self) -> bool { + match self.inner { + ItemEnum::MethodItem(ref meth) => { + if let Some(defaultness) = meth.defaultness { + defaultness.has_value() && !defaultness.is_final() + } else { + false + } + } + _ => false, + } + } } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] @@ -519,6 +539,7 @@ pub enum ItemEnum { StaticItem(Static), ConstantItem(Constant), TraitItem(Trait), + TraitAliasItem(TraitAlias), ImplItem(Impl), /// A method signature only. Used for required methods in traits (ie, /// non-default-methods). @@ -556,6 +577,7 @@ impl ItemEnum { ItemEnum::TyMethodItem(ref i) => &i.generics, ItemEnum::MethodItem(ref i) => &i.generics, ItemEnum::ForeignFunctionItem(ref f) => &f.generics, + ItemEnum::TraitAliasItem(ref ta) => &ta.generics, _ => return None, }) } @@ -576,7 +598,7 @@ pub struct Module { } impl Clean for doctree::Module { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let name = if self.name.is_some() { self.name.expect("No name provided").clean(cx) } else { @@ -589,7 +611,7 @@ impl Clean for doctree::Module { let attrs = self.attrs.clean(cx); let mut items: Vec = vec![]; - items.extend(self.extern_crates.iter().map(|x| x.clean(cx))); + items.extend(self.extern_crates.iter().flat_map(|x| x.clean(cx))); items.extend(self.imports.iter().flat_map(|x| x.clean(cx))); items.extend(self.structs.iter().map(|x| x.clean(cx))); items.extend(self.unions.iter().map(|x| x.clean(cx))); @@ -605,6 +627,7 @@ impl Clean for doctree::Module { items.extend(self.impls.iter().flat_map(|x| x.clean(cx))); items.extend(self.macros.iter().map(|x| x.clean(cx))); items.extend(self.proc_macros.iter().map(|x| x.clean(cx))); + items.extend(self.trait_aliases.iter().map(|x| x.clean(cx))); // determine if we should display the inner contents or // the outer `mod` item for the source code. @@ -687,7 +710,7 @@ impl AttributesExt for [ast::Attribute] { } pub trait NestedAttributesExt { - /// Returns whether the attribute list contains a specific `Word` + /// Returns `true` if the attribute list contains a specific `Word` fn has_word(self, word: &str) -> bool; } @@ -769,15 +792,15 @@ pub struct Attributes { impl Attributes { /// Extracts the content from an attribute `#[doc(cfg(content))]`. 
fn extract_cfg(mi: &ast::MetaItem) -> Option<&ast::MetaItem> { - use syntax::ast::NestedMetaItemKind::MetaItem; + use syntax::ast::NestedMetaItem::MetaItem; if let ast::MetaItemKind::List(ref nmis) = mi.node { if nmis.len() == 1 { - if let MetaItem(ref cfg_mi) = nmis[0].node { + if let MetaItem(ref cfg_mi) = nmis[0] { if cfg_mi.check_name("cfg") { if let ast::MetaItemKind::List(ref cfg_nmis) = cfg_mi.node { if cfg_nmis.len() == 1 { - if let MetaItem(ref content_mi) = cfg_nmis[0].node { + if let MetaItem(ref content_mi) = cfg_nmis[0] { return Some(content_mi); } } @@ -939,11 +962,12 @@ impl Attributes { } } - /// Get links as a vector + /// Gets links as a vector /// /// Cache must be populated before call pub fn links(&self, krate: &CrateNum) -> Vec<(String, String)> { - use html::format::href; + use crate::html::format::href; + self.links.iter().filter_map(|&(ref s, did, ref fragment)| { match did { Some(did) => { @@ -968,11 +992,13 @@ impl Attributes { "https://doc.rust-lang.org/nightly", }; // This is a primitive so the url is done "by hand". + let tail = fragment.find('#').unwrap_or_else(|| fragment.len()); Some((s.clone(), - format!("{}{}std/primitive.{}.html", + format!("{}{}std/primitive.{}.html{}", url, if !url.ends_with('/') { "/" } else { "" }, - fragment))) + &fragment[..tail], + &fragment[tail..]))) } else { panic!("This isn't a primitive?!"); } @@ -1013,7 +1039,7 @@ impl AttributesExt for Attributes { } impl Clean for [ast::Attribute] { - fn clean(&self, cx: &DocContext) -> Attributes { + fn clean(&self, cx: &DocContext<'_>) -> Attributes { Attributes::from_ast(cx.sess().diagnostic(), self) } } @@ -1025,7 +1051,7 @@ pub enum GenericBound { } impl GenericBound { - fn maybe_sized(cx: &DocContext) -> GenericBound { + fn maybe_sized(cx: &DocContext<'_>) -> GenericBound { let did = cx.tcx.require_lang_item(lang_items::SizedTraitLangItem); let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, &cx.tcx.item_name(did).as_str(), @@ -1034,7 +1060,7 @@ impl GenericBound { GenericBound::TraitBound(PolyTrait { trait_: ResolvedPath { path, - typarams: None, + param_names: None, did, is_generic: false, }, @@ -1042,7 +1068,7 @@ impl GenericBound { }, hir::TraitBoundModifier::Maybe) } - fn is_sized_bound(&self, cx: &DocContext) -> bool { + fn is_sized_bound(&self, cx: &DocContext<'_>) -> bool { use rustc::hir::TraitBoundModifier as TBM; if let GenericBound::TraitBound(PolyTrait { ref trait_, .. }, TBM::None) = *self { if trait_.def_id() == cx.tcx.lang_items().sized_trait() { @@ -1061,14 +1087,15 @@ impl GenericBound { fn get_trait_type(&self) -> Option { if let GenericBound::TraitBound(PolyTrait { ref trait_, .. 
}, _) = *self { - return Some(trait_.clone()); + Some(trait_.clone()) + } else { + None } - None } } impl Clean for hir::GenericBound { - fn clean(&self, cx: &DocContext) -> GenericBound { + fn clean(&self, cx: &DocContext<'_>) -> GenericBound { match *self { hir::GenericBound::Outlives(lt) => GenericBound::Outlives(lt.clean(cx)), hir::GenericBound::Trait(ref t, modifier) => { @@ -1078,24 +1105,37 @@ impl Clean for hir::GenericBound { } } -fn external_generic_args(cx: &DocContext, trait_did: Option, has_self: bool, - bindings: Vec, substs: &Substs) -> GenericArgs { - let lifetimes = substs.regions().filter_map(|v| v.clean(cx)).collect(); - let types = substs.types().skip(has_self as usize).collect::>(); +fn external_generic_args( + cx: &DocContext<'_>, + trait_did: Option, + has_self: bool, + bindings: Vec, + substs: SubstsRef<'_>, +) -> GenericArgs { + let mut skip_self = has_self; + let mut ty_sty = None; + let args: Vec<_> = substs.iter().filter_map(|kind| match kind.unpack() { + UnpackedKind::Lifetime(lt) => { + lt.clean(cx).and_then(|lt| Some(GenericArg::Lifetime(lt))) + } + UnpackedKind::Type(_) if skip_self => { + skip_self = false; + None + } + UnpackedKind::Type(ty) => { + ty_sty = Some(&ty.sty); + Some(GenericArg::Type(ty.clean(cx))) + } + UnpackedKind::Const(ct) => Some(GenericArg::Const(ct.clean(cx))), + }).collect(); match trait_did { // Attempt to sugar an external path like Fn<(A, B,), C> to Fn(A, B) -> C Some(did) if cx.tcx.lang_items().fn_trait_kind(did).is_some() => { - assert_eq!(types.len(), 1); - let inputs = match types[0].sty { - ty::Tuple(ref tys) => tys.iter().map(|t| t.clean(cx)).collect(), - _ => { - return GenericArgs::AngleBracketed { - lifetimes, - types: types.clean(cx), - bindings, - } - } + assert!(ty_sty.is_some()); + let inputs = match ty_sty { + Some(ty::Tuple(ref tys)) => tys.iter().map(|t| t.clean(cx)).collect(), + _ => return GenericArgs::AngleBracketed { args, bindings }, }; let output = None; // FIXME(#20299) return type comes from a projection now @@ -1103,25 +1143,18 @@ fn external_generic_args(cx: &DocContext, trait_did: Option, has_self: bo // ty::Tuple(ref v) if v.is_empty() => None, // -> () // _ => Some(types[1].clean(cx)) // }; - GenericArgs::Parenthesized { - inputs, - output, - } + GenericArgs::Parenthesized { inputs, output } }, _ => { - GenericArgs::AngleBracketed { - lifetimes, - types: types.clean(cx), - bindings, - } + GenericArgs::AngleBracketed { args, bindings } } } } // trait_did should be set to a trait's DefId if called on a TraitRef, in order to sugar // from Fn<(A, B,), C> to Fn(A, B) -> C -fn external_path(cx: &DocContext, name: &str, trait_did: Option, has_self: bool, - bindings: Vec, substs: &Substs) -> Path { +fn external_path(cx: &DocContext<'_>, name: &str, trait_did: Option, has_self: bool, + bindings: Vec, substs: SubstsRef<'_>) -> Path { Path { global: false, def: Def::Err, @@ -1133,7 +1166,7 @@ fn external_path(cx: &DocContext, name: &str, trait_did: Option, has_self } impl<'a, 'tcx> Clean for (&'a ty::TraitRef<'tcx>, Vec) { - fn clean(&self, cx: &DocContext) -> GenericBound { + fn clean(&self, cx: &DocContext<'_>) -> GenericBound { let (trait_ref, ref bounds) = *self; inline::record_extern_fqn(cx, trait_ref.def_id, TypeKind::Trait); let path = external_path(cx, &cx.tcx.item_name(trait_ref.def_id).as_str(), @@ -1165,7 +1198,7 @@ impl<'a, 'tcx> Clean for (&'a ty::TraitRef<'tcx>, Vec PolyTrait { trait_: ResolvedPath { path, - typarams: None, + param_names: None, did: trait_ref.def_id, is_generic: false, }, @@ -1177,13 
+1210,13 @@ impl<'a, 'tcx> Clean for (&'a ty::TraitRef<'tcx>, Vec } impl<'tcx> Clean for ty::TraitRef<'tcx> { - fn clean(&self, cx: &DocContext) -> GenericBound { + fn clean(&self, cx: &DocContext<'_>) -> GenericBound { (self, vec![]).clean(cx) } } -impl<'tcx> Clean>> for Substs<'tcx> { - fn clean(&self, cx: &DocContext) -> Option> { +impl<'tcx> Clean>> for InternalSubsts<'tcx> { + fn clean(&self, cx: &DocContext<'_>) -> Option> { let mut v = Vec::new(); v.extend(self.regions().filter_map(|r| r.clean(cx)).map(GenericBound::Outlives)); v.extend(self.types().map(|t| GenericBound::TraitBound(PolyTrait { @@ -1210,10 +1243,9 @@ impl Lifetime { } impl Clean for hir::Lifetime { - fn clean(&self, cx: &DocContext) -> Lifetime { - if self.id != ast::DUMMY_NODE_ID { - let hir_id = cx.tcx.hir().node_to_hir_id(self.id); - let def = cx.tcx.named_region(hir_id); + fn clean(&self, cx: &DocContext<'_>) -> Lifetime { + if self.hir_id != hir::DUMMY_HIR_ID { + let def = cx.tcx.named_region(self.hir_id); match def { Some(rl::Region::EarlyBound(_, node_id, _)) | Some(rl::Region::LateBound(_, node_id, _)) | @@ -1230,7 +1262,7 @@ impl Clean for hir::Lifetime { } impl Clean for hir::GenericParam { - fn clean(&self, _: &DocContext) -> Lifetime { + fn clean(&self, _: &DocContext<'_>) -> Lifetime { match self.kind { hir::GenericParamKind::Lifetime { .. } => { if self.bounds.len() > 0 { @@ -1253,14 +1285,23 @@ impl Clean for hir::GenericParam { } } +impl Clean for hir::ConstArg { + fn clean(&self, cx: &DocContext<'_>) -> Constant { + Constant { + type_: cx.tcx.type_of(cx.tcx.hir().body_owner_def_id(self.value.body)).clean(cx), + expr: print_const_expr(cx, self.value.body), + } + } +} + impl<'tcx> Clean for ty::GenericParamDef { - fn clean(&self, _cx: &DocContext) -> Lifetime { + fn clean(&self, _cx: &DocContext<'_>) -> Lifetime { Lifetime(self.name.to_string()) } } impl Clean> for ty::RegionKind { - fn clean(&self, cx: &DocContext) -> Option { + fn clean(&self, cx: &DocContext<'_>) -> Option { match *self { ty::ReStatic => Some(Lifetime::statik()), ty::ReLateBound(_, ty::BrNamed(_, name)) => Some(Lifetime(name.to_string())), @@ -1273,7 +1314,10 @@ impl Clean> for ty::RegionKind { ty::RePlaceholder(..) | ty::ReEmpty | ty::ReClosureBound(_) | - ty::ReErased => None + ty::ReErased => { + debug!("Cannot clean region {:?}", self); + None + } } } } @@ -1285,8 +1329,18 @@ pub enum WherePredicate { EqPredicate { lhs: Type, rhs: Type }, } +impl WherePredicate { + pub fn get_bounds(&self) -> Option<&[GenericBound]> { + match *self { + WherePredicate::BoundPredicate { ref bounds, .. } => Some(bounds), + WherePredicate::RegionPredicate { ref bounds, .. 
} => Some(bounds), + _ => None, + } + } +} + impl Clean for hir::WherePredicate { - fn clean(&self, cx: &DocContext) -> WherePredicate { + fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { match *self { hir::WherePredicate::BoundPredicate(ref wbp) => { WherePredicate::BoundPredicate { @@ -1312,16 +1366,16 @@ impl Clean for hir::WherePredicate { } } -impl<'a> Clean for ty::Predicate<'a> { - fn clean(&self, cx: &DocContext) -> WherePredicate { +impl<'a> Clean> for ty::Predicate<'a> { + fn clean(&self, cx: &DocContext<'_>) -> Option { use rustc::ty::Predicate; match *self { - Predicate::Trait(ref pred) => pred.clean(cx), - Predicate::Subtype(ref pred) => pred.clean(cx), + Predicate::Trait(ref pred) => Some(pred.clean(cx)), + Predicate::Subtype(ref pred) => Some(pred.clean(cx)), Predicate::RegionOutlives(ref pred) => pred.clean(cx), Predicate::TypeOutlives(ref pred) => pred.clean(cx), - Predicate::Projection(ref pred) => pred.clean(cx), + Predicate::Projection(ref pred) => Some(pred.clean(cx)), Predicate::WellFormed(..) | Predicate::ObjectSafe(..) | @@ -1332,7 +1386,7 @@ impl<'a> Clean for ty::Predicate<'a> { } impl<'a> Clean for ty::TraitPredicate<'a> { - fn clean(&self, cx: &DocContext) -> WherePredicate { + fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { WherePredicate::BoundPredicate { ty: self.trait_ref.self_ty().clean(cx), bounds: vec![self.trait_ref.clean(cx)] @@ -1341,35 +1395,50 @@ impl<'a> Clean for ty::TraitPredicate<'a> { } impl<'tcx> Clean for ty::SubtypePredicate<'tcx> { - fn clean(&self, _cx: &DocContext) -> WherePredicate { + fn clean(&self, _cx: &DocContext<'_>) -> WherePredicate { panic!("subtype predicates are an internal rustc artifact \ and should not be seen by rustdoc") } } -impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region<'tcx>> { - fn clean(&self, cx: &DocContext) -> WherePredicate { +impl<'tcx> Clean> for + ty::OutlivesPredicate,ty::Region<'tcx>> { + + fn clean(&self, cx: &DocContext<'_>) -> Option { let ty::OutlivesPredicate(ref a, ref b) = *self; - WherePredicate::RegionPredicate { + + match (a, b) { + (ty::ReEmpty, ty::ReEmpty) => { + return None; + }, + _ => {} + } + + Some(WherePredicate::RegionPredicate { lifetime: a.clean(cx).expect("failed to clean lifetime"), bounds: vec![GenericBound::Outlives(b.clean(cx).expect("failed to clean bounds"))] - } + }) } } -impl<'tcx> Clean for ty::OutlivesPredicate, ty::Region<'tcx>> { - fn clean(&self, cx: &DocContext) -> WherePredicate { +impl<'tcx> Clean> for ty::OutlivesPredicate, ty::Region<'tcx>> { + fn clean(&self, cx: &DocContext<'_>) -> Option { let ty::OutlivesPredicate(ref ty, ref lt) = *self; - WherePredicate::BoundPredicate { + match lt { + ty::ReEmpty => return None, + _ => {} + } + + Some(WherePredicate::BoundPredicate { ty: ty.clean(cx), bounds: vec![GenericBound::Outlives(lt.clean(cx).expect("failed to clean lifetimes"))] - } + }) } } impl<'tcx> Clean for ty::ProjectionPredicate<'tcx> { - fn clean(&self, cx: &DocContext) -> WherePredicate { + fn clean(&self, cx: &DocContext<'_>) -> WherePredicate { WherePredicate::EqPredicate { lhs: self.projection_ty.clean(cx), rhs: self.ty.clean(cx) @@ -1378,7 +1447,7 @@ impl<'tcx> Clean for ty::ProjectionPredicate<'tcx> { } impl<'tcx> Clean for ty::ProjectionTy<'tcx> { - fn clean(&self, cx: &DocContext) -> Type { + fn clean(&self, cx: &DocContext<'_>) -> Type { let trait_ = match self.trait_ref(cx.tcx).clean(cx) { GenericBound::TraitBound(t, _) => t.trait_, GenericBound::Outlives(_) => panic!("cleaning a trait got a lifetime"), @@ -1400,6 +1469,29 @@ 
pub enum GenericParamDefKind { default: Option, synthetic: Option, }, + Const { + did: DefId, + ty: Type, + }, +} + +impl GenericParamDefKind { + pub fn is_type(&self) -> bool { + match *self { + GenericParamDefKind::Type { .. } => true, + _ => false, + } + } + + pub fn get_type(&self, cx: &DocContext<'_>) -> Option { + match *self { + GenericParamDefKind::Type { did, .. } => { + rustc_typeck::checked_type_of(cx.tcx, did, false).map(|t| t.clean(cx)) + } + GenericParamDefKind::Const { ref ty, .. } => Some(ty.clone()), + GenericParamDefKind::Lifetime => None, + } + } } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Debug, Hash)] @@ -1412,20 +1504,36 @@ pub struct GenericParamDef { impl GenericParamDef { pub fn is_synthetic_type_param(&self) -> bool { match self.kind { - GenericParamDefKind::Lifetime => false, + GenericParamDefKind::Lifetime | + GenericParamDefKind::Const { .. } => false, GenericParamDefKind::Type { ref synthetic, .. } => synthetic.is_some(), } } + + pub fn is_type(&self) -> bool { + self.kind.is_type() + } + + pub fn get_type(&self, cx: &DocContext<'_>) -> Option { + self.kind.get_type(cx) + } + + pub fn get_bounds(&self) -> Option<&[GenericBound]> { + match self.kind { + GenericParamDefKind::Type { ref bounds, .. } => Some(bounds), + _ => None, + } + } } -impl<'tcx> Clean for ty::GenericParamDef { - fn clean(&self, cx: &DocContext) -> GenericParamDef { +impl Clean for ty::GenericParamDef { + fn clean(&self, cx: &DocContext<'_>) -> GenericParamDef { let (name, kind) = match self.kind { ty::GenericParamDefKind::Lifetime => { (self.name.to_string(), GenericParamDefKind::Lifetime) } ty::GenericParamDefKind::Type { has_default, .. } => { - cx.renderinfo.borrow_mut().external_typarams + cx.renderinfo.borrow_mut().external_param_names .insert(self.def_id, self.name.clean(cx)); let default = if has_default { Some(cx.tcx.type_of(self.def_id).clean(cx)) @@ -1439,6 +1547,12 @@ impl<'tcx> Clean for ty::GenericParamDef { synthetic: None, }) } + ty::GenericParamDefKind::Const { .. } => { + (self.name.clean(cx), GenericParamDefKind::Const { + did: self.def_id, + ty: cx.tcx.type_of(self.def_id).clean(cx), + }) + } }; GenericParamDef { @@ -1449,7 +1563,7 @@ impl<'tcx> Clean for ty::GenericParamDef { } impl Clean for hir::GenericParam { - fn clean(&self, cx: &DocContext) -> GenericParamDef { + fn clean(&self, cx: &DocContext<'_>) -> GenericParamDef { let (name, kind) = match self.kind { hir::GenericParamKind::Lifetime { .. } => { let name = if self.bounds.len() > 0 { @@ -1468,14 +1582,20 @@ impl Clean for hir::GenericParam { }; (name, GenericParamDefKind::Lifetime) } - hir::GenericParamKind::Type { ref default, synthetic, .. } => { + hir::GenericParamKind::Type { ref default, synthetic } => { (self.name.ident().name.clean(cx), GenericParamDefKind::Type { - did: cx.tcx.hir().local_def_id(self.id), + did: cx.tcx.hir().local_def_id_from_hir_id(self.hir_id), bounds: self.bounds.clean(cx), default: default.clean(cx), synthetic: synthetic, }) } + hir::GenericParamKind::Const { ref ty } => { + (self.name.ident().name.clean(cx), GenericParamDefKind::Const { + did: cx.tcx.hir().local_def_id_from_hir_id(self.hir_id), + ty: ty.clean(cx), + }) + } }; GenericParamDef { @@ -1493,7 +1613,7 @@ pub struct Generics { } impl Clean for hir::Generics { - fn clean(&self, cx: &DocContext) -> Generics { + fn clean(&self, cx: &DocContext<'_>) -> Generics { // Synthetic type-parameters are inserted after normal ones. 
// In order for normal parameters to be able to refer to synthetic ones, // scans them first. @@ -1515,6 +1635,7 @@ impl Clean for hir::Generics { GenericParamDefKind::Type { did, ref bounds, .. } => { cx.impl_trait_bounds.borrow_mut().insert(did, bounds.clone()); } + GenericParamDefKind::Const { .. } => unreachable!(), } param }) @@ -1548,6 +1669,7 @@ impl Clean for hir::Generics { break } } + GenericParamDefKind::Const { .. } => {} } } } @@ -1561,7 +1683,7 @@ impl Clean for hir::Generics { impl<'a, 'tcx> Clean for (&'a ty::Generics, &'a Lrc>) { - fn clean(&self, cx: &DocContext) -> Generics { + fn clean(&self, cx: &DocContext<'_>) -> Generics { use self::WherePredicate as WP; let (gens, preds) = *self; @@ -1578,10 +1700,13 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics, } Some(param.clean(cx)) } + ty::GenericParamDefKind::Const { .. } => { + unimplemented!() // FIXME(const_generics) + } }).collect::>(); let mut where_predicates = preds.predicates.iter() - .map(|(p, _)| p.clean(cx)) + .flat_map(|(p, _)| p.clean(cx)) .collect::>(); // Type parameters and have a Sized bound by default unless removed with @@ -1627,6 +1752,7 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics, .flat_map(|param| match param.kind { ty::GenericParamDefKind::Lifetime => Some(param.clean(cx)), ty::GenericParamDefKind::Type { .. } => None, + ty::GenericParamDefKind::Const { .. } => Some(param.clean(cx)), }).chain(simplify::ty_params(stripped_typarams).into_iter()) .collect(), where_predicates: simplify::where_clauses(cx, where_predicates), @@ -1634,22 +1760,142 @@ impl<'a, 'tcx> Clean for (&'a ty::Generics, } } +/// The point of this function is to replace bounds with types. +/// +/// i.e. `[T, U]` when you have the following bounds: `T: Display, U: Option` will return +/// `[Display, Option]` (we just returns the list of the types, we don't care about the +/// wrapped types in here). +fn get_real_types( + generics: &Generics, + arg: &Type, + cx: &DocContext<'_>, + recurse: i32, +) -> FxHashSet { + let arg_s = arg.to_string(); + let mut res = FxHashSet::default(); + if recurse >= 10 { // FIXME: remove this whole recurse thing when the recursion bug is fixed + return res; + } + if arg.is_full_generic() { + if let Some(where_pred) = generics.where_predicates.iter().find(|g| { + match g { + &WherePredicate::BoundPredicate { ref ty, .. 
} => ty.def_id() == arg.def_id(), + _ => false, + } + }) { + let bounds = where_pred.get_bounds().unwrap_or_else(|| &[]); + for bound in bounds.iter() { + match *bound { + GenericBound::TraitBound(ref poly_trait, _) => { + for x in poly_trait.generic_params.iter() { + if !x.is_type() { + continue + } + if let Some(ty) = x.get_type(cx) { + let adds = get_real_types(generics, &ty, cx, recurse + 1); + if !adds.is_empty() { + res.extend(adds); + } else if !ty.is_full_generic() { + res.insert(ty); + } + } + } + } + _ => {} + } + } + } + if let Some(bound) = generics.params.iter().find(|g| { + g.is_type() && g.name == arg_s + }) { + for bound in bound.get_bounds().unwrap_or_else(|| &[]) { + if let Some(ty) = bound.get_trait_type() { + let adds = get_real_types(generics, &ty, cx, recurse + 1); + if !adds.is_empty() { + res.extend(adds); + } else if !ty.is_full_generic() { + res.insert(ty.clone()); + } + } + } + } + } else { + res.insert(arg.clone()); + if let Some(gens) = arg.generics() { + for gen in gens.iter() { + if gen.is_full_generic() { + let adds = get_real_types(generics, gen, cx, recurse + 1); + if !adds.is_empty() { + res.extend(adds); + } + } else { + res.insert(gen.clone()); + } + } + } + } + res +} + +/// Return the full list of types when bounds have been resolved. +/// +/// i.e. `fn foo>(x: u32, y: B)` will return +/// `[u32, Display, Option]`. +pub fn get_all_types( + generics: &Generics, + decl: &FnDecl, + cx: &DocContext<'_>, +) -> (Vec, Vec) { + let mut all_types = FxHashSet::default(); + for arg in decl.inputs.values.iter() { + if arg.type_.is_self_type() { + continue; + } + let args = get_real_types(generics, &arg.type_, cx, 0); + if !args.is_empty() { + all_types.extend(args); + } else { + all_types.insert(arg.type_.clone()); + } + } + + let ret_types = match decl.output { + FunctionRetTy::Return(ref return_type) => { + let mut ret = get_real_types(generics, &return_type, cx, 0); + if ret.is_empty() { + ret.insert(return_type.clone()); + } + ret.into_iter().collect() + } + _ => Vec::new(), + }; + (all_types.into_iter().collect(), ret_types) +} + #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Method { pub generics: Generics, pub decl: FnDecl, pub header: hir::FnHeader, + pub defaultness: Option, + pub all_types: Vec, + pub ret_types: Vec, } -impl<'a> Clean for (&'a hir::MethodSig, &'a hir::Generics, hir::BodyId) { - fn clean(&self, cx: &DocContext) -> Method { +impl<'a> Clean for (&'a hir::MethodSig, &'a hir::Generics, hir::BodyId, + Option) { + fn clean(&self, cx: &DocContext<'_>) -> Method { let (generics, decl) = enter_impl_trait(cx, || { (self.1.clean(cx), (&*self.0.decl, self.2).clean(cx)) }); + let (all_types, ret_types) = get_all_types(&generics, &decl, cx); Method { decl, generics, header: self.0.header, + defaultness: self.3, + all_types, + ret_types, } } } @@ -1659,6 +1905,8 @@ pub struct TyMethod { pub header: hir::FnHeader, pub decl: FnDecl, pub generics: Generics, + pub all_types: Vec, + pub ret_types: Vec, } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] @@ -1666,14 +1914,23 @@ pub struct Function { pub decl: FnDecl, pub generics: Generics, pub header: hir::FnHeader, + pub all_types: Vec, + pub ret_types: Vec, } impl Clean for doctree::Function { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let (generics, decl) = enter_impl_trait(cx, || { (self.generics.clean(cx), (&self.decl, self.body).clean(cx)) }); + let did = cx.tcx.hir().local_def_id_from_hir_id(self.id); + let constness = if 
cx.tcx.is_min_const_fn(did) { + hir::Constness::Const + } else { + hir::Constness::NotConst + }; + let (all_types, ret_types) = get_all_types(&generics, &decl, cx); Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), @@ -1681,11 +1938,13 @@ impl Clean for doctree::Function { visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: did, inner: FunctionItem(Function { decl, generics, - header: self.header, + header: hir::FnHeader { constness, ..self.header }, + all_types, + ret_types, }), } } @@ -1695,7 +1954,6 @@ impl Clean for doctree::Function { pub struct FnDecl { pub inputs: Arguments, pub output: FunctionRetTy, - pub variadic: bool, pub attrs: Attributes, } @@ -1703,6 +1961,30 @@ impl FnDecl { pub fn self_type(&self) -> Option { self.inputs.values.get(0).and_then(|v| v.to_self()) } + + /// Returns the sugared return type for an async function. + /// + /// For example, if the return type is `impl std::future::Future`, this function + /// will return `i32`. + /// + /// # Panics + /// + /// This function will panic if the return type does not match the expected sugaring for async + /// functions. + pub fn sugared_async_return_type(&self) -> FunctionRetTy { + match &self.output { + FunctionRetTy::Return(Type::ImplTrait(bounds)) => { + match &bounds[0] { + GenericBound::TraitBound(PolyTrait { trait_, .. }, ..) => { + let bindings = trait_.bindings().unwrap(); + FunctionRetTy::Return(bindings[0].ty.clone()) + } + _ => panic!("unexpected desugaring of async function"), + } + } + _ => panic!("unexpected desugaring of async function"), + } + } } #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Debug, Hash)] @@ -1711,7 +1993,7 @@ pub struct Arguments { } impl<'a> Clean for (&'a [hir::Ty], &'a [ast::Ident]) { - fn clean(&self, cx: &DocContext) -> Arguments { + fn clean(&self, cx: &DocContext<'_>) -> Arguments { Arguments { values: self.0.iter().enumerate().map(|(i, ty)| { let mut name = self.1.get(i).map(|ident| ident.to_string()) @@ -1729,7 +2011,7 @@ impl<'a> Clean for (&'a [hir::Ty], &'a [ast::Ident]) { } impl<'a> Clean for (&'a [hir::Ty], hir::BodyId) { - fn clean(&self, cx: &DocContext) -> Arguments { + fn clean(&self, cx: &DocContext<'_>) -> Arguments { let body = cx.tcx.hir().body(self.1); Arguments { @@ -1746,20 +2028,19 @@ impl<'a> Clean for (&'a [hir::Ty], hir::BodyId) { impl<'a, A: Copy> Clean for (&'a hir::FnDecl, A) where (&'a [hir::Ty], A): Clean { - fn clean(&self, cx: &DocContext) -> FnDecl { + fn clean(&self, cx: &DocContext<'_>) -> FnDecl { FnDecl { inputs: (&self.0.inputs[..], self.1).clean(cx), output: self.0.output.clean(cx), - variadic: self.0.variadic, - attrs: Attributes::default() + attrs: Attributes::default(), } } } impl<'a, 'tcx> Clean for (DefId, ty::PolyFnSig<'tcx>) { - fn clean(&self, cx: &DocContext) -> FnDecl { + fn clean(&self, cx: &DocContext<'_>) -> FnDecl { let (did, sig) = *self; - let mut names = if cx.tcx.hir().as_local_node_id(did).is_some() { + let mut names = if cx.tcx.hir().as_local_hir_id(did).is_some() { vec![].into_iter() } else { cx.tcx.fn_arg_names(did).into_iter() @@ -1768,7 +2049,6 @@ impl<'a, 'tcx> Clean for (DefId, ty::PolyFnSig<'tcx>) { FnDecl { output: Return(sig.skip_binder().output().clean(cx)), attrs: Attributes::default(), - variadic: sig.skip_binder().variadic, inputs: Arguments { values: sig.skip_binder().inputs().iter().map(|t| { Argument { @@ -1818,7 +2098,7 @@ pub enum FunctionRetTy { } impl Clean for 
hir::FunctionRetTy { - fn clean(&self, cx: &DocContext) -> FunctionRetTy { + fn clean(&self, cx: &DocContext<'_>) -> FunctionRetTy { match *self { hir::Return(ref typ) => Return(typ.clean(cx)), hir::DefaultReturn(..) => DefaultReturn, @@ -1847,14 +2127,14 @@ pub struct Trait { } impl Clean for doctree::Trait { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let attrs = self.attrs.clean(cx); let is_spotlight = attrs.has_doc_flag("spotlight"); Item { name: Some(self.name.clean(cx)), attrs: attrs, source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -1864,15 +2144,40 @@ impl Clean for doctree::Trait { items: self.items.clean(cx), generics: self.generics.clean(cx), bounds: self.bounds.clean(cx), - is_spotlight: is_spotlight, + is_spotlight, is_auto: self.is_auto.clean(cx), }), } } } +#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +pub struct TraitAlias { + pub generics: Generics, + pub bounds: Vec, +} + +impl Clean for doctree::TraitAlias { + fn clean(&self, cx: &DocContext<'_>) -> Item { + let attrs = self.attrs.clean(cx); + Item { + name: Some(self.name.clean(cx)), + attrs, + source: self.whence.clean(cx), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), + visibility: self.vis.clean(cx), + stability: self.stab.clean(cx), + deprecation: self.depr.clean(cx), + inner: TraitAliasItem(TraitAlias { + generics: self.generics.clean(cx), + bounds: self.bounds.clean(cx), + }), + } + } +} + impl Clean for hir::IsAuto { - fn clean(&self, _: &DocContext) -> bool { + fn clean(&self, _: &DocContext<'_>) -> bool { match *self { hir::IsAuto::Yes => true, hir::IsAuto::No => false, @@ -1881,13 +2186,13 @@ impl Clean for hir::IsAuto { } impl Clean for hir::TraitRef { - fn clean(&self, cx: &DocContext) -> Type { - resolve_type(cx, self.path.clean(cx), self.ref_id) + fn clean(&self, cx: &DocContext<'_>) -> Type { + resolve_type(cx, self.path.clean(cx), self.hir_ref_id) } } impl Clean for hir::PolyTraitRef { - fn clean(&self, cx: &DocContext) -> PolyTrait { + fn clean(&self, cx: &DocContext<'_>) -> PolyTrait { PolyTrait { trait_: self.trait_ref.clean(cx), generic_params: self.bound_generic_params.clean(cx) @@ -1896,51 +2201,55 @@ impl Clean for hir::PolyTraitRef { } impl Clean for hir::TraitItem { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let inner = match self.node { hir::TraitItemKind::Const(ref ty, default) => { AssociatedConstItem(ty.clean(cx), default.map(|e| print_const_expr(cx, e))) } hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Provided(body)) => { - MethodItem((sig, &self.generics, body).clean(cx)) + MethodItem((sig, &self.generics, body, None).clean(cx)) } hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Required(ref names)) => { let (generics, decl) = enter_impl_trait(cx, || { (self.generics.clean(cx), (&*sig.decl, &names[..]).clean(cx)) }); + let (all_types, ret_types) = get_all_types(&generics, &decl, cx); TyMethodItem(TyMethod { header: sig.header, decl, generics, + all_types, + ret_types, }) } hir::TraitItemKind::Type(ref bounds, ref default) => { AssociatedTypeItem(bounds.clean(cx), default.clean(cx)) } }; + let local_did = cx.tcx.hir().local_def_id_from_hir_id(self.hir_id); Item { name: Some(self.ident.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.span.clean(cx), - def_id: 
cx.tcx.hir().local_def_id(self.id), + def_id: local_did, visibility: None, - stability: get_stability(cx, cx.tcx.hir().local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.tcx.hir().local_def_id(self.id)), + stability: get_stability(cx, local_did), + deprecation: get_deprecation(cx, local_did), inner, } } } impl Clean for hir::ImplItem { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let inner = match self.node { hir::ImplItemKind::Const(ref ty, expr) => { AssociatedConstItem(ty.clean(cx), Some(print_const_expr(cx, expr))) } hir::ImplItemKind::Method(ref sig, body) => { - MethodItem((sig, &self.generics, body).clean(cx)) + MethodItem((sig, &self.generics, body, Some(self.defaultness)).clean(cx)) } hir::ImplItemKind::Type(ref ty) => TypedefItem(Typedef { type_: ty.clean(cx), @@ -1951,21 +2260,22 @@ impl Clean for hir::ImplItem { generics: Generics::default(), }, true), }; + let local_did = cx.tcx.hir().local_def_id_from_hir_id(self.hir_id); Item { name: Some(self.ident.name.clean(cx)), source: self.span.clean(cx), attrs: self.attrs.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: local_did, visibility: self.vis.clean(cx), - stability: get_stability(cx, cx.tcx.hir().local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.tcx.hir().local_def_id(self.id)), + stability: get_stability(cx, local_did), + deprecation: get_deprecation(cx, local_did), inner, } } } impl<'tcx> Clean for ty::AssociatedItem { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let inner = match self.kind { ty::AssociatedKind::Const => { let ty = cx.tcx.type_of(self.def_id); @@ -1978,7 +2288,7 @@ impl<'tcx> Clean for ty::AssociatedItem { } ty::AssociatedKind::Method => { let generics = (cx.tcx.generics_of(self.def_id), - &cx.tcx.predicates_of(self.def_id)).clean(cx); + &cx.tcx.explicit_predicates_of(self.def_id)).clean(cx); let sig = cx.tcx.fn_sig(self.def_id); let mut decl = (self.def_id, sig).clean(cx); @@ -2008,8 +2318,9 @@ impl<'tcx> Clean for ty::AssociatedItem { ty::ImplContainer(_) => true, ty::TraitContainer(_) => self.defaultness.has_value() }; + let (all_types, ret_types) = get_all_types(&generics, &decl, cx); if provided { - let constness = if cx.tcx.is_const_fn(self.def_id) { + let constness = if cx.tcx.is_min_const_fn(self.def_id) { hir::Constness::Const } else { hir::Constness::NotConst @@ -2022,7 +2333,10 @@ impl<'tcx> Clean for ty::AssociatedItem { abi: sig.abi(), constness, asyncness: hir::IsAsync::NotAsync, - } + }, + defaultness: Some(self.defaultness), + all_types, + ret_types, }) } else { TyMethodItem(TyMethod { @@ -2033,7 +2347,9 @@ impl<'tcx> Clean for ty::AssociatedItem { abi: sig.abi(), constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, - } + }, + all_types, + ret_types, }) } } @@ -2045,7 +2361,7 @@ impl<'tcx> Clean for ty::AssociatedItem { // are actually located on the trait/impl itself, so we need to load // all of the generics from there and then look for bounds that are // applied to this associated type in question. - let predicates = cx.tcx.predicates_of(did); + let predicates = cx.tcx.explicit_predicates_of(did); let generics = (cx.tcx.generics_of(did), &predicates).clean(cx); let mut bounds = generics.where_predicates.iter().filter_map(|pred| { let (name, self_type, trait_, bounds) = match *pred { @@ -2126,12 +2442,12 @@ pub struct PolyTrait { /// it does not preserve mutability or boxes. 
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Debug, Hash)] pub enum Type { - /// structs/enums/traits (most that'd be an hir::TyKind::Path) + /// Structs/enums/traits (most that'd be an `hir::TyKind::Path`). ResolvedPath { path: Path, - typarams: Option>, + param_names: Option>, did: DefId, - /// true if is a `T::Name` path for associated types + /// `true` if is a `T::Name` path for associated types. is_generic: bool, }, /// For parameterized types, so the consumer of the JSON don't go @@ -2146,6 +2462,7 @@ pub enum Type { Slice(Box), Array(Box, String), Never, + CVarArgs, Unique(Box), RawPointer(Mutability, Box), BorrowedRef { @@ -2184,6 +2501,7 @@ pub enum PrimitiveType { Reference, Fn, Never, + CVarArgs, } #[derive(Clone, RustcEncodable, RustcDecodable, Copy, Debug)] @@ -2202,6 +2520,7 @@ pub enum TypeKind { Macro, Attr, Derive, + TraitAlias, } pub trait GetDefId { @@ -2247,12 +2566,15 @@ impl Type { } } - pub fn generics(&self) -> Option<&[Type]> { + pub fn generics(&self) -> Option> { match *self { ResolvedPath { ref path, .. } => { path.segments.last().and_then(|seg| { - if let GenericArgs::AngleBracketed { ref types, .. } = seg.args { - Some(&**types) + if let GenericArgs::AngleBracketed { ref args, .. } = seg.args { + Some(args.iter().filter_map(|arg| match arg { + GenericArg::Type(ty) => Some(ty.clone()), + _ => None, + }).collect()) } else { None } @@ -2261,13 +2583,35 @@ impl Type { _ => None, } } + + pub fn bindings(&self) -> Option<&[TypeBinding]> { + match *self { + ResolvedPath { ref path, .. } => { + path.segments.last().and_then(|seg| { + if let GenericArgs::AngleBracketed { ref bindings, .. } = seg.args { + Some(&**bindings) + } else { + None + } + }) + } + _ => None + } + } + + pub fn is_full_generic(&self) -> bool { + match *self { + Type::Generic(_) => true, + _ => false, + } + } } impl GetDefId for Type { fn def_id(&self) -> Option { match *self { ResolvedPath { did, .. } => Some(did), - Primitive(p) => ::html::render::cache().primitive_locations.get(&p).cloned(), + Primitive(p) => crate::html::render::cache().primitive_locations.get(&p).cloned(), BorrowedRef { type_: box Generic(..), .. } => Primitive(PrimitiveType::Reference).def_id(), BorrowedRef { ref type_, .. 
} => type_.def_id(), @@ -2347,6 +2691,7 @@ impl PrimitiveType { Reference => "reference", Fn => "fn", Never => "never", + CVarArgs => "...", } } @@ -2391,11 +2736,12 @@ impl From for PrimitiveType { } impl Clean for hir::Ty { - fn clean(&self, cx: &DocContext) -> Type { + fn clean(&self, cx: &DocContext<'_>) -> Type { use rustc::hir::*; match self.node { TyKind::Never => Never, + TyKind::CVarArgs(_) => CVarArgs, TyKind::Ptr(ref m) => RawPointer(m.mutbl.clean(cx), box m.ty.clean(cx)), TyKind::Rptr(ref l, ref m) => { let lifetime = if l.is_elided() { @@ -2408,22 +2754,22 @@ impl Clean for hir::Ty { } TyKind::Slice(ref ty) => Slice(box ty.clean(cx)), TyKind::Array(ref ty, ref length) => { - let def_id = cx.tcx.hir().local_def_id(length.id); + let def_id = cx.tcx.hir().local_def_id_from_hir_id(length.hir_id); let param_env = cx.tcx.param_env(def_id); - let substs = Substs::identity_for_item(cx.tcx, def_id); + let substs = InternalSubsts::identity_for_item(cx.tcx, def_id); let cid = GlobalId { instance: ty::Instance::new(def_id, substs), promoted: None }; - let length = cx.tcx.const_eval(param_env.and(cid)).unwrap_or_else(|_| { - ty::Const::unevaluated(cx.tcx, def_id, substs, cx.tcx.types.usize) - }); - let length = print_const(cx, length); + let length = match cx.tcx.const_eval(param_env.and(cid)) { + Ok(length) => print_const(cx, length), + Err(_) => "_".to_string(), + }; Array(box ty.clean(cx), length) }, TyKind::Tup(ref tys) => Tuple(tys.clean(cx)), TyKind::Def(item_id, _) => { - let item = cx.tcx.hir().expect_item(item_id.id); + let item = cx.tcx.hir().expect_item_by_hir_id(item_id.id); if let hir::ItemKind::Existential(ref ty) = item.node { ImplTrait(ty.bounds.clean(cx)) } else { @@ -2444,9 +2790,9 @@ impl Clean for hir::Ty { let mut alias = None; if let Def::TyAlias(def_id) = path.def { // Substitute private type aliases - if let Some(node_id) = cx.tcx.hir().as_local_node_id(def_id) { + if let Some(hir_id) = cx.tcx.hir().as_local_hir_id(def_id) { if !cx.renderinfo.borrow().access_levels.is_exported(def_id) { - alias = Some(&cx.tcx.hir().expect_item(node_id).node); + alias = Some(&cx.tcx.hir().expect_item_by_hir_id(hir_id).node); } } }; @@ -2455,6 +2801,7 @@ impl Clean for hir::Ty { let provided_params = &path.segments.last().expect("segments were empty"); let mut ty_substs = FxHashMap::default(); let mut lt_substs = FxHashMap::default(); + let mut const_substs = FxHashMap::default(); provided_params.with_generic_args(|generic_args| { let mut indices: GenericParamCount = Default::default(); for param in generics.params.iter() { @@ -2463,7 +2810,7 @@ impl Clean for hir::Ty { let mut j = 0; let lifetime = generic_args.args.iter().find_map(|arg| { match arg { - GenericArg::Lifetime(lt) => { + hir::GenericArg::Lifetime(lt) => { if indices.lifetimes == j { return Some(lt); } @@ -2476,7 +2823,7 @@ impl Clean for hir::Ty { if let Some(lt) = lifetime.cloned() { if !lt.is_elided() { let lt_def_id = - cx.tcx.hir().local_def_id(param.id); + cx.tcx.hir().local_def_id_from_hir_id(param.hir_id); lt_substs.insert(lt_def_id, lt.clean(cx)); } } @@ -2484,11 +2831,12 @@ impl Clean for hir::Ty { } hir::GenericParamKind::Type { ref default, .. 
} => { let ty_param_def = - Def::TyParam(cx.tcx.hir().local_def_id(param.id)); + Def::TyParam( + cx.tcx.hir().local_def_id_from_hir_id(param.hir_id)); let mut j = 0; let type_ = generic_args.args.iter().find_map(|arg| { match arg { - GenericArg::Type(ty) => { + hir::GenericArg::Type(ty) => { if indices.types == j { return Some(ty); } @@ -2506,12 +2854,35 @@ impl Clean for hir::Ty { } indices.types += 1; } + hir::GenericParamKind::Const { .. } => { + let const_param_def = + Def::ConstParam( + cx.tcx.hir().local_def_id_from_hir_id(param.hir_id)); + let mut j = 0; + let const_ = generic_args.args.iter().find_map(|arg| { + match arg { + hir::GenericArg::Const(ct) => { + if indices.consts == j { + return Some(ct); + } + j += 1; + None + } + _ => None, + } + }); + if let Some(ct) = const_.cloned() { + const_substs.insert(const_param_def, ct.clean(cx)); + } + // FIXME(const_generics:defaults) + indices.consts += 1; + } } } }); - return cx.enter_alias(ty_substs, lt_substs, || ty.clean(cx)); + return cx.enter_alias(ty_substs, lt_substs, const_substs, || ty.clean(cx)); } - resolve_type(cx, path.clean(cx), self.id) + resolve_type(cx, path.clean(cx), self.hir_id) } TyKind::Path(hir::QPath::Resolved(Some(ref qself), ref p)) => { let mut segments: Vec<_> = p.segments.clone().into(); @@ -2524,7 +2895,7 @@ impl Clean for hir::Ty { Type::QPath { name: p.segments.last().expect("segments were empty").ident.name.clean(cx), self_type: box qself.clean(cx), - trait_: box resolve_type(cx, trait_path.clean(cx), self.id) + trait_: box resolve_type(cx, trait_path.clean(cx), self.hir_id) } } TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => { @@ -2541,12 +2912,12 @@ impl Clean for hir::Ty { Type::QPath { name: segment.ident.name.clean(cx), self_type: box qself.clean(cx), - trait_: box resolve_type(cx, trait_path.clean(cx), self.id) + trait_: box resolve_type(cx, trait_path.clean(cx), self.hir_id) } } TyKind::TraitObject(ref bounds, ref lifetime) => { match bounds[0].clean(cx).trait_ { - ResolvedPath { path, typarams: None, did, is_generic } => { + ResolvedPath { path, param_names: None, did, is_generic } => { let mut bounds: Vec = bounds[1..].iter().map(|bound| { self::GenericBound::TraitBound(bound.clean(cx), hir::TraitBoundModifier::None) @@ -2554,7 +2925,7 @@ impl Clean for hir::Ty { if !lifetime.is_elided() { bounds.push(self::GenericBound::Outlives(lifetime.clean(cx))); } - ResolvedPath { path, typarams: Some(bounds), did, is_generic, } + ResolvedPath { path, param_names: Some(bounds), did, is_generic, } } _ => Infer // shouldn't happen } @@ -2567,7 +2938,7 @@ impl Clean for hir::Ty { } impl<'tcx> Clean for Ty<'tcx> { - fn clean(&self, cx: &DocContext) -> Type { + fn clean(&self, cx: &DocContext<'_>) -> Type { match self.sty { ty::Never => Never, ty::Bool => Primitive(PrimitiveType::Bool), @@ -2578,7 +2949,7 @@ impl<'tcx> Clean for Ty<'tcx> { ty::Str => Primitive(PrimitiveType::Str), ty::Slice(ty) => Slice(box ty.clean(cx)), ty::Array(ty, n) => { - let mut n = cx.tcx.lift(&n).expect("array lift failed"); + let mut n = *cx.tcx.lift(&n).expect("array lift failed"); if let ConstValue::Unevaluated(def_id, substs) = n.val { let param_env = cx.tcx.param_env(def_id); let cid = GlobalId { @@ -2621,7 +2992,7 @@ impl<'tcx> Clean for Ty<'tcx> { None, false, vec![], substs); ResolvedPath { path, - typarams: None, + param_names: None, did, is_generic: false, } @@ -2629,22 +3000,33 @@ impl<'tcx> Clean for Ty<'tcx> { ty::Foreign(did) => { inline::record_extern_fqn(cx, did, TypeKind::Foreign); let path = 
external_path(cx, &cx.tcx.item_name(did).as_str(), - None, false, vec![], Substs::empty()); + None, false, vec![], InternalSubsts::empty()); ResolvedPath { path: path, - typarams: None, + param_names: None, did: did, is_generic: false, } } ty::Dynamic(ref obj, ref reg) => { - let principal = obj.principal(); - let did = principal.def_id(); + // HACK: pick the first `did` as the `did` of the trait object. Someone + // might want to implement "native" support for marker-trait-only + // trait objects. + let mut dids = obj.principal_def_id().into_iter().chain(obj.auto_traits()); + let did = dids.next().unwrap_or_else(|| { + panic!("found trait object `{:?}` with no traits?", self) + }); + let substs = match obj.principal() { + Some(principal) => principal.skip_binder().substs, + // marker traits have no substs. + _ => cx.tcx.intern_substs(&[]) + }; + inline::record_extern_fqn(cx, did, TypeKind::Trait); - let mut typarams = vec![]; - reg.clean(cx).map(|b| typarams.push(GenericBound::Outlives(b))); - for did in obj.auto_traits() { + let mut param_names = vec![]; + reg.clean(cx).map(|b| param_names.push(GenericBound::Outlives(b))); + for did in dids { let empty = cx.tcx.intern_substs(&[]); let path = external_path(cx, &cx.tcx.item_name(did).as_str(), Some(did), false, vec![], empty); @@ -2652,13 +3034,13 @@ impl<'tcx> Clean for Ty<'tcx> { let bound = GenericBound::TraitBound(PolyTrait { trait_: ResolvedPath { path, - typarams: None, + param_names: None, did, is_generic: false, }, generic_params: Vec::new(), }, hir::TraitBoundModifier::None); - typarams.push(bound); + param_names.push(bound); } let mut bindings = vec![]; @@ -2670,10 +3052,10 @@ impl<'tcx> Clean for Ty<'tcx> { } let path = external_path(cx, &cx.tcx.item_name(did).as_str(), Some(did), - false, bindings, principal.skip_binder().substs); + false, bindings, substs); ResolvedPath { path, - typarams: Some(typarams), + param_names: Some(param_names), did, is_generic: false, } @@ -2687,7 +3069,7 @@ impl<'tcx> Clean for Ty<'tcx> { ty::Opaque(def_id, substs) => { // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`, // by looking up the projections associated with the def_id. 
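// Illustrative note, not part of the patch: for a source item such as
//     fn make_iter() -> impl Iterator<Item = u8> + Send { 0..3 }
// the opaque type's predicate list contains, roughly, `Opaque: Iterator`,
// `<Opaque as Iterator>::Item == u8`, and `Opaque: Send`; the lines below walk those
// predicates (plus any outlives bounds) to rebuild the `impl Trait` bound list that
// rustdoc renders.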
- let predicates_of = cx.tcx.predicates_of(def_id); + let predicates_of = cx.tcx.explicit_predicates_of(def_id); let substs = cx.tcx.lift(&substs).expect("Opaque lift failed"); let bounds = predicates_of.instantiate(cx.tcx, substs); let mut regions = vec![]; @@ -2750,23 +3132,34 @@ impl<'tcx> Clean for Ty<'tcx> { } } +impl<'tcx> Clean for ty::Const<'tcx> { + fn clean(&self, cx: &DocContext<'_>) -> Constant { + Constant { + type_: self.ty.clean(cx), + expr: format!("{:?}", self.val), // FIXME(const_generics) + } + } +} + impl Clean for hir::StructField { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { + let local_did = cx.tcx.hir().local_def_id_from_hir_id(self.hir_id); + Item { name: Some(self.ident.name).clean(cx), attrs: self.attrs.clean(cx), source: self.span.clean(cx), visibility: self.vis.clean(cx), - stability: get_stability(cx, cx.tcx.hir().local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.tcx.hir().local_def_id(self.id)), - def_id: cx.tcx.hir().local_def_id(self.id), + stability: get_stability(cx, local_did), + deprecation: get_deprecation(cx, local_did), + def_id: local_did, inner: StructFieldItem(self.ty.clean(cx)), } } } impl<'tcx> Clean for ty::FieldDef { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.ident.name).clean(cx), attrs: cx.tcx.get_attrs(self.did).clean(cx), @@ -2789,7 +3182,7 @@ pub enum Visibility { } impl Clean> for hir::Visibility { - fn clean(&self, cx: &DocContext) -> Option { + fn clean(&self, cx: &DocContext<'_>) -> Option { Some(match self.node { hir::VisibilityKind::Public => Visibility::Public, hir::VisibilityKind::Inherited => Visibility::Inherited, @@ -2804,7 +3197,7 @@ impl Clean> for hir::Visibility { } impl Clean> for ty::Visibility { - fn clean(&self, _: &DocContext) -> Option { + fn clean(&self, _: &DocContext<'_>) -> Option { Some(if *self == ty::Visibility::Public { Public } else { Inherited }) } } @@ -2826,12 +3219,12 @@ pub struct Union { } impl Clean for doctree::Struct { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -2846,12 +3239,12 @@ impl Clean for doctree::Struct { } impl Clean for doctree::Union { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -2876,7 +3269,7 @@ pub struct VariantStruct { } impl Clean for ::rustc::hir::VariantData { - fn clean(&self, cx: &DocContext) -> VariantStruct { + fn clean(&self, cx: &DocContext<'_>) -> VariantStruct { VariantStruct { struct_type: doctree::struct_type_from_def(self), fields: self.fields().iter().map(|x| x.clean(cx)).collect(), @@ -2893,12 +3286,12 @@ pub struct Enum { } impl Clean for doctree::Enum { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: 
self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -2917,7 +3310,7 @@ pub struct Variant { } impl Clean for doctree::Variant { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), @@ -2925,7 +3318,7 @@ impl Clean for doctree::Variant { visibility: None, stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.def.id()), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), inner: VariantItem(Variant { kind: self.def.clean(cx), }), @@ -2934,7 +3327,7 @@ impl Clean for doctree::Variant { } impl<'tcx> Clean for ty::VariantDef { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let kind = match self.ctor_kind { CtorKind::Const => VariantKind::CLike, CtorKind::Fn => { @@ -2962,14 +3355,14 @@ impl<'tcx> Clean for ty::VariantDef { } }; Item { - name: Some(self.name.clean(cx)), - attrs: inline::load_attrs(cx, self.did), - source: cx.tcx.def_span(self.did).clean(cx), + name: Some(self.ident.clean(cx)), + attrs: inline::load_attrs(cx, self.def_id), + source: cx.tcx.def_span(self.def_id).clean(cx), visibility: Some(Inherited), - def_id: self.did, + def_id: self.def_id, inner: VariantItem(Variant { kind }), - stability: get_stability(cx, self.did), - deprecation: get_deprecation(cx, self.did), + stability: get_stability(cx, self.def_id), + deprecation: get_deprecation(cx, self.def_id), } } } @@ -2982,13 +3375,12 @@ pub enum VariantKind { } impl Clean for hir::VariantData { - fn clean(&self, cx: &DocContext) -> VariantKind { - if self.is_struct() { - VariantKind::Struct(self.clean(cx)) - } else if self.is_unit() { - VariantKind::CLike - } else { - VariantKind::Tuple(self.fields().iter().map(|x| x.ty.clean(cx)).collect()) + fn clean(&self, cx: &DocContext<'_>) -> VariantKind { + match self { + hir::VariantData::Struct(..) => VariantKind::Struct(self.clean(cx)), + hir::VariantData::Tuple(..) => + VariantKind::Tuple(self.fields().iter().map(|x| x.ty.clean(cx)).collect()), + hir::VariantData::Unit(..) 
=> VariantKind::CLike, } } } @@ -3013,7 +3405,7 @@ impl Span { } impl Clean for syntax_pos::Span { - fn clean(&self, cx: &DocContext) -> Span { + fn clean(&self, cx: &DocContext<'_>) -> Span { if self.is_dummy() { return Span::empty(); } @@ -3046,7 +3438,7 @@ impl Path { } impl Clean for hir::Path { - fn clean(&self, cx: &DocContext) -> Path { + fn clean(&self, cx: &DocContext<'_>) -> Path { Path { global: self.is_global(), def: self.def, @@ -3055,11 +3447,27 @@ impl Clean for hir::Path { } } +#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Debug, Hash)] +pub enum GenericArg { + Lifetime(Lifetime), + Type(Type), + Const(Constant), +} + +impl fmt::Display for GenericArg { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + GenericArg::Lifetime(lt) => lt.fmt(f), + GenericArg::Type(ty) => ty.fmt(f), + GenericArg::Const(ct) => ct.fmt(f), + } + } +} + #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Eq, Debug, Hash)] pub enum GenericArgs { AngleBracketed { - lifetimes: Vec, - types: Vec, + args: Vec, bindings: Vec, }, Parenthesized { @@ -3069,7 +3477,7 @@ pub enum GenericArgs { } impl Clean for hir::GenericArgs { - fn clean(&self, cx: &DocContext) -> GenericArgs { + fn clean(&self, cx: &DocContext<'_>) -> GenericArgs { if self.parenthesized { let output = self.bindings[0].ty.clean(cx); GenericArgs::Parenthesized { @@ -3077,24 +3485,19 @@ impl Clean for hir::GenericArgs { output: if output != Type::Tuple(Vec::new()) { Some(output) } else { None } } } else { - let (mut lifetimes, mut types) = (vec![], vec![]); - let mut elided_lifetimes = true; - for arg in &self.args { - match arg { - GenericArg::Lifetime(lt) => { - if !lt.is_elided() { - elided_lifetimes = false; - } - lifetimes.push(lt.clean(cx)); - } - GenericArg::Type(ty) => { - types.push(ty.clean(cx)); - } - } - } + let elide_lifetimes = self.args.iter().all(|arg| match arg { + hir::GenericArg::Lifetime(lt) => lt.is_elided(), + _ => true, + }); GenericArgs::AngleBracketed { - lifetimes: if elided_lifetimes { vec![] } else { lifetimes }, - types, + args: self.args.iter().filter_map(|arg| match arg { + hir::GenericArg::Lifetime(lt) if !elide_lifetimes => { + Some(GenericArg::Lifetime(lt.clean(cx))) + } + hir::GenericArg::Lifetime(_) => None, + hir::GenericArg::Type(ty) => Some(GenericArg::Type(ty.clean(cx))), + hir::GenericArg::Const(ct) => Some(GenericArg::Const(ct.clean(cx))), + }).collect(), bindings: self.bindings.clean(cx), } } @@ -3108,7 +3511,7 @@ pub struct PathSegment { } impl Clean for hir::PathSegment { - fn clean(&self, cx: &DocContext) -> PathSegment { + fn clean(&self, cx: &DocContext<'_>) -> PathSegment { PathSegment { name: self.ident.name.clean(cx), args: self.with_generic_args(|generic_args| generic_args.clean(cx)) @@ -3118,8 +3521,8 @@ impl Clean for hir::PathSegment { fn strip_type(ty: Type) -> Type { match ty { - Type::ResolvedPath { path, typarams, did, is_generic } => { - Type::ResolvedPath { path: strip_path(&path), typarams, did, is_generic } + Type::ResolvedPath { path, param_names, did, is_generic } => { + Type::ResolvedPath { path: strip_path(&path), param_names, did, is_generic } } Type::Tuple(inner_tys) => { Type::Tuple(inner_tys.iter().map(|t| strip_type(t.clone())).collect()) @@ -3146,9 +3549,8 @@ fn strip_path(path: &Path) -> Path { PathSegment { name: s.name.clone(), args: GenericArgs::AngleBracketed { - lifetimes: Vec::new(), - types: Vec::new(), - bindings: Vec::new(), + args: vec![], + bindings: vec![], } } }).collect(); @@ -3178,14 +3580,23 @@ fn 
qpath_to_string(p: &hir::QPath) -> String { s } +impl Clean for Ident { + #[inline] + fn clean(&self, cx: &DocContext<'_>) -> String { + self.name.clean(cx) + } +} + impl Clean for ast::Name { - fn clean(&self, _: &DocContext) -> String { + #[inline] + fn clean(&self, _: &DocContext<'_>) -> String { self.to_string() } } impl Clean for InternedString { - fn clean(&self, _: &DocContext) -> String { + #[inline] + fn clean(&self, _: &DocContext<'_>) -> String { self.to_string() } } @@ -3197,12 +3608,12 @@ pub struct Typedef { } impl Clean for doctree::Typedef { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id.clone()), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -3221,12 +3632,12 @@ pub struct Existential { } impl Clean for doctree::Existential { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id.clone()), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -3247,7 +3658,7 @@ pub struct BareFunctionDecl { } impl Clean for hir::BareFnTy { - fn clean(&self, cx: &DocContext) -> BareFunctionDecl { + fn clean(&self, cx: &DocContext<'_>) -> BareFunctionDecl { let (generic_params, decl) = enter_impl_trait(cx, || { (self.generic_params.clean(cx), (&*self.decl, &self.arg_names[..]).clean(cx)) }); @@ -3271,13 +3682,13 @@ pub struct Static { } impl Clean for doctree::Static { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { debug!("cleaning static {}: {:?}", self.name.clean(cx), self); Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -3290,19 +3701,19 @@ impl Clean for doctree::Static { } } -#[derive(Clone, RustcEncodable, RustcDecodable, Debug)] +#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug)] pub struct Constant { pub type_: Type, pub expr: String, } impl Clean for doctree::Constant { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -3321,7 +3732,7 @@ pub enum Mutability { } impl Clean for hir::Mutability { - fn clean(&self, _: &DocContext) -> Mutability { + fn clean(&self, _: &DocContext<'_>) -> Mutability { match self { &hir::MutMutable => Mutable, &hir::MutImmutable => Immutable, @@ -3336,7 +3747,7 @@ pub enum ImplPolarity { } impl Clean for hir::ImplPolarity { - fn clean(&self, _: &DocContext) -> ImplPolarity { + fn clean(&self, _: &DocContext<'_>) -> ImplPolarity { match self { &hir::ImplPolarity::Positive => ImplPolarity::Positive, 
&hir::ImplPolarity::Negative => ImplPolarity::Negative, @@ -3357,30 +3768,44 @@ pub struct Impl { pub blanket_impl: Option, } -pub fn get_auto_traits_with_node_id(cx: &DocContext, id: ast::NodeId, name: String) -> Vec { +pub fn get_auto_traits_with_hir_id( + cx: &DocContext<'_>, + id: hir::HirId, + name: String +) -> Vec { let finder = AutoTraitFinder::new(cx); - finder.get_with_node_id(id, name) + finder.get_with_hir_id(id, name) } -pub fn get_auto_traits_with_def_id(cx: &DocContext, id: DefId) -> Vec { +pub fn get_auto_traits_with_def_id( + cx: &DocContext<'_>, + id: DefId +) -> Vec { let finder = AutoTraitFinder::new(cx); finder.get_with_def_id(id) } -pub fn get_blanket_impls_with_node_id(cx: &DocContext, id: ast::NodeId, name: String) -> Vec { +pub fn get_blanket_impls_with_hir_id( + cx: &DocContext<'_>, + id: hir::HirId, + name: String +) -> Vec { let finder = BlanketImplFinder::new(cx); - finder.get_with_node_id(id, name) + finder.get_with_hir_id(id, name) } -pub fn get_blanket_impls_with_def_id(cx: &DocContext, id: DefId) -> Vec { +pub fn get_blanket_impls_with_def_id( + cx: &DocContext<'_>, + id: DefId +) -> Vec { let finder = BlanketImplFinder::new(cx); finder.get_with_def_id(id) } impl Clean> for doctree::Impl { - fn clean(&self, cx: &DocContext) -> Vec { + fn clean(&self, cx: &DocContext<'_>) -> Vec { let mut ret = Vec::new(); let trait_ = self.trait_.clean(cx); let items = self.items.clean(cx); @@ -3402,7 +3827,7 @@ impl Clean> for doctree::Impl { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), visibility: self.vis.clean(cx), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), @@ -3422,7 +3847,7 @@ impl Clean> for doctree::Impl { } } -fn build_deref_target_impls(cx: &DocContext, +fn build_deref_target_impls(cx: &DocContext<'_>, items: &[Item], ret: &mut Vec) { use self::PrimitiveType::*; @@ -3470,6 +3895,7 @@ fn build_deref_target_impls(cx: &DocContext, Reference => None, Fn => None, Never => None, + CVarArgs => tcx.lang_items().va_list(), }; if let Some(did) = did { if !did.is_local() { @@ -3479,9 +3905,30 @@ fn build_deref_target_impls(cx: &DocContext, } } -impl Clean for doctree::ExternCrate { - fn clean(&self, cx: &DocContext) -> Item { - Item { +impl Clean> for doctree::ExternCrate { + fn clean(&self, cx: &DocContext<'_>) -> Vec { + + let please_inline = self.vis.node.is_pub() && self.attrs.iter().any(|a| { + a.check_name("doc") && match a.meta_item_list() { + Some(l) => attr::list_contains_name(&l, "inline"), + None => false, + } + }); + + if please_inline { + let mut visited = FxHashSet::default(); + + let def = Def::Mod(DefId { + krate: self.cnum, + index: CRATE_DEF_INDEX, + }); + + if let Some(items) = inline::try_inline(cx, def, self.name, &mut visited) { + return items; + } + } + + vec![Item { name: None, attrs: self.attrs.clean(cx), source: self.whence.clean(cx), @@ -3490,18 +3937,18 @@ impl Clean for doctree::ExternCrate { stability: None, deprecation: None, inner: ExternCrateItem(self.name.clean(cx), self.path.clone()) - } + }] } } impl Clean> for doctree::Import { - fn clean(&self, cx: &DocContext) -> Vec { + fn clean(&self, cx: &DocContext<'_>) -> Vec { // We consider inlining the documentation of `pub use` statements, but we // forcefully don't inline if this is not public or if the // #[doc(no_inline)] attribute is present. // Don't inline doc(hidden) imports so they can be stripped at a later stage. 
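// Assumed example, not in the patch: `pub use foo::Bar;` is a candidate for inlining, while a
// private `use foo::Bar;`, a `#[doc(no_inline)] pub use foo::Bar;`, or a
// `#[doc(hidden)] pub use foo::Bar;` all set `denied` below and are kept as plain re-exports
// to be rendered or stripped by a later pass.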
let mut denied = !self.vis.node.is_pub() || self.attrs.iter().any(|a| { - a.name() == "doc" && match a.meta_item_list() { + a.check_name("doc") && match a.meta_item_list() { Some(l) => attr::list_contains_name(&l, "no_inline") || attr::list_contains_name(&l, "hidden"), None => false, @@ -3569,7 +4016,7 @@ pub struct ImportSource { } impl Clean> for hir::ForeignMod { - fn clean(&self, cx: &DocContext) -> Vec { + fn clean(&self, cx: &DocContext<'_>) -> Vec { let mut items = self.items.clean(cx); for item in &mut items { if let ForeignFunctionItem(ref mut f) = item.inner { @@ -3581,12 +4028,13 @@ impl Clean> for hir::ForeignMod { } impl Clean for hir::ForeignItem { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let inner = match self.node { hir::ForeignItemKind::Fn(ref decl, ref names, ref generics) => { let (generics, decl) = enter_impl_trait(cx, || { (generics.clean(cx), (&**decl, &names[..]).clean(cx)) }); + let (all_types, ret_types) = get_all_types(&generics, &decl, cx); ForeignFunctionItem(Function { decl, generics, @@ -3596,6 +4044,8 @@ impl Clean for hir::ForeignItem { constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, }, + all_types, + ret_types, }) } hir::ForeignItemKind::Static(ref ty, mutbl) => { @@ -3610,14 +4060,16 @@ impl Clean for hir::ForeignItem { } }; + let local_did = cx.tcx.hir().local_def_id_from_hir_id(self.hir_id); + Item { - name: Some(self.name.clean(cx)), + name: Some(self.ident.clean(cx)), attrs: self.attrs.clean(cx), source: self.span.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: local_did, visibility: self.vis.clean(cx), - stability: get_stability(cx, cx.tcx.hir().local_def_id(self.id)), - deprecation: get_deprecation(cx, cx.tcx.hir().local_def_id(self.id)), + stability: get_stability(cx, local_did), + deprecation: get_deprecation(cx, local_did), inner, } } @@ -3626,11 +4078,11 @@ impl Clean for hir::ForeignItem { // Utilities pub trait ToSource { - fn to_src(&self, cx: &DocContext) -> String; + fn to_src(&self, cx: &DocContext<'_>) -> String; } impl ToSource for syntax_pos::Span { - fn to_src(&self, cx: &DocContext) -> String { + fn to_src(&self, cx: &DocContext<'_>) -> String { debug!("converting span {:?} to snippet", self.clean(cx)); let sn = match cx.sess().source_map().span_to_snippet(*self) { Ok(x) => x, @@ -3677,11 +4129,11 @@ fn name_from_pat(p: &hir::Pat) -> String { } } -fn print_const(cx: &DocContext, n: &ty::Const) -> String { +fn print_const(cx: &DocContext<'_>, n: ty::Const<'_>) -> String { match n.val { ConstValue::Unevaluated(def_id, _) => { - if let Some(node_id) = cx.tcx.hir().as_local_node_id(def_id) { - print_const_expr(cx, cx.tcx.hir().body_owned_by(node_id)) + if let Some(hir_id) = cx.tcx.hir().as_local_hir_id(def_id) { + print_const_expr(cx, cx.tcx.hir().body_owned_by(hir_id)) } else { inline::print_inlined_const(cx, def_id) } @@ -3699,15 +4151,15 @@ fn print_const(cx: &DocContext, n: &ty::Const) -> String { } } -fn print_const_expr(cx: &DocContext, body: hir::BodyId) -> String { - cx.tcx.hir().node_to_pretty_string(body.node_id) +fn print_const_expr(cx: &DocContext<'_>, body: hir::BodyId) -> String { + cx.tcx.hir().hir_to_pretty_string(body.hir_id) } /// Given a type Path, resolve it to a Type using the TyCtxt -fn resolve_type(cx: &DocContext, +fn resolve_type(cx: &DocContext<'_>, path: Path, - id: ast::NodeId) -> Type { - if id == ast::DUMMY_NODE_ID { + id: hir::HirId) -> Type { + if id == hir::DUMMY_HIR_ID { debug!("resolve_type({:?})", path); } 
else { debug!("resolve_type({:?},{:?})", path, id); @@ -3732,10 +4184,10 @@ fn resolve_type(cx: &DocContext, _ => false, }; let did = register_def(&*cx, path.def); - ResolvedPath { path: path, typarams: None, did: did, is_generic: is_generic } + ResolvedPath { path: path, param_names: None, did: did, is_generic: is_generic } } -pub fn register_def(cx: &DocContext, def: Def) -> DefId { +pub fn register_def(cx: &DocContext<'_>, def: Def) -> DefId { debug!("register_def({:?})", def); let (did, kind) = match def { @@ -3749,7 +4201,7 @@ pub fn register_def(cx: &DocContext, def: Def) -> DefId { Def::ForeignTy(i) => (i, TypeKind::Foreign), Def::Const(i) => (i, TypeKind::Const), Def::Static(i, _) => (i, TypeKind::Static), - Def::Variant(i) => (cx.tcx.parent_def_id(i).expect("cannot get parent def id"), + Def::Variant(i) => (cx.tcx.parent(i).expect("cannot get parent def id"), TypeKind::Enum), Def::Macro(i, mac_kind) => match mac_kind { MacroKind::Bang => (i, TypeKind::Macro), @@ -3757,10 +4209,9 @@ pub fn register_def(cx: &DocContext, def: Def) -> DefId { MacroKind::Derive => (i, TypeKind::Derive), MacroKind::ProcMacroStub => unreachable!(), }, + Def::TraitAlias(i) => (i, TypeKind::TraitAlias), Def::SelfTy(Some(def_id), _) => (def_id, TypeKind::Trait), - Def::SelfTy(_, Some(impl_def_id)) => { - return impl_def_id - } + Def::SelfTy(_, Some(impl_def_id)) => return impl_def_id, _ => return def.def_id() }; if did.is_local() { return did } @@ -3771,9 +4222,9 @@ pub fn register_def(cx: &DocContext, def: Def) -> DefId { did } -fn resolve_use_source(cx: &DocContext, path: Path) -> ImportSource { +fn resolve_use_source(cx: &DocContext<'_>, path: Path) -> ImportSource { ImportSource { - did: if path.def == Def::Err { + did: if path.def.opt_def_id().is_none() { None } else { Some(register_def(cx, path.def)) @@ -3789,7 +4240,7 @@ pub struct Macro { } impl Clean for doctree::Macro { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { let name = self.name.clean(cx); Item { name: Some(name.clone()), @@ -3818,7 +4269,7 @@ pub struct ProcMacro { } impl Clean for doctree::ProcMacro { - fn clean(&self, cx: &DocContext) -> Item { + fn clean(&self, cx: &DocContext<'_>) -> Item { Item { name: Some(self.name.clean(cx)), attrs: self.attrs.clean(cx), @@ -3826,7 +4277,7 @@ impl Clean for doctree::ProcMacro { visibility: Some(Public), stability: self.stab.clean(cx), deprecation: self.depr.clean(cx), - def_id: cx.tcx.hir().local_def_id(self.id), + def_id: cx.tcx.hir().local_def_id_from_hir_id(self.id), inner: ProcMacroItem(ProcMacro { kind: self.kind, helpers: self.helpers.clean(cx), @@ -3838,40 +4289,37 @@ impl Clean for doctree::ProcMacro { #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Stability { pub level: stability::StabilityLevel, - pub feature: String, + pub feature: Option, pub since: String, - pub deprecated_since: String, - pub deprecated_reason: String, - pub unstable_reason: String, - pub issue: Option + pub deprecation: Option, + pub unstable_reason: Option, + pub issue: Option, } #[derive(Clone, RustcEncodable, RustcDecodable, Debug)] pub struct Deprecation { - pub since: String, - pub note: String, + pub since: Option, + pub note: Option, } impl Clean for attr::Stability { - fn clean(&self, _: &DocContext) -> Stability { + fn clean(&self, _: &DocContext<'_>) -> Stability { Stability { level: stability::StabilityLevel::from_attr_level(&self.level), - feature: self.feature.to_string(), + feature: Some(self.feature.to_string()).filter(|f| 
!f.is_empty()), since: match self.level { attr::Stable {ref since} => since.to_string(), _ => String::new(), }, - deprecated_since: match self.rustc_depr { - Some(attr::RustcDeprecation {ref since, ..}) => since.to_string(), - _=> String::new(), - }, - deprecated_reason: match self.rustc_depr { - Some(ref depr) => depr.reason.to_string(), - _ => String::new(), - }, + deprecation: self.rustc_depr.as_ref().map(|d| { + Deprecation { + note: Some(d.reason.to_string()).filter(|r| !r.is_empty()), + since: Some(d.since.to_string()).filter(|d| !d.is_empty()), + } + }), unstable_reason: match self.level { - attr::Unstable { reason: Some(ref reason), .. } => reason.to_string(), - _ => String::new(), + attr::Unstable { reason: Some(ref reason), .. } => Some(reason.to_string()), + _ => None, }, issue: match self.level { attr::Unstable {issue, ..} => Some(issue), @@ -3882,21 +4330,21 @@ impl Clean for attr::Stability { } impl<'a> Clean for &'a attr::Stability { - fn clean(&self, dc: &DocContext) -> Stability { + fn clean(&self, dc: &DocContext<'_>) -> Stability { (**self).clean(dc) } } impl Clean for attr::Deprecation { - fn clean(&self, _: &DocContext) -> Deprecation { + fn clean(&self, _: &DocContext<'_>) -> Deprecation { Deprecation { - since: self.since.as_ref().map_or(String::new(), |s| s.to_string()), - note: self.note.as_ref().map_or(String::new(), |s| s.to_string()), + since: self.since.map(|s| s.to_string()).filter(|s| !s.is_empty()), + note: self.note.map(|n| n.to_string()).filter(|n| !n.is_empty()), } } } -/// An equality constraint on an associated type, e.g., `A=Bar` in `Foo` +/// An equality constraint on an associated type, e.g., `A = Bar` in `Foo` #[derive(Clone, PartialEq, Eq, RustcDecodable, RustcEncodable, Debug, Hash)] pub struct TypeBinding { pub name: String, @@ -3904,7 +4352,7 @@ pub struct TypeBinding { } impl Clean for hir::TypeBinding { - fn clean(&self, cx: &DocContext) -> TypeBinding { + fn clean(&self, cx: &DocContext<'_>) -> TypeBinding { TypeBinding { name: self.ident.name.clean(cx), ty: self.ty.clean(cx) @@ -3912,7 +4360,11 @@ impl Clean for hir::TypeBinding { } } -pub fn def_id_to_path(cx: &DocContext, did: DefId, name: Option) -> Vec { +pub fn def_id_to_path( + cx: &DocContext<'_>, + did: DefId, + name: Option +) -> Vec { let crate_name = name.unwrap_or_else(|| cx.tcx.crate_name(did.krate).to_string()); let relative = cx.tcx.def_path(did).data.into_iter().filter_map(|elem| { // extern blocks have an empty name @@ -3926,7 +4378,7 @@ pub fn def_id_to_path(cx: &DocContext, did: DefId, name: Option) -> Vec< once(crate_name).chain(relative).collect() } -pub fn enter_impl_trait(cx: &DocContext, f: F) -> R +pub fn enter_impl_trait(cx: &DocContext<'_>, f: F) -> R where F: FnOnce() -> R, { @@ -3939,7 +4391,7 @@ where // Start of code copied from rust-clippy -pub fn path_to_def_local(tcx: &TyCtxt, path: &[&str]) -> Option { +pub fn path_to_def_local(tcx: &TyCtxt<'_, '_, '_>, path: &[&str]) -> Option { let krate = tcx.hir().krate(); let mut items = krate.module.item_ids.clone(); let mut path_it = path.iter().peekable(); @@ -3948,10 +4400,10 @@ pub fn path_to_def_local(tcx: &TyCtxt, path: &[&str]) -> Option { let segment = path_it.next()?; for item_id in mem::replace(&mut items, HirVec::new()).iter() { - let item = tcx.hir().expect_item(item_id.id); - if item.name == *segment { + let item = tcx.hir().expect_item_by_hir_id(item_id.id); + if item.ident.name == *segment { if path_it.peek().is_none() { - return Some(tcx.hir().local_def_id(item_id.id)) + return 
Some(tcx.hir().local_def_id_from_hir_id(item_id.id)) } items = match &item.node { @@ -3964,7 +4416,7 @@ pub fn path_to_def_local(tcx: &TyCtxt, path: &[&str]) -> Option { } } -pub fn path_to_def(tcx: &TyCtxt, path: &[&str]) -> Option { +pub fn path_to_def(tcx: &TyCtxt<'_, '_, '_>, path: &[&str]) -> Option { let crates = tcx.crates(); let krate = crates @@ -4001,34 +4453,115 @@ pub fn path_to_def(tcx: &TyCtxt, path: &[&str]) -> Option { } } -pub fn get_path_for_type(tcx: TyCtxt, def_id: DefId, def_ctor: F) -> hir::Path -where F: Fn(DefId) -> Def { - #[derive(Debug)] - struct AbsolutePathBuffer { - names: Vec, +pub fn get_path_for_type( + tcx: TyCtxt<'_, '_, '_>, + def_id: DefId, + def_ctor: impl Fn(DefId) -> Def, +) -> hir::Path { + use rustc::ty::print::Printer; + + struct AbsolutePathPrinter<'a, 'tcx> { + tcx: TyCtxt<'a, 'tcx, 'tcx>, } - impl ty::item_path::ItemPathBuffer for AbsolutePathBuffer { - fn root_mode(&self) -> &ty::item_path::RootMode { - const ABSOLUTE: &'static ty::item_path::RootMode = &ty::item_path::RootMode::Absolute; - ABSOLUTE + impl Printer<'tcx, 'tcx> for AbsolutePathPrinter<'_, 'tcx> { + type Error = !; + + type Path = Vec; + type Region = (); + type Type = (); + type DynExistential = (); + + fn tcx(&'a self) -> TyCtxt<'a, 'tcx, 'tcx> { + self.tcx } - fn push(&mut self, text: &str) { - self.names.push(text.to_owned()); + fn print_region( + self, + _region: ty::Region<'_>, + ) -> Result { + Ok(()) } - } - let mut apb = AbsolutePathBuffer { names: vec![] }; + fn print_type( + self, + _ty: Ty<'tcx>, + ) -> Result { + Ok(()) + } + + fn print_dyn_existential( + self, + _predicates: &'tcx ty::List>, + ) -> Result { + Ok(()) + } + + fn path_crate( + self, + cnum: CrateNum, + ) -> Result { + Ok(vec![self.tcx.original_crate_name(cnum).to_string()]) + } + fn path_qualified( + self, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + // This shouldn't ever be needed, but just in case: + Ok(vec![match trait_ref { + Some(trait_ref) => format!("{:?}", trait_ref), + None => format!("<{}>", self_ty), + }]) + } + + fn path_append_impl( + self, + print_prefix: impl FnOnce(Self) -> Result, + _disambiguated_data: &DisambiguatedDefPathData, + self_ty: Ty<'tcx>, + trait_ref: Option>, + ) -> Result { + let mut path = print_prefix(self)?; + + // This shouldn't ever be needed, but just in case: + path.push(match trait_ref { + Some(trait_ref) => { + format!("", trait_ref, self_ty) + } + None => format!("", self_ty), + }); + + Ok(path) + } + fn path_append( + self, + print_prefix: impl FnOnce(Self) -> Result, + disambiguated_data: &DisambiguatedDefPathData, + ) -> Result { + let mut path = print_prefix(self)?; + path.push(disambiguated_data.data.as_interned_str().to_string()); + Ok(path) + } + fn path_generic_args( + self, + print_prefix: impl FnOnce(Self) -> Result, + _args: &[Kind<'tcx>], + ) -> Result { + print_prefix(self) + } + } - tcx.push_item_path(&mut apb, def_id, false); + let names = AbsolutePathPrinter { tcx: tcx.global_tcx() } + .print_def_path(def_id, &[]) + .unwrap(); hir::Path { span: DUMMY_SP, def: def_ctor(def_id), - segments: hir::HirVec::from_vec(apb.names.iter().map(|s| hir::PathSegment { + segments: hir::HirVec::from_vec(names.iter().map(|s| hir::PathSegment { ident: ast::Ident::from_str(&s), - id: None, + hir_id: None, def: None, args: None, infer_types: false, @@ -4077,9 +4610,9 @@ impl From for SimpleBound { match bound.clone() { GenericBound::Outlives(l) => SimpleBound::Outlives(l), GenericBound::TraitBound(t, mod_) => match t.trait_ { - Type::ResolvedPath { 
path, typarams, .. } => { + Type::ResolvedPath { path, param_names, .. } => { SimpleBound::TraitBound(path.segments, - typarams + param_names .map_or_else(|| Vec::new(), |v| v.iter() .map(|p| SimpleBound::from(p.clone())) .collect()), diff --git a/src/librustdoc/clean/simplify.rs b/src/librustdoc/clean/simplify.rs index 81608b380d057..8ca570cb443c9 100644 --- a/src/librustdoc/clean/simplify.rs +++ b/src/librustdoc/clean/simplify.rs @@ -1,21 +1,11 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Simplification of where clauses and parameter bounds into a prettier and +//! Simplification of where-clauses and parameter bounds into a prettier and //! more canonical form. //! //! Currently all cross-crate-inlined function use `rustc::ty` to reconstruct //! the AST (e.g., see all of `clean::inline`), but this is not always a -//! non-lossy transformation. The current format of storage for where clauses +//! non-lossy transformation. The current format of storage for where-clauses //! for functions and such is simply a list of predicates. One example of this -//! is that the AST predicate of: `where T: Trait` is encoded as: +//! is that the AST predicate of: `where T: Trait` is encoded as: //! `where T: Trait, ::Foo = Bar`. //! //! This module attempts to reconstruct the original where and/or parameter @@ -27,12 +17,12 @@ use std::collections::BTreeMap; use rustc::hir::def_id::DefId; use rustc::ty; -use clean::GenericArgs as PP; -use clean::WherePredicate as WP; -use clean; -use core::DocContext; +use crate::clean::GenericArgs as PP; +use crate::clean::WherePredicate as WP; +use crate::clean; +use crate::core::DocContext; -pub fn where_clauses(cx: &DocContext, clauses: Vec) -> Vec { +pub fn where_clauses(cx: &DocContext<'_>, clauses: Vec) -> Vec { // First, partition the where clause into its separate components let mut params: BTreeMap<_, Vec<_>> = BTreeMap::new(); let mut lifetimes = Vec::new(); @@ -151,7 +141,7 @@ fn ty_bounds(bounds: Vec) -> Vec { bounds } -fn trait_is_same_or_supertrait(cx: &DocContext, child: DefId, +fn trait_is_same_or_supertrait(cx: &DocContext<'_>, child: DefId, trait_: DefId) -> bool { if child == trait_ { return true diff --git a/src/librustdoc/config.rs b/src/librustdoc/config.rs index b421f07ddafa2..f2682e00430d0 100644 --- a/src/librustdoc/config.rs +++ b/src/librustdoc/config.rs @@ -1,13 +1,3 @@ -// Copyright 2018 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- use std::collections::{BTreeMap, BTreeSet}; use std::fmt; use std::path::PathBuf; @@ -25,14 +15,14 @@ use rustc_driver; use rustc_target::spec::TargetTriple; use syntax::edition::Edition; -use core::new_handler; -use externalfiles::ExternalHtml; -use html; -use html::markdown::IdMap; -use html::static_files; -use opts; -use passes::{self, DefaultPassOption}; -use theme; +use crate::core::new_handler; +use crate::externalfiles::ExternalHtml; +use crate::html; +use crate::html::{static_files}; +use crate::html::markdown::{IdMap}; +use crate::opts; +use crate::passes::{self, DefaultPassOption}; +use crate::theme; /// Configuration options for rustdoc. #[derive(Clone)] @@ -78,6 +68,9 @@ pub struct Options { pub should_test: bool, /// List of arguments to pass to the test harness, if running tests. pub test_args: Vec, + /// Optional path to persist the doctest executables to, defaults to a + /// temporary directory if not set. + pub persist_doctests: Option, // Options that affect the documentation process @@ -92,6 +85,9 @@ pub struct Options { /// Whether to display warnings during doc generation or while gathering doctests. By default, /// all non-rustdoc-specific lints are allowed when generating docs. pub display_warnings: bool, + /// Whether to run the `calculate-doc-coverage` pass, which counts the number of public items + /// with and without documentation. + pub show_coverage: bool, // Options that alter generated documentation pages @@ -102,11 +98,11 @@ pub struct Options { } impl fmt::Debug for Options { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { struct FmtExterns<'a>(&'a Externs); impl<'a> fmt::Debug for FmtExterns<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map() .entries(self.0.iter()) .finish() @@ -131,9 +127,11 @@ impl fmt::Debug for Options { .field("lint_cap", &self.lint_cap) .field("should_test", &self.should_test) .field("test_args", &self.test_args) + .field("persist_doctests", &self.persist_doctests) .field("default_passes", &self.default_passes) .field("manual_passes", &self.manual_passes) .field("display_warnings", &self.display_warnings) + .field("show_coverage", &self.show_coverage) .field("crate_version", &self.crate_version) .field("render_options", &self.render_options) .finish() @@ -156,9 +154,9 @@ pub struct RenderOptions { pub playground_url: Option, /// Whether to sort modules alphabetically on a module page instead of using declaration order. /// `true` by default. - /// - /// FIXME(misdreavus): the flag name is `--sort-modules-by-appearance` but the meaning is - /// inverted once read + // + // FIXME(misdreavus): the flag name is `--sort-modules-by-appearance` but the meaning is + // inverted once read. pub sort_modules_alphabetically: bool, /// List of themes to extend the docs with. Original argument name is included to assist in /// displaying errors if it fails a theme check. @@ -171,9 +169,9 @@ pub struct RenderOptions { pub resource_suffix: String, /// Whether to run the static CSS/JavaScript through a minifier when outputting them. `true` by /// default. - /// - /// FIXME(misdreavus): the flag name is `--disable-minification` but the meaning is inverted - /// once read + // + // FIXME(misdreavus): the flag name is `--disable-minification` but the meaning is inverted + // once read. pub enable_minification: bool, /// Whether to create an index page in the root of the output directory. 
If this is true but /// `enable_index_page` is None, generate a static listing of crates instead. @@ -181,6 +179,9 @@ pub struct RenderOptions { /// A file to use as the index page at the root of the output directory. Overrides /// `enable_index_page` to be true if set. pub index_page: Option, + /// An optional path to use as the location of static files. If not set, uses combinations of + /// `../` to reach the documentation root. + pub static_root_path: Option, // Options specific to reading standalone Markdown files @@ -192,17 +193,22 @@ pub struct RenderOptions { /// If present, playground URL to use in the "Run" button added to code samples generated from /// standalone Markdown files. If not present, `playground_url` is used. pub markdown_playground_url: Option, + /// If false, the `select` element to have search filtering by crates on rendered docs + /// won't be generated. + pub generate_search_filter: bool, + /// Option (disabled by default) to generate files used by RLS and some other tools. + pub generate_redirect_pages: bool, } impl Options { /// Parses the given command-line for options. If an error message or other early-return has /// been printed, returns `Err` with the exit code. - pub fn from_matches(matches: &getopts::Matches) -> Result { + pub fn from_matches(matches: &getopts::Matches) -> Result { // Check for unstable options. nightly_options::check_nightly_options(&matches, &opts()); if matches.opt_present("h") || matches.opt_present("help") { - ::usage("rustdoc"); + crate::usage("rustdoc"); return Err(0); } else if matches.opt_present("version") { rustc_driver::version("rustdoc", &matches); @@ -212,7 +218,7 @@ impl Options { if matches.opt_strs("passes") == ["list"] { println!("Available passes for running rustdoc:"); for pass in passes::PASSES { - println!("{:>20} - {}", pass.name(), pass.description()); + println!("{:>20} - {}", pass.name, pass.description); } println!("\nDefault passes for rustdoc:"); for &name in passes::DEFAULT_PASSES { @@ -222,6 +228,18 @@ impl Options { for &name in passes::DEFAULT_PRIVATE_PASSES { println!("{:>20}", name); } + + if nightly_options::is_nightly_build() { + println!("\nPasses run with `--show-coverage`:"); + for &name in passes::DEFAULT_COVERAGE_PASSES { + println!("{:>20}", name); + } + println!("\nPasses run with `--show-coverage --document-private-items`:"); + for &name in passes::PRIVATE_COVERAGE_PASSES { + println!("{:>20}", name); + } + } + return Err(0); } @@ -411,9 +429,16 @@ impl Options { } }); + let show_coverage = matches.opt_present("show-coverage"); + let document_private = matches.opt_present("document-private-items"); + let default_passes = if matches.opt_present("no-defaults") { passes::DefaultPassOption::None - } else if matches.opt_present("document-private-items") { + } else if show_coverage && document_private { + passes::DefaultPassOption::PrivateCoverage + } else if show_coverage { + passes::DefaultPassOption::Coverage + } else if document_private { passes::DefaultPassOption::Private } else { passes::DefaultPassOption::Default @@ -433,6 +458,10 @@ impl Options { let markdown_playground_url = matches.opt_str("markdown-playground-url"); let crate_version = matches.opt_str("crate-version"); let enable_index_page = matches.opt_present("enable-index-page") || index_page.is_some(); + let static_root_path = matches.opt_str("static-root-path"); + let generate_search_filter = !matches.opt_present("disable-per-crate-search"); + let persist_doctests = matches.opt_str("persist-doctests").map(PathBuf::from); + let 
generate_redirect_pages = matches.opt_present("generate-redirect-pages"); let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format); @@ -457,7 +486,9 @@ impl Options { default_passes, manual_passes, display_warnings, + show_coverage, crate_version, + persist_doctests, render_options: RenderOptions { output, external_html, @@ -471,14 +502,17 @@ impl Options { enable_minification, enable_index_page, index_page, + static_root_path, markdown_no_toc, markdown_css, markdown_playground_url, + generate_search_filter, + generate_redirect_pages, } }) } - /// Returns whether the file given as `self.input` is a Markdown file. + /// Returns `true` if the file given as `self.input` is a Markdown file. pub fn markdown_input(&self) -> bool { self.input.extension() .map_or(false, |e| e == "md" || e == "markdown") diff --git a/src/librustdoc/core.rs b/src/librustdoc/core.rs index b85342f631181..3cf6b32b07c4c 100644 --- a/src/librustdoc/core.rs +++ b/src/librustdoc/core.rs @@ -1,32 +1,22 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use rustc_lint; -use rustc_driver::{self, driver, target_features, abort_on_err}; use rustc::session::{self, config}; use rustc::hir::def_id::{DefId, DefIndex, DefIndexAddressSpace, CrateNum, LOCAL_CRATE}; use rustc::hir::def::Def; -use rustc::hir::{self, HirVec}; +use rustc::hir::{self, HirId, HirVec}; use rustc::middle::cstore::CrateStore; use rustc::middle::privacy::AccessLevels; -use rustc::ty::{self, TyCtxt, AllArenas}; -use rustc::hir::map as hir_map; +use rustc::ty::{self, TyCtxt}; use rustc::lint::{self, LintPass}; use rustc::session::config::ErrorOutputType; +use rustc::session::DiagnosticOutput; use rustc::util::nodemap::{FxHashMap, FxHashSet}; +use rustc_interface::interface; +use rustc_driver::abort_on_err; use rustc_resolve as resolve; -use rustc_metadata::creader::CrateLoader; use rustc_metadata::cstore::CStore; use rustc_target::spec::TargetTriple; -use syntax::ast::{self, Ident, NodeId}; +use syntax::ast::{self, Ident}; use syntax::source_map; use syntax::feature_gate::UnstableFeatures; use syntax::json::JsonEmitter; @@ -40,30 +30,29 @@ use parking_lot::ReentrantMutex; use std::cell::RefCell; use std::mem; use rustc_data_structures::sync::{self, Lrc}; -use std::rc::Rc; use std::sync::Arc; +use std::rc::Rc; -use visit_ast::RustdocVisitor; -use config::{Options as RustdocOptions, RenderOptions}; -use clean; -use clean::{get_path_for_type, Clean, MAX_DEF_ID, AttributesExt}; -use html::render::RenderInfo; -use passes; +use crate::visit_ast::RustdocVisitor; +use crate::config::{Options as RustdocOptions, RenderOptions}; +use crate::clean; +use crate::clean::{get_path_for_type, Clean, MAX_DEF_ID, AttributesExt}; +use crate::html::render::RenderInfo; + +use crate::passes; pub use rustc::session::config::{Input, Options, CodegenOptions}; pub use rustc::session::search_paths::SearchPath; pub type ExternalPaths = FxHashMap, clean::TypeKind)>; -pub struct DocContext<'a, 'tcx: 'a, 'rcx: 'a, 'cstore: 'rcx> { - pub tcx: TyCtxt<'a, 'tcx, 'tcx>, - pub resolver: &'a RefCell>, +pub struct DocContext<'tcx> { + + pub tcx: TyCtxt<'tcx, 'tcx, 'tcx>, + pub resolver: Rc>>, /// The stack of module NodeIds up till this point pub crate_name: 
Option, - pub cstore: Rc, - // Note that external items for which `doc(hidden)` applies to are shown as - // non-reachable while local items aren't. This is because we're reusing - // the access levels from crateanalysis. + pub cstore: Lrc, /// Later on moved into `html::render::CACHE_KEY` pub renderinfo: RefCell, /// Later on moved through `clean::Crate` into `html::render::CACHE_KEY` @@ -76,8 +65,10 @@ pub struct DocContext<'a, 'tcx: 'a, 'rcx: 'a, 'cstore: 'rcx> { /// Table type parameter definition -> substituted type pub ty_substs: RefCell>, - /// Table node id of lifetime parameter definition -> substituted lifetime + /// Table `NodeId` of lifetime parameter definition -> substituted lifetime pub lt_substs: RefCell>, + /// Table node id of const parameter definition -> substituted const + pub ct_substs: RefCell>, /// Table DefId of `impl Trait` in argument position -> bounds pub impl_trait_bounds: RefCell>>, pub send_trait: Option, @@ -88,24 +79,35 @@ pub struct DocContext<'a, 'tcx: 'a, 'rcx: 'a, 'cstore: 'rcx> { pub all_traits: Vec, } -impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { +impl<'tcx> DocContext<'tcx> { pub fn sess(&self) -> &session::Session { &self.tcx.sess } + pub fn enter_resolver(&self, f: F) -> R + where F: FnOnce(&mut resolve::Resolver<'_>) -> R { + let resolver = &*self.resolver; + let resolver = resolver.as_ref().unwrap(); + resolver.borrow_mut().access(f) + } + /// Call the closure with the given parameters set as /// the substitutions for a type alias' RHS. pub fn enter_alias(&self, ty_substs: FxHashMap, lt_substs: FxHashMap, + ct_substs: FxHashMap, f: F) -> R where F: FnOnce() -> R { - let (old_tys, old_lts) = - (mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs), - mem::replace(&mut *self.lt_substs.borrow_mut(), lt_substs)); + let (old_tys, old_lts, old_cts) = ( + mem::replace(&mut *self.ty_substs.borrow_mut(), ty_substs), + mem::replace(&mut *self.lt_substs.borrow_mut(), lt_substs), + mem::replace(&mut *self.ct_substs.borrow_mut(), ct_substs), + ); let r = f(); *self.ty_substs.borrow_mut() = old_tys; *self.lt_substs.borrow_mut() = old_lts; + *self.ct_substs.borrow_mut() = old_cts; r } @@ -164,7 +166,7 @@ impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { /// Like the function of the same name on the HIR map, but skips calling it on fake DefIds. /// (This avoids a slice-index-out-of-bounds panic.) 
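
As an illustrative aside (the caller and variable names below are assumed, not part of this patch): the new `ct_substs` table added to `DocContext` travels alongside the existing type and lifetime substitution maps, so expanding a type alias that has const generic parameters goes through `enter_alias` with three maps instead of two.

    // Sketch only: filling the three substitution tables before cleaning the
    // aliased type. `cx` is a &DocContext and `inner_ty` is the alias's RHS.
    let mut ty_substs = FxHashMap::default();
    let mut lt_substs = FxHashMap::default();
    let mut ct_substs = FxHashMap::default(); // new in this change
    // ... populate the maps from the alias's generic arguments ...
    let cleaned = cx.enter_alias(ty_substs, lt_substs, ct_substs, || inner_ty.clean(cx));
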
- pub fn as_local_node_id(&self, def_id: DefId) -> Option { + pub fn as_local_node_id(&self, def_id: DefId) -> Option { if self.all_fake_def_ids.borrow().contains(&def_id) { None } else { @@ -172,6 +174,15 @@ impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { } } + // FIXME(@ljedrz): remove the NodeId variant + pub fn as_local_hir_id(&self, def_id: DefId) -> Option { + if self.all_fake_def_ids.borrow().contains(&def_id) { + None + } else { + self.tcx.hir().as_local_hir_id(def_id) + } + } + pub fn get_real_ty(&self, def_id: DefId, def_ctor: &F, @@ -198,7 +209,6 @@ impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { }; hir::Ty { - id: ast::DUMMY_NODE_ID, node: hir::TyKind::Path(hir::QPath::Resolved(None, P(new_path))), span: DUMMY_SP, hir_id: hir::DUMMY_HIR_ID, @@ -218,14 +228,25 @@ impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { }; args.push(hir::GenericArg::Lifetime(hir::Lifetime { - id: ast::DUMMY_NODE_ID, + hir_id: hir::DUMMY_HIR_ID, span: DUMMY_SP, name: hir::LifetimeName::Param(name), })); } - ty::GenericParamDefKind::Type {..} => { + ty::GenericParamDefKind::Type { .. } => { args.push(hir::GenericArg::Type(self.ty_param_to_ty(param.clone()))); } + ty::GenericParamDefKind::Const => { + args.push(hir::GenericArg::Const(hir::ConstArg { + value: hir::AnonConst { + hir_id: hir::DUMMY_HIR_ID, + body: hir::BodyId { + hir_id: hir::DUMMY_HIR_ID, + } + }, + span: DUMMY_SP, + })) + } } } @@ -239,7 +260,6 @@ impl<'a, 'tcx, 'rcx, 'cstore> DocContext<'a, 'tcx, 'rcx, 'cstore> { pub fn ty_param_to_ty(&self, param: ty::GenericParamDef) -> hir::Ty { debug!("ty_param_to_ty({:?}) {:?}", param, param.def_id); hir::Ty { - id: ast::DUMMY_NODE_ID, node: hir::TyKind::Path(hir::QPath::Resolved( None, P(hir::Path { @@ -272,7 +292,7 @@ impl DocAccessLevels for AccessLevels { /// will be created for the handler. pub fn new_handler(error_format: ErrorOutputType, source_map: Option>, - treat_err_as_bug: bool, + treat_err_as_bug: Option, ui_testing: bool, ) -> errors::Handler { // rustdoc doesn't override (or allow to override) anything from this that is relevant here, so @@ -363,19 +383,31 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt whitelisted_lints.extend(lint_opts.iter().map(|(lint, _)| lint).cloned()); - let lints = lint::builtin::HardwiredLints.get_lints() - .into_iter() - .chain(rustc_lint::SoftLints.get_lints().into_iter()) - .filter_map(|lint| { - if lint.name == warnings_lint_name || - lint.name == intra_link_resolution_failure_name { - None - } else { - Some((lint.name_lower(), lint::Allow)) - } - }) - .chain(lint_opts.into_iter()) - .collect::>(); + let lints = || { + lint::builtin::HardwiredLints + .get_lints() + .into_iter() + .chain(rustc_lint::SoftLints.get_lints().into_iter()) + }; + + let lint_opts = lints().filter_map(|lint| { + if lint.name == warnings_lint_name || + lint.name == intra_link_resolution_failure_name { + None + } else { + Some((lint.name_lower(), lint::Allow)) + } + }).chain(lint_opts.into_iter()).collect::>(); + + let lint_caps = lints().filter_map(|lint| { + // We don't want to whitelist *all* lints so let's + // ignore those ones. + if whitelisted_lints.iter().any(|l| &lint.name == l) { + None + } else { + Some((lint::LintId::of(lint), lint::Allow)) + } + }).collect(); let host_triple = TargetTriple::from_triple(config::host_triple()); // plays with error output here! 
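
A note on the shape of the lint setup above (the sketch below is illustrative and omits the name filtering; `all_rustdoc_lints` is a made-up helper name): `lints` is a closure that returns a fresh iterator on each call because the same lint list now has to be walked twice, once for the session's `lint_opts` and once for the new `lint_caps` map handed to `rustc_interface` further down.

    // Illustrative only: a closure yields a fresh iterator per call, so both
    // tables can be built from the same list without an intermediate Vec.
    let all_rustdoc_lints = || {
        lint::builtin::HardwiredLints.get_lints()
            .into_iter()
            .chain(rustc_lint::SoftLints.get_lints().into_iter())
    };
    let lint_opts: Vec<_> = all_rustdoc_lints()
        .map(|l| (l.name_lower(), lint::Allow))
        .collect();
    let lint_caps: FxHashMap<_, _> = all_rustdoc_lints()
        .map(|l| (lint::LintId::of(l), lint::Allow))
        .collect();
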
@@ -384,7 +416,7 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt search_paths: libs, crate_types: vec![config::CrateType::Rlib], lint_opts: if !display_warnings { - lints + lint_opts } else { vec![] }, @@ -401,121 +433,47 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt describe_lints, ..Options::default() }; - driver::spawn_thread_pool(sessopts, move |sessopts| { - let source_map = Lrc::new(source_map::SourceMap::new(sessopts.file_path_mapping())); - let diagnostic_handler = new_handler(error_format, - Some(source_map.clone()), - debugging_options.treat_err_as_bug, - debugging_options.ui_testing); - - let mut sess = session::build_session_( - sessopts, cpath, diagnostic_handler, source_map, - ); - lint::builtin::HardwiredLints.get_lints() - .into_iter() - .chain(rustc_lint::SoftLints.get_lints().into_iter()) - .filter_map(|lint| { - // We don't want to whitelist *all* lints so let's - // ignore those ones. - if whitelisted_lints.iter().any(|l| &lint.name == l) { - None - } else { - Some(lint) - } - }) - .for_each(|l| { - sess.driver_lint_caps.insert(lint::LintId::of(l), - lint::Allow); - }); - - let codegen_backend = rustc_driver::get_codegen_backend(&sess); - let cstore = Rc::new(CStore::new(codegen_backend.metadata_loader())); - rustc_lint::register_builtins(&mut sess.lint_store.borrow_mut(), Some(&sess)); - - let mut cfg = config::build_configuration(&sess, config::parse_cfgspecs(cfgs)); - target_features::add_configuration(&mut cfg, &sess, &*codegen_backend); - sess.parse_sess.config = cfg; - - let control = &driver::CompileController::basic(); - - let krate = panictry!(driver::phase_1_parse_input(control, &sess, &input)); - - let name = match crate_name { - Some(ref crate_name) => crate_name.clone(), - None => ::rustc_codegen_utils::link::find_crate_name(Some(&sess), &krate.attrs, &input), - }; + let config = interface::Config { + opts: sessopts, + crate_cfg: config::parse_cfgspecs(cfgs), + input, + input_path: cpath, + output_file: None, + output_dir: None, + file_loader: None, + diagnostic_output: DiagnosticOutput::Default, + stderr: None, + crate_name: crate_name.clone(), + lint_caps, + }; + + interface::run_compiler_in_existing_thread_pool(config, |compiler| { + let sess = compiler.session(); - let mut crate_loader = CrateLoader::new(&sess, &cstore, &name); - - let resolver_arenas = resolve::Resolver::arenas(); - let result = driver::phase_2_configure_and_expand_inner(&sess, - &cstore, - krate, - None, - &name, - None, - resolve::MakeGlobMap::No, - &resolver_arenas, - &mut crate_loader, - |_| Ok(())); - let driver::InnerExpansionResult { - mut hir_forest, - resolver, - .. - } = abort_on_err(result, &sess); - - // We need to hold on to the complete resolver, so we clone everything - // for the analysis passes to use. Suboptimal, but necessary in the + // We need to hold on to the complete resolver, so we cause everything to be + // cloned for the analysis passes to use. Suboptimal, but necessary in the // current architecture. 
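
For context, an illustrative use of the resolver handle that the code below keeps alive (the call site, `span`, and `path_str` are placeholders, not taken from this patch): later passes reach the resolver through the `enter_resolver` helper added to `DocContext` above, rather than holding a borrow for the whole run.

    // Hypothetical caller; the real ones live in rustdoc's clean/intra-doc-link code.
    let resolved = cx.enter_resolver(|resolver| {
        resolver.resolve_str_path_error(span, path_str, /* is_value */ false)
    });
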
- let defs = resolver.definitions.clone(); - let resolutions = ty::Resolutions { - freevars: resolver.freevars.clone(), - export_map: resolver.export_map.clone(), - trait_map: resolver.trait_map.clone(), - maybe_unused_trait_imports: resolver.maybe_unused_trait_imports.clone(), - maybe_unused_extern_crates: resolver.maybe_unused_extern_crates.clone(), - extern_prelude: resolver.extern_prelude.iter().map(|(ident, entry)| { - (ident.name, entry.introduced_by_item) - }).collect(), - }; - let analysis = ty::CrateAnalysis { - access_levels: Lrc::new(AccessLevels::default()), - name: name.to_string(), - glob_map: if resolver.make_glob_map { Some(resolver.glob_map.clone()) } else { None }, - }; + let resolver = abort_on_err(compiler.expansion(), sess).peek().1.clone(); - let arenas = AllArenas::new(); - let hir_map = hir_map::map_crate(&sess, &*cstore, &mut hir_forest, &defs); - let output_filenames = driver::build_output_filenames(&input, - &None, - &None, - &[], - &sess); - - let resolver = RefCell::new(resolver); - abort_on_err(driver::phase_3_run_analysis_passes(&*codegen_backend, - control, - &sess, - &*cstore, - hir_map, - analysis, - resolutions, - &arenas, - &name, - &output_filenames, - |tcx, analysis, _, result| { - if result.is_err() { - sess.fatal("Compilation failed, aborting rustdoc"); - } + if sess.err_count() > 0 { + sess.fatal("Compilation failed, aborting rustdoc"); + } + + let mut global_ctxt = abort_on_err(compiler.global_ctxt(), sess).take(); + + global_ctxt.enter(|tcx| { + tcx.analysis(LOCAL_CRATE).ok(); - let ty::CrateAnalysis { access_levels, .. } = analysis; + // Abort if there were any errors so far + sess.abort_if_errors(); - // Convert from a NodeId set to a DefId set since we don't always have easy access - // to the map from defid -> nodeid + let access_levels = tcx.privacy_access_levels(LOCAL_CRATE); + // Convert from a HirId set to a DefId set since we don't always have easy access + // to the map from defid -> hirid let access_levels = AccessLevels { map: access_levels.map.iter() - .map(|(&k, &v)| (tcx.hir().local_def_id(k), v)) + .map(|(&k, &v)| (tcx.hir().local_def_id_from_hir_id(k), v)) .collect() }; @@ -530,14 +488,15 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt let ctxt = DocContext { tcx, - resolver: &resolver, + resolver, crate_name, - cstore: cstore.clone(), + cstore: compiler.cstore().clone(), external_traits: Default::default(), active_extern_traits: Default::default(), renderinfo: RefCell::new(renderinfo), ty_substs: Default::default(), lt_substs: Default::default(), + ct_substs: Default::default(), impl_trait_bounds: Default::default(), send_trait: send_trait, fake_def_ids: Default::default(), @@ -570,22 +529,21 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt for attr in krate.module.as_ref().unwrap().attrs.lists("doc") { let diag = ctxt.sess().diagnostic(); - let name = attr.name().map(|s| s.as_str()); - let name = name.as_ref().map(|s| &s[..]); + let name = attr.name_or_empty(); if attr.is_word() { - if name == Some("no_default_passes") { + if name == "no_default_passes" { report_deprecated_attr("no_default_passes", diag); if default_passes == passes::DefaultPassOption::Default { default_passes = passes::DefaultPassOption::None; } } } else if let Some(value) = attr.value_str() { - let sink = match name { - Some("passes") => { + let sink = match name.get() { + "passes" => { report_deprecated_attr("passes = \"...\"", diag); &mut manual_passes }, - Some("plugins") => { + 
"plugins" => { report_deprecated_attr("plugins = \"...\"", diag); eprintln!("WARNING: #![doc(plugins = \"...\")] no longer functions; \ see CVE-2018-1000622"); @@ -598,7 +556,7 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt } } - if attr.is_word() && name == Some("document_private_items") { + if attr.is_word() && name == "document_private_items" { if default_passes == passes::DefaultPassOption::Default { default_passes = passes::DefaultPassOption::Private; } @@ -609,16 +567,21 @@ pub fn run_core(options: RustdocOptions) -> (clean::Crate, RenderInfo, RenderOpt passes::defaults(default_passes).iter().map(|p| p.to_string()).collect(); passes.extend(manual_passes); - for pass in &passes { - // the "unknown pass" error will be reported when late passes are run - if let Some(pass) = passes::find_pass(pass).and_then(|p| p.early_fn()) { - krate = pass(krate, &ctxt); + info!("Executing passes"); + + for pass_name in &passes { + match passes::find_pass(pass_name).map(|p| p.pass) { + Some(pass) => { + debug!("running pass {}", pass_name); + krate = pass(krate, &ctxt); + } + None => error!("unknown pass {}, skipping", *pass_name), } } ctxt.sess().abort_if_errors(); (krate, ctxt.renderinfo.into_inner(), render_options, passes) - }), &sess) + }) }) } diff --git a/src/librustdoc/doctree.rs b/src/librustdoc/doctree.rs index 4a6a4ee09ea1a..7a528e50e9c3f 100644 --- a/src/librustdoc/doctree.rs +++ b/src/librustdoc/doctree.rs @@ -1,13 +1,3 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! This module is used to store stuff from Rust's AST in a more convenient //! manner (and with prettier names) before cleaning. 
pub use self::StructType::*; @@ -48,6 +38,7 @@ pub struct Module { pub foreigns: Vec, pub macros: Vec, pub proc_macros: Vec, + pub trait_aliases: Vec, pub is_crate: bool, } @@ -63,21 +54,22 @@ impl Module { where_inner: syntax_pos::DUMMY_SP, attrs : hir::HirVec::new(), extern_crates: Vec::new(), - imports : Vec::new(), - structs : Vec::new(), - unions : Vec::new(), - enums : Vec::new(), - fns : Vec::new(), - mods : Vec::new(), - typedefs : Vec::new(), - existentials: Vec::new(), - statics : Vec::new(), - constants : Vec::new(), - traits : Vec::new(), - impls : Vec::new(), - foreigns : Vec::new(), - macros : Vec::new(), - proc_macros: Vec::new(), + imports : Vec::new(), + structs : Vec::new(), + unions : Vec::new(), + enums : Vec::new(), + fns : Vec::new(), + mods : Vec::new(), + typedefs : Vec::new(), + existentials: Vec::new(), + statics : Vec::new(), + constants : Vec::new(), + traits : Vec::new(), + impls : Vec::new(), + foreigns : Vec::new(), + macros : Vec::new(), + proc_macros: Vec::new(), + trait_aliases: Vec::new(), is_crate : false, } } @@ -97,7 +89,7 @@ pub struct Struct { pub vis: hir::Visibility, pub stab: Option, pub depr: Option, - pub id: NodeId, + pub id: hir::HirId, pub struct_type: StructType, pub name: Name, pub generics: hir::Generics, @@ -110,7 +102,7 @@ pub struct Union { pub vis: hir::Visibility, pub stab: Option, pub depr: Option, - pub id: NodeId, + pub id: hir::HirId, pub struct_type: StructType, pub name: Name, pub generics: hir::Generics, @@ -126,13 +118,14 @@ pub struct Enum { pub variants: hir::HirVec, pub generics: hir::Generics, pub attrs: hir::HirVec, - pub id: NodeId, + pub id: hir::HirId, pub whence: Span, pub name: Name, } pub struct Variant { pub name: Name, + pub id: hir::HirId, pub attrs: hir::HirVec, pub def: hir::VariantData, pub stab: Option, @@ -143,7 +136,7 @@ pub struct Variant { pub struct Function { pub decl: hir::FnDecl, pub attrs: hir::HirVec, - pub id: NodeId, + pub id: hir::HirId, pub name: Name, pub vis: hir::Visibility, pub stab: Option, @@ -158,7 +151,7 @@ pub struct Typedef { pub ty: P, pub gen: hir::Generics, pub name: Name, - pub id: ast::NodeId, + pub id: hir::HirId, pub attrs: hir::HirVec, pub whence: Span, pub vis: hir::Visibility, @@ -169,7 +162,7 @@ pub struct Typedef { pub struct Existential { pub exist_ty: hir::ExistTy, pub name: Name, - pub id: ast::NodeId, + pub id: hir::HirId, pub attrs: hir::HirVec, pub whence: Span, pub vis: hir::Visibility, @@ -187,7 +180,7 @@ pub struct Static { pub vis: hir::Visibility, pub stab: Option, pub depr: Option, - pub id: ast::NodeId, + pub id: hir::HirId, pub whence: Span, } @@ -199,7 +192,7 @@ pub struct Constant { pub vis: hir::Visibility, pub stab: Option, pub depr: Option, - pub id: ast::NodeId, + pub id: hir::HirId, pub whence: Span, } @@ -211,7 +204,19 @@ pub struct Trait { pub generics: hir::Generics, pub bounds: hir::HirVec, pub attrs: hir::HirVec, - pub id: ast::NodeId, + pub id: hir::HirId, + pub whence: Span, + pub vis: hir::Visibility, + pub stab: Option, + pub depr: Option, +} + +pub struct TraitAlias { + pub name: Name, + pub generics: hir::Generics, + pub bounds: hir::HirVec, + pub attrs: hir::HirVec, + pub id: hir::HirId, pub whence: Span, pub vis: hir::Visibility, pub stab: Option, @@ -232,7 +237,7 @@ pub struct Impl { pub vis: hir::Visibility, pub stab: Option, pub depr: Option, - pub id: ast::NodeId, + pub id: hir::HirId, } // For Macro we store the DefId instead of the NodeId, since we also create @@ -259,7 +264,7 @@ pub struct ExternCrate { pub struct Import { pub name: 
Name, - pub id: NodeId, + pub id: hir::HirId, pub vis: hir::Visibility, pub attrs: hir::HirVec, pub path: hir::Path, @@ -269,7 +274,7 @@ pub struct Import { pub struct ProcMacro { pub name: Name, - pub id: NodeId, + pub id: hir::HirId, pub kind: MacroKind, pub helpers: Vec, pub attrs: hir::HirVec, diff --git a/src/librustdoc/externalfiles.rs b/src/librustdoc/externalfiles.rs index c7a2dd6da3f75..0378b12662da2 100644 --- a/src/librustdoc/externalfiles.rs +++ b/src/librustdoc/externalfiles.rs @@ -1,19 +1,10 @@ -// Copyright 2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - use std::fs; use std::path::Path; use std::str; use errors; -use syntax::feature_gate::UnstableFeatures; -use html::markdown::{IdMap, ErrorCodes, Markdown}; +use crate::syntax::feature_gate::UnstableFeatures; +use crate::html::markdown::{IdMap, ErrorCodes, Markdown}; + use std::cell::RefCell; #[derive(Clone, Debug)] diff --git a/src/librustdoc/fold.rs b/src/librustdoc/fold.rs index b8e27c5317083..cfa22bc27b758 100644 --- a/src/librustdoc/fold.rs +++ b/src/librustdoc/fold.rs @@ -1,14 +1,4 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use clean::*; +use crate::clean::*; pub struct StripItem(pub Item); diff --git a/src/librustdoc/html/escape.rs b/src/librustdoc/html/escape.rs index 1173e6447f50c..182a2dd2e9c9f 100644 --- a/src/librustdoc/html/escape.rs +++ b/src/librustdoc/html/escape.rs @@ -1,16 +1,6 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! HTML Escaping +//! HTML escaping. //! -//! This module contains one unit-struct which can be used to HTML-escape a +//! This module contains one unit struct, which can be used to HTML-escape a //! string of text (for use in a format string). use std::fmt; @@ -20,7 +10,7 @@ use std::fmt; pub struct Escape<'a>(pub &'a str); impl<'a> fmt::Display for Escape<'a> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { // Because the internet is always right, turns out there's not that many // characters to escape: http://stackoverflow.com/questions/7381974 let Escape(s) = *self; diff --git a/src/librustdoc/html/format.rs b/src/librustdoc/html/format.rs index 445fc2e833a3f..3d8af7c7716b1 100644 --- a/src/librustdoc/html/format.rs +++ b/src/librustdoc/html/format.rs @@ -1,13 +1,3 @@ -// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. 
This file may not be copied, modified, or distributed -// except according to those terms. - //! HTML formatting module //! //! This module contains a large number of `fmt::Display` implementations for @@ -15,16 +5,18 @@ //! assume that HTML output is desired, although it may be possible to redesign //! them in the future to instead emit any format desired. +use std::borrow::Cow; use std::fmt; use rustc::hir::def_id::DefId; use rustc_target::spec::abi::Abi; use rustc::hir; -use clean::{self, PrimitiveType}; -use core::DocAccessLevels; -use html::item_type::ItemType; -use html::render::{self, cache, CURRENT_LOCATION_KEY}; +use crate::clean::{self, PrimitiveType}; +use crate::core::DocAccessLevels; +use crate::html::item_type::ItemType; +use crate::html::render::{self, cache, CURRENT_LOCATION_KEY}; + /// Helper to render an optional visibility with a space after it (if the /// visibility is preset) @@ -51,26 +43,32 @@ pub struct RawMutableSpace(pub clean::Mutability); /// Wrapper struct for emitting type parameter bounds. pub struct GenericBounds<'a>(pub &'a [clean::GenericBound]); /// Wrapper struct for emitting a comma-separated list of items -pub struct CommaSep<'a, T: 'a>(pub &'a [T]); +pub struct CommaSep<'a, T>(pub &'a [T]); pub struct AbiSpace(pub Abi); +pub struct DefaultSpace(pub bool); -/// Wrapper struct for properly emitting a method declaration. -pub struct Method<'a> { +/// Wrapper struct for properly emitting a function or method declaration. +pub struct Function<'a> { /// The declaration to emit. pub decl: &'a clean::FnDecl, - /// The length of the function's "name", used to determine line-wrapping. - pub name_len: usize, + /// The length of the function header and name. In other words, the number of characters in the + /// function declaration up to but not including the parentheses. + /// + /// Used to determine line-wrapping. + pub header_len: usize, /// The number of spaces to indent each successive line with, if line-wrapping is necessary. pub indent: usize, + /// Whether the function is async or not. + pub asyncness: hir::IsAsync, } -/// Wrapper struct for emitting a where clause from Generics. +/// Wrapper struct for emitting a where-clause from Generics. pub struct WhereClause<'a>{ - /// The Generics from which to emit a where clause. + /// The Generics from which to emit a where-clause. pub gens: &'a clean::Generics, /// The number of spaces to indent each line with. pub indent: usize, - /// Whether the where clause needs to add a comma and newline after the last bound. + /// Whether the where-clause needs to add a comma and newline after the last bound. 
pub end_newline: bool, } @@ -98,7 +96,7 @@ impl ConstnessSpace { } impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, item) in self.0.iter().enumerate() { if i != 0 { write!(f, ", ")?; } fmt::Display::fmt(item, f)?; @@ -108,7 +106,7 @@ impl<'a, T: fmt::Display> fmt::Display for CommaSep<'a, T> { } impl<'a> fmt::Display for GenericBounds<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let &GenericBounds(bounds) = self; for (i, bound) in bounds.iter().enumerate() { if i > 0 { @@ -121,7 +119,7 @@ impl<'a> fmt::Display for GenericBounds<'a> { } impl fmt::Display for clean::GenericParamDef { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.kind { clean::GenericParamDefKind::Lifetime => write!(f, "{}", self.name), clean::GenericParamDefKind::Type { ref bounds, ref default, .. } => { @@ -145,12 +143,22 @@ impl fmt::Display for clean::GenericParamDef { Ok(()) } + clean::GenericParamDefKind::Const { ref ty, .. } => { + f.write_str("const ")?; + f.write_str(&self.name)?; + + if f.alternate() { + write!(f, ": {:#}", ty) + } else { + write!(f, ": {}", ty) + } + } } } } impl fmt::Display for clean::Generics { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let real_params = self.params .iter() .filter(|p| !p.is_synthetic_type_param()) @@ -167,7 +175,7 @@ impl fmt::Display for clean::Generics { } impl<'a> fmt::Display for WhereClause<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let &WhereClause { gens, indent, end_newline } = self; if gens.where_predicates.is_empty() { return Ok(()); @@ -246,14 +254,22 @@ impl<'a> fmt::Display for WhereClause<'a> { } impl fmt::Display for clean::Lifetime { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(self.get_ref())?; Ok(()) } } +impl fmt::Display for clean::Constant { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(&self.expr, f)?; + f.write_str(": ")?; + fmt::Display::fmt(&self.type_, f) + } +} + impl fmt::Display for clean::PolyTrait { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if !self.generic_params.is_empty() { if f.alternate() { write!(f, "for<{:#}> ", CommaSep(&self.generic_params))?; @@ -270,7 +286,7 @@ impl fmt::Display for clean::PolyTrait { } impl fmt::Display for clean::GenericBound { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { clean::GenericBound::Outlives(ref lt) => { write!(f, "{}", *lt) @@ -291,34 +307,25 @@ impl fmt::Display for clean::GenericBound { } impl fmt::Display for clean::GenericArgs { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - clean::GenericArgs::AngleBracketed { - ref lifetimes, ref types, ref bindings - } => { - if !lifetimes.is_empty() || !types.is_empty() || !bindings.is_empty() { + clean::GenericArgs::AngleBracketed { ref args, ref bindings } => { + if !args.is_empty() || !bindings.is_empty() { if f.alternate() { f.write_str("<")?; } else { 
f.write_str("<")?; } let mut comma = false; - for lifetime in lifetimes { - if comma { - f.write_str(", ")?; - } - comma = true; - write!(f, "{}", *lifetime)?; - } - for ty in types { + for arg in args { if comma { f.write_str(", ")?; } comma = true; if f.alternate() { - write!(f, "{:#}", *ty)?; + write!(f, "{:#}", *arg)?; } else { - write!(f, "{}", *ty)?; + write!(f, "{}", *arg)?; } } for binding in bindings { @@ -368,7 +375,7 @@ impl fmt::Display for clean::GenericArgs { } impl fmt::Display for clean::PathSegment { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.name)?; if f.alternate() { write!(f, "{:#}", self.args) @@ -379,7 +386,7 @@ impl fmt::Display for clean::PathSegment { } impl fmt::Display for clean::Path { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.global { f.write_str("::")? } @@ -439,7 +446,7 @@ pub fn href(did: DefId) -> Option<(String, ItemType, Vec)> { /// Used when rendering a `ResolvedPath` structure. This invokes the `path` /// rendering function with the necessary arguments for linking to a local path. -fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path, +fn resolved_path(w: &mut fmt::Formatter<'_>, did: DefId, path: &clean::Path, print_all: bool, use_absolute: bool) -> fmt::Result { let last = path.segments.last().unwrap(); @@ -468,7 +475,7 @@ fn resolved_path(w: &mut fmt::Formatter, did: DefId, path: &clean::Path, Ok(()) } -fn primitive_link(f: &mut fmt::Formatter, +fn primitive_link(f: &mut fmt::Formatter<'_>, prim: clean::PrimitiveType, name: &str) -> fmt::Result { let m = cache(); @@ -513,9 +520,9 @@ fn primitive_link(f: &mut fmt::Formatter, } /// Helper to render type parameters -fn tybounds(w: &mut fmt::Formatter, - typarams: &Option>) -> fmt::Result { - match *typarams { +fn tybounds(w: &mut fmt::Formatter<'_>, + param_names: &Option>) -> fmt::Result { + match *param_names { Some(ref params) => { for param in params { write!(w, " + ")?; @@ -534,7 +541,7 @@ impl<'a> HRef<'a> { } impl<'a> fmt::Display for HRef<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match href(self.did) { Some((url, shortty, fqp)) => if !f.alternate() { write!(f, "{}", @@ -547,18 +554,18 @@ impl<'a> fmt::Display for HRef<'a> { } } -fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt::Result { +fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter<'_>, use_absolute: bool) -> fmt::Result { match *t { clean::Generic(ref name) => { f.write_str(name) } - clean::ResolvedPath{ did, ref typarams, ref path, is_generic } => { - if typarams.is_some() { + clean::ResolvedPath{ did, ref param_names, ref path, is_generic } => { + if param_names.is_some() { f.write_str("dyn ")?; } // Paths like T::Output and Self::Output should be rendered with all segments resolved_path(f, did, path, is_generic, use_absolute)?; - tybounds(f, typarams) + tybounds(f, param_names) } clean::Infer => write!(f, "_"), clean::Primitive(prim) => primitive_link(f, prim, prim.as_str()), @@ -602,6 +609,7 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: primitive_link(f, PrimitiveType::Array, &format!("; {}]", n)) } clean::Never => primitive_link(f, PrimitiveType::Never, "!"), + clean::CVarArgs => primitive_link(f, PrimitiveType::CVarArgs, "..."), clean::RawPointer(m, ref t) => { match **t { clean::Generic(_) 
| clean::ResolvedPath {is_generic: true, ..} => { @@ -655,7 +663,7 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: } } } - clean::ResolvedPath { typarams: Some(ref v), .. } if !v.is_empty() => { + clean::ResolvedPath { param_names: Some(ref v), .. } if !v.is_empty() => { write!(f, "{}{}{}(", amp, lt, m)?; fmt_type(&ty, f, use_absolute)?; write!(f, ")") @@ -672,7 +680,11 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: } } clean::ImplTrait(ref bounds) => { - write!(f, "impl {}", GenericBounds(bounds)) + if f.alternate() { + write!(f, "impl {:#}", GenericBounds(bounds)) + } else { + write!(f, "impl {}", GenericBounds(bounds)) + } } clean::QPath { ref name, ref self_type, ref trait_ } => { let should_show_cast = match *trait_ { @@ -705,7 +717,7 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: // the ugliness comes from inlining across crates where // everything comes in as a fully resolved QPath (hard to // look at). - box clean::ResolvedPath { did, ref typarams, .. } => { + box clean::ResolvedPath { did, ref param_names, .. } => { match href(did) { Some((ref url, _, ref path)) if !f.alternate() => { write!(f, @@ -719,8 +731,8 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: _ => write!(f, "{}", name)?, } - // FIXME: `typarams` are not rendered, and this seems bad? - drop(typarams); + // FIXME: `param_names` are not rendered, and this seems bad? + drop(param_names); Ok(()) } _ => { @@ -735,13 +747,13 @@ fn fmt_type(t: &clean::Type, f: &mut fmt::Formatter, use_absolute: bool) -> fmt: } impl fmt::Display for clean::Type { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt_type(self, f, false) } } fn fmt_impl(i: &clean::Impl, - f: &mut fmt::Formatter, + f: &mut fmt::Formatter<'_>, link_trait: bool, use_absolute: bool) -> fmt::Result { if f.alternate() { @@ -759,7 +771,7 @@ fn fmt_impl(i: &clean::Impl, fmt::Display::fmt(ty, f)?; } else { match *ty { - clean::ResolvedPath { typarams: None, ref path, is_generic: false, .. } => { + clean::ResolvedPath { param_names: None, ref path, is_generic: false, .. } => { let last = path.segments.last().unwrap(); fmt::Display::fmt(&last.name, f)?; fmt::Display::fmt(&last.args, f)?; @@ -781,20 +793,20 @@ fn fmt_impl(i: &clean::Impl, } impl fmt::Display for clean::Impl { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt_impl(self, f, true, false) } } // The difference from above is that trait is not hyperlinked. 
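
Further down, the `Function` wrapper introduced earlier gets its `Display` implementation. An assumed call site, purely for illustration; `w`, `vis`, `name`, `decl`, and `asyncness` stand in for what `html::render` actually has in scope, and the point is that the renderer now passes the full visible header length plus the asyncness flag instead of just a name length:

    // Hypothetical render-side usage of the Function formatter.
    let header = format!("{}fn {}", vis, name); // everything before the '('
    write!(w, "{}", Function {
        decl,
        header_len: header.len(),
        indent: 0,
        asyncness,
    })?;
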
pub fn fmt_impl_for_trait_page(i: &clean::Impl, - f: &mut fmt::Formatter, + f: &mut fmt::Formatter<'_>, use_absolute: bool) -> fmt::Result { fmt_impl(i, f, false, use_absolute) } impl fmt::Display for clean::Arguments { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, input) in self.values.iter().enumerate() { if !input.name.is_empty() { write!(f, "{}: ", input.name)?; @@ -811,7 +823,7 @@ impl fmt::Display for clean::Arguments { } impl fmt::Display for clean::FunctionRetTy { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { clean::Return(clean::Tuple(ref tys)) if tys.is_empty() => Ok(()), clean::Return(ref ty) if f.alternate() => write!(f, " -> {:#}", ty), @@ -822,26 +834,18 @@ impl fmt::Display for clean::FunctionRetTy { } impl fmt::Display for clean::FnDecl { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - if self.variadic { - if f.alternate() { - write!(f, "({args:#}, ...){arrow:#}", args = self.inputs, arrow = self.output) - } else { - write!(f, "({args}, ...){arrow}", args = self.inputs, arrow = self.output) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if f.alternate() { + write!(f, "({args:#}){arrow:#}", args = self.inputs, arrow = self.output) } else { - if f.alternate() { - write!(f, "({args:#}){arrow:#}", args = self.inputs, arrow = self.output) - } else { - write!(f, "({args}){arrow}", args = self.inputs, arrow = self.output) - } + write!(f, "({args}){arrow}", args = self.inputs, arrow = self.output) } } } -impl<'a> fmt::Display for Method<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let &Method { decl, name_len, indent } = self; +impl<'a> fmt::Display for Function<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let &Function { decl, header_len, indent, asyncness } = self; let amp = if f.alternate() { "&" } else { "&" }; let mut args = String::new(); let mut args_plain = String::new(); @@ -896,25 +900,23 @@ impl<'a> fmt::Display for Method<'a> { } } - if decl.variadic { - args.push_str(",
..."); - args_plain.push_str(", ..."); - } + let args_plain = format!("({})", args_plain); - let arrow_plain = format!("{:#}", decl.output); - let arrow = if f.alternate() { - format!("{:#}", decl.output) + let output = if let hir::IsAsync::Async = asyncness { + Cow::Owned(decl.sugared_async_return_type()) } else { - decl.output.to_string() + Cow::Borrowed(&decl.output) }; - let pad = " ".repeat(name_len); - let plain = format!("{pad}({args}){arrow}", - pad = pad, - args = args_plain, - arrow = arrow_plain); + let arrow_plain = format!("{:#}", &output); + let arrow = if f.alternate() { + format!("{:#}", &output) + } else { + output.to_string() + }; - let output = if plain.len() > 80 { + let declaration_len = header_len + args_plain.len() + arrow_plain.len(); + let output = if declaration_len > 80 { let full_pad = format!("
<br>{}", "&nbsp;".repeat(indent + 4)); + let close_pad = format!("<br>
{}", " ".repeat(indent)); format!("({args}{close}){arrow}", @@ -934,7 +936,7 @@ impl<'a> fmt::Display for Method<'a> { } impl<'a> fmt::Display for VisSpace<'a> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self.get() { Some(clean::Public) => f.write_str("pub "), Some(clean::Inherited) | None => Ok(()), @@ -954,7 +956,7 @@ impl<'a> fmt::Display for VisSpace<'a> { } impl fmt::Display for UnsafetySpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.get() { hir::Unsafety::Unsafe => write!(f, "unsafe "), hir::Unsafety::Normal => Ok(()) @@ -963,7 +965,7 @@ impl fmt::Display for UnsafetySpace { } impl fmt::Display for ConstnessSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.get() { hir::Constness::Const => write!(f, "const "), hir::Constness::NotConst => Ok(()) @@ -972,7 +974,7 @@ impl fmt::Display for ConstnessSpace { } impl fmt::Display for AsyncSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.0 { hir::IsAsync::Async => write!(f, "async "), hir::IsAsync::NotAsync => Ok(()), @@ -981,7 +983,7 @@ impl fmt::Display for AsyncSpace { } impl fmt::Display for clean::Import { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { clean::Import::Simple(ref name, ref src) => { if *name == src.path.last_name() { @@ -1002,7 +1004,7 @@ impl fmt::Display for clean::Import { } impl fmt::Display for clean::ImportSource { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self.did { Some(did) => resolved_path(f, did, &self.path, true, false), _ => { @@ -1019,7 +1021,7 @@ impl fmt::Display for clean::ImportSource { } impl fmt::Display for clean::TypeBinding { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if f.alternate() { write!(f, "{} = {:#}", self.name, self.ty) } else { @@ -1029,7 +1031,7 @@ impl fmt::Display for clean::TypeBinding { } impl fmt::Display for MutableSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { MutableSpace(clean::Immutable) => Ok(()), MutableSpace(clean::Mutable) => write!(f, "mut "), @@ -1038,7 +1040,7 @@ impl fmt::Display for MutableSpace { } impl fmt::Display for RawMutableSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RawMutableSpace(clean::Immutable) => write!(f, "const "), RawMutableSpace(clean::Mutable) => write!(f, "mut "), @@ -1047,7 +1049,7 @@ impl fmt::Display for RawMutableSpace { } impl fmt::Display for AbiSpace { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let quot = if f.alternate() { "\"" } else { """ }; match self.0 { Abi::Rust => Ok(()), @@ -1055,3 +1057,13 @@ impl fmt::Display for AbiSpace { } } } + +impl fmt::Display for DefaultSpace { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0 { + write!(f, "default ") + } else { + Ok(()) + } + } +} diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index 
8fb91cc23f7ba..d66455f91ba1a 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -1,13 +1,3 @@ -// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - //! Basic syntax highlighting functionality. //! //! This module uses libsyntax's lexer to provide token-based highlighting for @@ -15,7 +5,7 @@ //! //! Use the `render_with_highlighting` to highlight some rust code. -use html::escape::Escape; +use crate::html::escape::Escape; use std::fmt::Display; use std::io; @@ -35,40 +25,51 @@ pub fn render_with_highlighting( tooltip: Option<(&str, &str)>, ) -> String { debug!("highlighting: ================\n{}\n==============", src); - let sess = parse::ParseSess::new(FilePathMapping::empty()); - let fm = sess.source_map().new_source_file(FileName::Custom("stdin".to_string()), - src.to_string()); - let mut out = Vec::new(); if let Some((tooltip, class)) = tooltip { write!(out, "